From 8b5697fc8bedab81c22d1b1f0c5ad7a02ef3afb6 Mon Sep 17 00:00:00 2001 From: Christian Legnitto Date: Fri, 24 Jan 2025 13:59:40 -0400 Subject: [PATCH 1/9] Logging to track down https://github.com/Rust-GPU/rust-gpu/issues/46 --- .../src/builder/builder_methods.rs | 201 +++++++++++++++++- .../src/codegen_cx/constant.rs | 12 +- crates/rustc_codegen_spirv/src/lib.rs | 7 +- tests/ui/lang/issue-46.rs | 9 +- tests/ui/lang/issue-46.stderr | 48 ++--- 5 files changed, 221 insertions(+), 56 deletions(-) diff --git a/crates/rustc_codegen_spirv/src/builder/builder_methods.rs b/crates/rustc_codegen_spirv/src/builder/builder_methods.rs index 954599fb1d..263e8fa609 100644 --- a/crates/rustc_codegen_spirv/src/builder/builder_methods.rs +++ b/crates/rustc_codegen_spirv/src/builder/builder_methods.rs @@ -35,6 +35,9 @@ use std::cell::Cell; use std::iter::{self, empty}; use std::ops::RangeInclusive; +use tracing::{Level, instrument, span}; +use tracing::{debug, warn}; + macro_rules! simple_op { ( $func_name:ident, $inst_name:ident @@ -154,6 +157,7 @@ fn memset_dynamic_scalar( } impl<'a, 'tcx> Builder<'a, 'tcx> { + #[instrument(level = "trace", skip(self))] fn ordering_to_semantics_def(&self, ordering: AtomicOrdering) -> SpirvValue { let mut invalid_seq_cst = false; let semantics = match ordering { @@ -189,6 +193,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { semantics } + #[instrument(level = "trace", skip(self))] fn memset_const_pattern(&self, ty: &SpirvType<'tcx>, fill_byte: u8) -> Word { match *ty { SpirvType::Void => self.fatal("memset invalid on void pattern"), @@ -276,6 +281,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { } } + #[instrument(level = "trace", skip(self))] fn memset_dynamic_pattern(&self, ty: &SpirvType<'tcx>, fill_var: Word) -> Word { match *ty { SpirvType::Void => self.fatal("memset invalid on void pattern"), @@ -332,6 +338,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { } } + #[instrument(level = "trace", skip(self))] fn memset_constant_size(&mut self, ptr: SpirvValue, pat: SpirvValue, size_bytes: u64) { let size_elem = self .lookup_type(pat.ty) @@ -350,6 +357,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { } // TODO: Test this is correct + #[instrument(level = "trace", skip(self))] fn memset_dynamic_size(&mut self, ptr: SpirvValue, pat: SpirvValue, size_bytes: SpirvValue) { let size_elem = self .lookup_type(pat.ty) @@ -384,14 +392,17 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { self.switch_to_block(exit_bb); } + #[instrument(level = "trace", skip(self))] fn zombie_convert_ptr_to_u(&self, def: Word) { self.zombie(def, "cannot convert pointers to integers"); } + #[instrument(level = "trace", skip(self))] fn zombie_convert_u_to_ptr(&self, def: Word) { self.zombie(def, "cannot convert integers to pointers"); } + #[instrument(level = "trace", skip(self))] fn zombie_ptr_equal(&self, def: Word, inst: &str) { if !self.builder.has_capability(Capability::VariablePointers) { self.zombie( @@ -406,11 +417,13 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { // // HACK(eddyb) temporary workaround for untyped pointers upstream. // FIXME(eddyb) replace with untyped memory SPIR-V + `qptr` or similar. 
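    // Example (not part of this patch; the function name is illustrative): the
    // `#[instrument]` spans and `debug!`/`trace!` events added throughout this file
    // only produce output when a `tracing` subscriber is installed. A minimal sketch
    // using the `tracing-subscriber` crate with its `env-filter` feature; how
    // rust-gpu actually installs its subscriber may differ.
    fn install_trace_subscriber_for_debugging() {
        tracing_subscriber::fmt()
            .with_env_filter("rustc_codegen_spirv=trace")
            .init();
    }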
+ #[instrument(level = "trace", skip(self), fields(ptr, ty = ?self.debug_type(ty)))] fn adjust_pointer_for_typed_access( &mut self, ptr: SpirvValue, ty: ::Type, ) -> (SpirvValue, ::Type) { + debug!("currently in adjust_pointer_for_typed_access"); self.lookup_type(ty) .sizeof(self) .and_then(|size| self.adjust_pointer_for_sized_access(ptr, size)) @@ -426,6 +439,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { // // HACK(eddyb) temporary workaround for untyped pointers upstream. // FIXME(eddyb) replace with untyped memory SPIR-V + `qptr` or similar. + #[instrument(level = "trace", skip(self))] fn adjust_pointer_for_sized_access( &mut self, ptr: SpirvValue, @@ -434,11 +448,14 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { let ptr = ptr.strip_ptrcasts(); let mut leaf_ty = match self.lookup_type(ptr.ty) { SpirvType::Pointer { pointee } => pointee, - other => self.fatal(format!("non-pointer type: {other:?}")), + other => self.fatal(format!("`ptr` is non-pointer type: {other:?}")), }; - // FIXME(eddyb) this isn't efficient, `recover_access_chain_from_offset` - // could instead be doing all the extra digging itself. + debug!( + "before nested adjust_pointer_for_sized_access. `leaf_ty`: {}", + self.debug_type(leaf_ty) + ); + let mut indices = SmallVec::<[_; 8]>::new(); while let Some((inner_indices, inner_ty)) = self.recover_access_chain_from_offset( leaf_ty, @@ -450,9 +467,13 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { leaf_ty = inner_ty; } + debug!( + "after nested adjust_pointer_for_sized_access. `leaf_ty`: {}", + self.debug_type(leaf_ty) + ); + let leaf_ptr_ty = (self.lookup_type(leaf_ty).sizeof(self) == Some(size)) .then(|| self.type_ptr_to(leaf_ty))?; - let leaf_ptr = if indices.is_empty() { assert_ty_eq!(self, ptr.ty, leaf_ptr_ty); ptr @@ -466,6 +487,12 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { .unwrap() .with_type(leaf_ptr_ty) }; + + debug!( + "adjust_pointer_for_sized_access returning {} {}", + self.debug_type(leaf_ptr.ty), + self.debug_type(leaf_ty) + ); Some((leaf_ptr, leaf_ty)) } @@ -476,6 +503,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { /// /// That is, try to turn `((_: *T) as *u8).add(offset) as *Leaf` into a series /// of struct field and array/vector element accesses. + #[instrument(level = "trace", skip(self), fields(ty = ?self.debug_type(ty), leaf_size_or_unsized_range, leaf_ty = ?leaf_ty))] fn recover_access_chain_from_offset( &self, mut ty: ::Type, @@ -485,9 +513,18 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { leaf_ty: Option<::Type>, ) -> Option<(SmallVec<[u32; 8]>, ::Type)> { assert_ne!(Some(ty), leaf_ty); + debug!("recovering access chain: ty: {:?}", self.debug_type(ty)); + if let Some(leaf_ty) = leaf_ty { + debug!( + "recovering access chain: leaf_ty: {:?}", + self.debug_type(leaf_ty) + ); + } else { + debug!("recovering access chain: leaf_ty: None"); + } // HACK(eddyb) this has the correct ordering (`Sized(_) < Unsized`). - #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord)] + #[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)] enum MaybeSized { Sized(Size), Unsized, @@ -499,6 +536,8 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { start..=end }; + debug!("leaf_size_range: {:?}", leaf_size_range); + // NOTE(eddyb) `ty` and `ty_kind`/`ty_size` should be kept in sync. let mut ty_kind = self.lookup_type(ty); @@ -511,6 +550,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { field_offsets, .. 
} => { + debug!("recovering access chain from ADT"); let (i, field_ty, field_ty_kind, field_ty_size, offset_in_field) = field_offsets .iter() .enumerate() @@ -540,18 +580,25 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { })?; ty = field_ty; + debug!("setting ty = field_ty: {:?}", self.debug_type(field_ty)); ty_kind = field_ty_kind; + debug!("setting ty_kind = field_ty_kind: {:?}", field_ty_kind); ty_size = field_ty_size; + debug!("setting ty_size = field_ty_size: {:?}", field_ty_size); indices.push(i as u32); offset = offset_in_field; + debug!("setting offset = offset_in_field: {:?}", offset_in_field); } SpirvType::Vector { element, .. } | SpirvType::Array { element, .. } | SpirvType::RuntimeArray { element } | SpirvType::Matrix { element, .. } => { + debug!("recovering access chain from Vector, Array, RuntimeArray, or Matrix"); ty = element; + debug!("setting ty = element: {:?}", self.debug_type(element)); ty_kind = self.lookup_type(ty); + debug!("looked up ty kind: {:?}", ty_kind); let stride = ty_kind.sizeof(self)?; ty_size = MaybeSized::Sized(stride); @@ -559,11 +606,15 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { indices.push((offset.bytes() / stride.bytes()).try_into().ok()?); offset = Size::from_bytes(offset.bytes() % stride.bytes()); } - _ => return None, + _ => { + debug!("recovering access chain from SOMETHING ELSE, RETURNING NONE"); + return None; + } } // Avoid digging beyond the point the leaf could actually fit. if ty_size < *leaf_size_range.start() { + debug!("avoiding digging beyond the point the leaf could actually fit"); return None; } @@ -571,11 +622,14 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { && leaf_size_range.contains(&ty_size) && leaf_ty.map_or(true, |leaf_ty| leaf_ty == ty) { + debug!("returning type: {:?}", self.debug_type(ty)); + debug!("returning indices with len: {:?}", indices.len()); return Some((indices, ty)); } } } + #[instrument(level = "trace", skip(self), fields(ty = ?self.debug_type(ty), ptr, combined_indices = ?combined_indices.into_iter().map(|x| (self.debug_type(x.ty), x.kind)).collect::>(), is_inbounds))] fn maybe_inbounds_gep( &mut self, ty: Word, @@ -585,6 +639,8 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { ) -> SpirvValue { let (&ptr_base_index, indices) = combined_indices.split_first().unwrap(); + debug!("maybe_inbounds_gep: ty: {:?}", self.debug_type(ty)); + // The first index is an offset to the pointer, the rest are actual members. // https://llvm.org/docs/GetElementPtr.html // "An OpAccessChain instruction is the equivalent of an LLVM getelementptr instruction where the first index element is zero." @@ -632,19 +688,41 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { Some(Size::ZERO)..=None, None, ) { + debug!("`recover_access_chain_from_offset` returned Some"); // FIXME(eddyb) this condition is pretty limiting, but // eventually it shouldn't matter if GEPs are going away. 
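                // For instance (illustrative, using `Foo` from tests/ui/lang/issue-46.rs,
                // whose SPIR-V layout, per the old issue-46.stderr, places
                // `baz: [[u32; 2]; 1]` at offset 0): a constant offset of 4 bytes into
                // `*Foo` recovers `base_indices = [0, 0, 1]` and `base_pointee_ty = u32`,
                // i.e. the access chain for `foo.baz[0][1]`.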
if ty == base_pointee_ty || indices.is_empty() { let result_pointee_type = if indices.is_empty() { + debug!("indices is empty"); + debug!("base_pointee: {}", self.debug_type(base_pointee_ty)); + debug!("result_pointee: {}", self.debug_type(result_pointee_type)); + debug!( + "setting result_pointee_type to base_pointee_ty: {}", + self.debug_type(base_pointee_ty) + ); base_pointee_ty } else { + debug!("ty == base_pointee_ty"); + debug!("base_pointee: {}", self.debug_type(base_pointee_ty)); + debug!("result_pointee: {}", self.debug_type(result_pointee_type)); + debug!( + "setting result_pointee_type to result_pointee_type: {}", + self.debug_type(result_pointee_type) + ); result_pointee_type }; + let indices = base_indices .into_iter() .map(|idx| self.constant_u32(self.span(), idx).def(self)) .chain(indices) .collect(); + + debug!( + "emitting access chain with a pointer to type: {}", + self.debug_type(result_pointee_type) + ); + return self.emit_access_chain( self.type_ptr_to(result_pointee_type), ptr_id, @@ -657,6 +735,11 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { } let result_type = self.type_ptr_to(result_pointee_type); + debug!("result_type: {}", self.debug_type(result_type)); + debug!( + "original_pointee_type: {}", + self.debug_type(original_pointee_ty) + ); // Check if `ptr_id` is defined by an `OpAccessChain`, and if it is, // grab its base pointer and indices. @@ -664,6 +747,11 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { // FIXME(eddyb) this could get ridiculously expensive, at the very least // it could use `.rev()`, hoping the base pointer was recently defined? let maybe_original_access_chain = if ty == original_pointee_ty { + debug!( + "ty {} == original_pointee_ty {}", + self.debug_type(ty), + self.debug_type(original_pointee_ty) + ); let emit = self.emit(); let module = emit.module_ref(); let func = &module.functions[emit.selected_function().unwrap()]; @@ -687,9 +775,16 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { }); base_ptr_and_combined_indices } else { + debug!( + "ty {} != original_pointee_ty {}", + self.debug_type(ty), + self.debug_type(original_pointee_ty) + ); None }; if let Some((original_ptr, mut original_indices)) = maybe_original_access_chain { + debug!("has original access chain"); + // Transform the following: // OpAccessChain original_ptr [a, b, c] // OpPtrAccessChain ptr base [d, e, f] @@ -708,8 +803,12 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { original_indices, is_inbounds, ); + } else { + debug!("no original access chain"); } + debug!("maybe_inbounds_gep calling pointercast"); + // HACK(eddyb) temporary workaround for untyped pointers upstream. // FIXME(eddyb) replace with untyped memory SPIR-V + `qptr` or similar. let ptr = self.pointercast(ptr, self.type_ptr_to(ty)); @@ -724,6 +823,17 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { ) } + #[instrument( + level = "debug", + skip(self), + fields( + result_type = ?self.debug_type(result_type), + pointer, + ptr_base_index, + indices, + is_inbounds + ) + )] fn emit_access_chain( &self, result_type: ::Type, @@ -768,6 +878,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { .with_type(result_type) } + #[instrument(level = "trace", skip(self))] fn fptoint_sat( &mut self, signed: bool, @@ -947,6 +1058,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { } // HACK(eddyb) helper shared by `typed_alloca` and `alloca`. 
+ #[instrument(level = "trace", skip(self), fields(ty = ?self.debug_type(ty)))] fn declare_func_local_var( &mut self, ty: ::Type, @@ -986,6 +1098,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { impl<'a, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'tcx> { type CodegenCx = CodegenCx<'tcx>; + #[instrument(level = "trace", skip(cx))] fn build(cx: &'a Self::CodegenCx, llbb: Self::BasicBlock) -> Self { let cursor = cx.builder.select_block_by_id(llbb); // FIXME(eddyb) change `Self::Function` to be more like a function index. @@ -1378,6 +1491,7 @@ impl<'a, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'tcx> { .with_type(val.ty) } + #[instrument(level = "trace", skip(self))] fn checked_binop( &mut self, oop: OverflowOp, @@ -1794,7 +1908,13 @@ impl<'a, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'tcx> { } } + #[instrument(level = "trace", skip(self), fields(dest_ty = ?self.debug_type(dest_ty)))] fn bitcast(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value { + debug!( + "bitcast val: {} -> dest: {}", + self.debug_type(val.ty), + self.debug_type(dest_ty) + ); if val.ty == dest_ty { val } else { @@ -1805,6 +1925,9 @@ impl<'a, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'tcx> { // in SPIR-V, but still being used to paper over untyped pointers, // by unpacking/repacking newtype-shaped aggregates as-needed. let unpack_newtype = |ty, kind| { + let span = span!(Level::DEBUG, "unpack_newtype"); + let _guard = span.enter(); + if !matches!(kind, SpirvType::Adt { .. } | SpirvType::Array { .. }) { return None; } @@ -1833,12 +1956,25 @@ impl<'a, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'tcx> { .composite_extract(in_leaf_ty, None, val.def(self), indices) .unwrap() .with_type(in_leaf_ty); + + debug!( + "unpacked newtype. val: {} -> in_leaf_ty: {}", + self.debug_type(val.ty), + self.debug_type(in_leaf_ty), + ); return self.bitcast(in_leaf, dest_ty); } + // Repack output newtypes, after bitcasting the leaf inside, instead. if let Some((indices, out_leaf_ty)) = unpack_newtype(dest_ty, dest_ty_kind) { + debug!( + "unpacked newtype: dest: {} -> out_leaf_ty: {}", + self.debug_type(dest_ty), + self.debug_type(out_leaf_ty), + ); let out_leaf = self.bitcast(val, out_leaf_ty); let out_agg_undef = self.undef(dest_ty); + debug!("returning composite insert"); return self .emit() .composite_insert( @@ -1857,9 +1993,16 @@ impl<'a, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'tcx> { // Reuse the pointer-specific logic in `pointercast` for `*T -> *U`. if val_is_ptr && dest_is_ptr { + debug!("val and dest are both pointers"); return self.pointercast(val, dest_ty); } + debug!( + "before emitting: val ty: {} -> dest ty: {}", + self.debug_type(val.ty), + self.debug_type(dest_ty) + ); + let result = self .emit() .bitcast(dest_ty, None, val.def(self)) @@ -1940,20 +2083,28 @@ impl<'a, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'tcx> { } } + #[instrument(level = "trace", skip(self), fields(ptr, ptr_ty = ?self.debug_type(ptr.ty), dest_ty = ?self.debug_type(dest_ty)))] fn pointercast(&mut self, ptr: Self::Value, dest_ty: Self::Type) -> Self::Value { // HACK(eddyb) reuse the special-casing in `const_bitcast`, which relies // on adding a pointer type to an untyped pointer (to some const data). if let SpirvValueKind::IllegalConst(_) = ptr.kind { + debug!("illegal const"); return self.const_bitcast(ptr, dest_ty); } if ptr.ty == dest_ty { + debug!("ptr.ty == dest_ty"); return ptr; } // Strip a previous `pointercast`, to reveal the original pointer type. 
let ptr = ptr.strip_ptrcasts(); + debug!( + "ptr type after strippint pointer cases: {}", + self.debug_type(ptr.ty), + ); + if ptr.ty == dest_ty { return ptr; } @@ -1978,6 +2129,12 @@ impl<'a, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'tcx> { dest_pointee_size..=dest_pointee_size, Some(dest_pointee), ) { + debug!("`recover_access_chain_from_offset` returned something"); + debug!( + "ptr_pointee: {}, dest_pointee {}", + self.debug_type(ptr_pointee), + self.debug_type(dest_pointee), + ); let indices = indices .into_iter() .map(|idx| self.constant_u32(self.span(), idx).def(self)) @@ -1987,6 +2144,12 @@ impl<'a, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'tcx> { .unwrap() .with_type(dest_ty) } else { + debug!("`recover_access_chain_from_offset` returned `None`"); + debug!( + "ptr_pointee: {}, dest_pointee {}", + self.debug_type(ptr_pointee), + self.debug_type(dest_pointee), + ); // Defer the cast so that it has a chance to be avoided. let original_ptr = ptr.def(self); SpirvValue { @@ -2253,6 +2416,7 @@ impl<'a, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'tcx> { .with_type(b) } + #[instrument(level = "trace", skip(self))] fn memcpy( &mut self, dst: Self::Value, @@ -2277,19 +2441,36 @@ impl<'a, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'tcx> { } let typed_copy_dst_src = const_size.and_then(|const_size| { + debug!( + "adjusting pointers: src: {} -> dst: {}", + self.debug_type(src.ty), + self.debug_type(dst.ty), + ); let dst_adj = self.adjust_pointer_for_sized_access(dst, const_size); let src_adj = self.adjust_pointer_for_sized_access(src, const_size); match (dst_adj, src_adj) { // HACK(eddyb) fill in missing `dst`/`src` with the other side. (Some((dst, access_ty)), None) => { + debug!( + "DESTINATION adjusted memcpy calling pointercast: dst ty: {}, access ty: {}", + self.debug_type(dst.ty), + self.debug_type(access_ty) + ); Some((dst, self.pointercast(src, self.type_ptr_to(access_ty)))) } (None, Some((src, access_ty))) => { + debug!( + "SOURCE adjusted memcpy calling pointercast: dst ty: {} -> access ty: {}, src ty: {}", + self.debug_type(dst.ty), + self.debug_type(access_ty), + self.debug_type(src.ty) + ); Some((self.pointercast(dst, self.type_ptr_to(access_ty)), src)) } (Some((dst, dst_access_ty)), Some((src, src_access_ty))) if dst_access_ty == src_access_ty => { + debug!("BOTH adjusted memcpy calling pointercast"); Some((dst, src)) } (None, None) | (Some(_), Some(_)) => None, @@ -2298,8 +2479,10 @@ impl<'a, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'tcx> { if let Some((dst, src)) = typed_copy_dst_src { if let Some(const_value) = src.const_fold_load(self) { + debug!("storing const value"); self.store(const_value, dst, Align::from_bytes(0).unwrap()); } else { + debug!("copying memory using OpCopyMemory"); self.emit() .copy_memory(dst.def(self), src.def(self), None, None, empty()) .unwrap(); @@ -2456,12 +2639,14 @@ impl<'a, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'tcx> { .with_type(result_type) } + #[instrument(level = "trace", skip(self))] fn insert_value(&mut self, agg_val: Self::Value, elt: Self::Value, idx: u64) -> Self::Value { let field_type = match self.lookup_type(agg_val.ty) { SpirvType::Adt { field_types, .. } => field_types[idx as usize], other => self.fatal(format!("insert_value not implemented on type {other:?}")), }; + debug!("insert value"); // HACK(eddyb) temporary workaround for untyped pointers upstream. // FIXME(eddyb) replace with untyped memory SPIR-V + `qptr` or similar. 
let elt = self.bitcast(elt, field_type); @@ -2705,6 +2890,9 @@ impl<'a, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'tcx> { funclet: Option<&Self::Funclet>, instance: Option>, ) -> Self::Value { + let span = tracing::span!(tracing::Level::DEBUG, "call"); + let _enter = span.enter(); + if funclet.is_some() { self.fatal("TODO: Funclets are not supported"); } @@ -2753,6 +2941,7 @@ impl<'a, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'tcx> { ), }; + debug!("call"); // HACK(eddyb) temporary workaround for untyped pointers upstream. // FIXME(eddyb) replace with untyped memory SPIR-V + `qptr` or similar. let args: SmallVec<[_; 8]> = args diff --git a/crates/rustc_codegen_spirv/src/codegen_cx/constant.rs b/crates/rustc_codegen_spirv/src/codegen_cx/constant.rs index 07e617c898..b2427fedef 100644 --- a/crates/rustc_codegen_spirv/src/codegen_cx/constant.rs +++ b/crates/rustc_codegen_spirv/src/codegen_cx/constant.rs @@ -367,11 +367,11 @@ impl<'tcx> CodegenCx<'tcx> { } pub fn create_const_alloc(&self, alloc: ConstAllocation<'tcx>, ty: Word) -> SpirvValue { - // println!( - // "Creating const alloc of type {} with {} bytes", - // self.debug_type(ty), - // alloc.inner().len() - // ); + tracing::debug!( + "Creating const alloc of type {} with {} bytes", + self.debug_type(ty), + alloc.inner().len() + ); let mut offset = Size::ZERO; let result = self.read_from_const_alloc(alloc, &mut offset, ty); assert_eq!( @@ -379,7 +379,7 @@ impl<'tcx> CodegenCx<'tcx> { alloc.inner().len(), "create_const_alloc must consume all bytes of an Allocation" ); - // println!("Done creating alloc of type {}", self.debug_type(ty)); + tracing::debug!("Done creating alloc of type {}", self.debug_type(ty)); result } diff --git a/crates/rustc_codegen_spirv/src/lib.rs b/crates/rustc_codegen_spirv/src/lib.rs index 9f20513fa4..b2bd123dd2 100644 --- a/crates/rustc_codegen_spirv/src/lib.rs +++ b/crates/rustc_codegen_spirv/src/lib.rs @@ -167,6 +167,7 @@ use std::io::Cursor; use std::io::Write; use std::path::Path; use std::sync::Arc; +use tracing::{error, warn}; fn dump_mir(tcx: TyCtxt<'_>, mono_items: &[(MonoItem<'_>, MonoItemData)], path: &Path) { create_dir_all(path.parent().unwrap()).unwrap(); @@ -361,11 +362,11 @@ impl WriteBackendMethods for SpirvCodegenBackend { } fn print_pass_timings(&self) { - println!("TODO: Implement print_pass_timings"); + warn!("TODO: Implement print_pass_timings"); } fn print_statistics(&self) { - println!("TODO: Implement print_statistics"); + warn!("TODO: Implement print_statistics"); } unsafe fn optimize( @@ -536,7 +537,7 @@ impl Drop for DumpModuleOnPanic<'_, '_, '_> { if self.path.has_root() { self.cx.builder.dump_module(self.path); } else { - println!("{}", self.cx.builder.dump_module_str()); + error!("{}", self.cx.builder.dump_module_str()); } } } diff --git a/tests/ui/lang/issue-46.rs b/tests/ui/lang/issue-46.rs index ca7be62dd6..4cf6e05355 100644 --- a/tests/ui/lang/issue-46.rs +++ b/tests/ui/lang/issue-46.rs @@ -1,15 +1,8 @@ -// build-fail +// build-pass use spirv_std::spirv; -#[derive(Default)] -struct Foo { - bar: bool, - baz: [[u32; 2]; 1], -} - #[spirv(fragment)] pub fn main() { let x = [[1; 2]; 1]; - let y = [Foo::default(); 1]; } diff --git a/tests/ui/lang/issue-46.stderr b/tests/ui/lang/issue-46.stderr index 1e8c8b8c8c..706c500912 100644 --- a/tests/ui/lang/issue-46.stderr +++ b/tests/ui/lang/issue-46.stderr @@ -1,40 +1,22 @@ error: cannot cast between pointer types from `*i32` to `*[i32; 2]` - --> $DIR/issue-46.rs:13:13 - | -13 | let x = [[1; 2]; 1]; - | ^^^^^^^^^^^ - | + --> 
$DIR/issue-46.rs:7:13 + | +7 | let x = [[1; 2]; 1]; + | ^^^^^^^^^^^ + | note: used from within `issue_46::main` - --> $DIR/issue-46.rs:13:13 - | -13 | let x = [[1; 2]; 1]; - | ^^^^^^^^^^^ + --> $DIR/issue-46.rs:7:13 + | +7 | let x = [[1; 2]; 1]; + | ^^^^^^^^^^^ note: called by `main` - --> $DIR/issue-46.rs:12:8 - | -12 | pub fn main() { - | ^^^^ + --> $DIR/issue-46.rs:6:8 + | +6 | pub fn main() { + | ^^^^ -error: cannot cast between pointer types - from `*[[u32; 2]; 1]` - to `*struct Foo { baz: [[u32; 2]; 1], bar: bool }` - --> $DIR/issue-46.rs:14:13 - | -14 | let y = [Foo::default(); 1]; - | ^^^^^^^^^^^^^^^^^^^ - | -note: used from within `issue_46::main` - --> $DIR/issue-46.rs:14:13 - | -14 | let y = [Foo::default(); 1]; - | ^^^^^^^^^^^^^^^^^^^ -note: called by `main` - --> $DIR/issue-46.rs:12:8 - | -12 | pub fn main() { - | ^^^^ - -error: aborting due to 2 previous errors +x +error: aborting due to 1 previous error From 99d91a13019d162086e8dce94dd0206edbe4d4be Mon Sep 17 00:00:00 2001 From: Christian Legnitto Date: Sat, 12 Apr 2025 13:07:47 -0400 Subject: [PATCH 2/9] Update tests --- tests/ui/lang/issue-46.rs | 7 +++++++ tests/ui/lang/issue-46.stderr | 22 ---------------------- 2 files changed, 7 insertions(+), 22 deletions(-) delete mode 100644 tests/ui/lang/issue-46.stderr diff --git a/tests/ui/lang/issue-46.rs b/tests/ui/lang/issue-46.rs index 4cf6e05355..99a0835a95 100644 --- a/tests/ui/lang/issue-46.rs +++ b/tests/ui/lang/issue-46.rs @@ -2,7 +2,14 @@ use spirv_std::spirv; +#[derive(Default)] +struct Foo { + bar: bool, + baz: [[u32; 2]; 1], +} + #[spirv(fragment)] pub fn main() { let x = [[1; 2]; 1]; + let y = [Foo::default(); 1]; } diff --git a/tests/ui/lang/issue-46.stderr b/tests/ui/lang/issue-46.stderr deleted file mode 100644 index 706c500912..0000000000 --- a/tests/ui/lang/issue-46.stderr +++ /dev/null @@ -1,22 +0,0 @@ -error: cannot cast between pointer types - from `*i32` - to `*[i32; 2]` - --> $DIR/issue-46.rs:7:13 - | -7 | let x = [[1; 2]; 1]; - | ^^^^^^^^^^^ - | -note: used from within `issue_46::main` - --> $DIR/issue-46.rs:7:13 - | -7 | let x = [[1; 2]; 1]; - | ^^^^^^^^^^^ -note: called by `main` - --> $DIR/issue-46.rs:6:8 - | -6 | pub fn main() { - | ^^^^ - -x -error: aborting due to 1 previous error - From 74538e8d4c323ec69674fe07e1b498527b14d2b9 Mon Sep 17 00:00:00 2001 From: Christian Legnitto Date: Sat, 12 Apr 2025 14:51:59 -0400 Subject: [PATCH 3/9] Correctly calculate access chains for compound types Fixes https://github.com/Rust-GPU/rust-gpu/issues/46. --- .../src/builder/builder_methods.rs | 447 ++++++++++++------ 1 file changed, 309 insertions(+), 138 deletions(-) diff --git a/crates/rustc_codegen_spirv/src/builder/builder_methods.rs b/crates/rustc_codegen_spirv/src/builder/builder_methods.rs index 263e8fa609..43f4558894 100644 --- a/crates/rustc_codegen_spirv/src/builder/builder_methods.rs +++ b/crates/rustc_codegen_spirv/src/builder/builder_methods.rs @@ -632,194 +632,365 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { #[instrument(level = "trace", skip(self), fields(ty = ?self.debug_type(ty), ptr, combined_indices = ?combined_indices.into_iter().map(|x| (self.debug_type(x.ty), x.kind)).collect::>(), is_inbounds))] fn maybe_inbounds_gep( &mut self, + // Represents the type of the element that `ptr` is assumed to point to + // *before* applying the *first* index (`ptr_base_index`). This type is used + // primarily to calculate the size for the initial offset. ty: Word, + // The base pointer value for the GEP operation. 
ptr: SpirvValue, + // A slice of indices used for the GEP calculation. The *first* index is treated + // as the main offset from the base pointer `ptr`, scaled by the size of `ty`. + // Subsequent indices navigate through nested aggregate types (structs, arrays, + // vectors, etc.) starting from `ty`. combined_indices: &[SpirvValue], + // If true, generate an `OpInBoundsAccessChain`, which has stricter requirements + // but allows more optimization. If false, generate `OpAccessChain`. is_inbounds: bool, ) -> SpirvValue { + // Separate the first index (used for the base offset) from the rest of the + // indices (used for navigating within the aggregate type). `ptr_base_index` is + // the index applied directly to `ptr`, effectively an offset multiplier based + // on the size of `ty`. `indices` are the subsequent indices used to drill down + // into fields or elements of `ty`. let (&ptr_base_index, indices) = combined_indices.split_first().unwrap(); debug!("maybe_inbounds_gep: ty: {:?}", self.debug_type(ty)); - // The first index is an offset to the pointer, the rest are actual members. - // https://llvm.org/docs/GetElementPtr.html - // "An OpAccessChain instruction is the equivalent of an LLVM getelementptr instruction where the first index element is zero." - // https://github.com/gpuweb/gpuweb/issues/33 - let mut result_pointee_type = ty; - let indices: Vec<_> = indices - .iter() - .map(|index| { - result_pointee_type = match self.lookup_type(result_pointee_type) { - SpirvType::Array { element, .. } | SpirvType::RuntimeArray { element } => { - element - } - _ => self.fatal(format!( - "GEP not implemented for type {}", - self.debug_type(result_pointee_type) - )), - }; - index.def(self) - }) - .collect(); + // Determine if this GEP operation is effectively byte-level addressing. + // This check is based on the *provided* input type `ty`. If `ty` is i8 or u8, + // it suggests the caller intends to perform byte-offset calculations, + // which might allow for more flexible type recovery later. + let is_byte_gep = matches!(self.lookup_type(ty), SpirvType::Integer(8, _)); + debug!("Is byte GEP (based on input type): {}", is_byte_gep); + + // --- Calculate the final pointee type based on the GEP operation --- + + // This loop does the type traversal according to the `indices` (excluding the + // base offset index). It starts with the initial element type `ty` and + // iteratively applies each index to determine the type of the element being + // accessed at each step. The result is the type that the *final* pointer, + // generated by the SPIR-V `AccessChain`` instruction, *must* point to according + // to the SPIR-V specification and the provided `indices`. + let mut calculated_pointee_type = ty; + for index_val in indices { + // Lookup the current aggregate type we are indexing into. + calculated_pointee_type = match self.lookup_type(calculated_pointee_type) { + // If it's a struct (ADT), the index must be a constant. Use it to get + // the field type. + SpirvType::Adt { field_types, .. } => { + let const_index = self + .builder + .lookup_const_scalar(*index_val) + .expect("Non-constant struct index for GEP") + as usize; + // Get the type of the specific field. + field_types[const_index] + } + // If it's an array, vector, or matrix, indexing yields the element type. + SpirvType::Array { element, .. } + | SpirvType::RuntimeArray { element } + | SpirvType::Vector { element, .. } + | SpirvType::Matrix { element, .. 
} => element, + // Special case: If we started with a byte GEP (`is_byte_gep` is true) and + // we are currently indexing into a byte type, the result is still a byte type. + // This prevents errors if `indices` contains non-zero values when `ty` is u8/i8. + SpirvType::Integer(8, signedness) if is_byte_gep => { + // Define the resulting byte type as it might not exist yet). + SpirvType::Integer(8, signedness).def(self.span(), self) + } + // Any other type cannot be indexed into via GEP. + _ => self.fatal(format!( + "GEP not implemented for indexing into type {}", + self.debug_type(calculated_pointee_type) + )), + }; + } + // Construct the SPIR-V pointer type that points to the final calculated pointee + // type. This is the *required* result type for the SPIR-V `AccessChain` + // instruction. + let final_spirv_ptr_type = self.type_ptr_to(calculated_pointee_type); + debug!( + "Calculated final SPIR-V pointee type: {}", + self.debug_type(calculated_pointee_type) + ); + debug!( + "Calculated final SPIR-V ptr type: {}", + self.debug_type(final_spirv_ptr_type) + ); - // Special-case field accesses through a `pointercast`, to access the - // right field in the original type, for the `Logical` addressing model. + // Ensure all the `indices` (excluding the base offset index) are defined in the + // SPIR-V module and get their corresponding SPIR-V IDs. These IDs will be used + // as operands in the AccessChain instruction. + let gep_indices_ids: Vec<_> = indices.iter().map(|index| index.def(self)).collect(); + + // --- Prepare the base pointer --- + + // Remove any potentially redundant pointer casts applied to the input `ptr`. + // GEP operations should ideally work on the "underlying" pointer. let ptr = ptr.strip_ptrcasts(); + // Get the SPIR-V ID for the (potentially stripped) base pointer. let ptr_id = ptr.def(self); + // Determine the actual pointee type of the base pointer `ptr` *after* stripping casts. + // This might differ from the input `ty` if `ty` was less specific (e.g., u8). let original_pointee_ty = match self.lookup_type(ptr.ty) { SpirvType::Pointer { pointee } => pointee, other => self.fatal(format!("gep called on non-pointer type: {other:?}")), }; - // HACK(eddyb) `struct_gep` itself is falling out of use, as it's being - // replaced upstream by `ptr_add` (aka `inbounds_gep` with byte offsets). - // - // FIXME(eddyb) get rid of everything other than: - // - constant byte offset (`ptr_add`?) - // - dynamic indexing of a single array - let const_ptr_offset = self + // --- Recovery Path --- + + // Try to calculate the byte offset implied by the *first* index + // (`ptr_base_index`) if it's a compile-time constant. This uses the size of the + // *input type* `ty`. + let const_ptr_offset_bytes = self .builder - .lookup_const_scalar(ptr_base_index) - .and_then(|idx| Some(u64::try_from(idx).ok()? * self.lookup_type(ty).sizeof(self)?)); - if let Some(const_ptr_offset) = const_ptr_offset { + .lookup_const_scalar(ptr_base_index) // Check if ptr_base_index is constant scalar + .and_then(|idx| { + let idx_u64 = u64::try_from(idx).ok()?; + // Get the size of the input type `ty` + self.lookup_type(ty) + .sizeof(self) + // Calculate offset in bytes + .map(|size| idx_u64.saturating_mul(size.bytes())) + }); + + // If we successfully calculated a constant byte offset for the first index... 
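        // (For example, with `ty = [u32; 2]`, which is 8 bytes, and a constant
        // `ptr_base_index` of 1, `const_ptr_offset_bytes` is 8.)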
+ if let Some(const_ptr_offset_bytes) = const_ptr_offset_bytes { + // Try to reconstruct a more "structured" access chain based on the *original* + // pointee type of the pointer (`original_pointee_ty`) and the calculated byte offset. + // This is useful if the input `ty` was generic (like u8) but the pointer actually + // points to a structured type (like a struct). `recover_access_chain_from_offset` + // attempts to find a sequence of constant indices (`base_indices`) into + // `original_pointee_ty` that matches the `const_ptr_offset_bytes`. if let Some((base_indices, base_pointee_ty)) = self.recover_access_chain_from_offset( + // Start from the pointer's actual underlying type original_pointee_ty, - const_ptr_offset, + // The target byte offset + Size::from_bytes(const_ptr_offset_bytes), + // Allowed range (not strictly needed here?) Some(Size::ZERO)..=None, + // Don't require alignment None, ) { - debug!("`recover_access_chain_from_offset` returned Some"); - // FIXME(eddyb) this condition is pretty limiting, but - // eventually it shouldn't matter if GEPs are going away. - if ty == base_pointee_ty || indices.is_empty() { - let result_pointee_type = if indices.is_empty() { - debug!("indices is empty"); - debug!("base_pointee: {}", self.debug_type(base_pointee_ty)); - debug!("result_pointee: {}", self.debug_type(result_pointee_type)); - debug!( - "setting result_pointee_type to base_pointee_ty: {}", - self.debug_type(base_pointee_ty) - ); - base_pointee_ty - } else { - debug!("ty == base_pointee_ty"); - debug!("base_pointee: {}", self.debug_type(base_pointee_ty)); - debug!("result_pointee: {}", self.debug_type(result_pointee_type)); - debug!( - "setting result_pointee_type to result_pointee_type: {}", - self.debug_type(result_pointee_type) - ); - result_pointee_type - }; + // Recovery successful! Found a structured path (`base_indices`) to the target offset. + debug!( + "`recover_access_chain_from_offset` returned Some with base_pointee_ty: {}", + self.debug_type(base_pointee_ty) + ); - let indices = base_indices + // Determine the result type for the `AccessChain` instruction we might + // emit. By default, use the `final_spirv_ptr_type` strictly calculated + // earlier from `ty` and `indices`. + // + // If this is a byte GEP *and* the recovered type happens to be a byte + // type, we can use the pointer type derived from the *recovered* type + // (`base_pointee_ty`). This helps preserve type information when + // recovery works for byte addressing. + let result_wrapper_type = if !is_byte_gep + || matches!(self.lookup_type(base_pointee_ty), SpirvType::Integer(8, _)) + { + debug!( + "Using strictly calculated type for wrapper: {}", + // Use type based on input `ty` + `indices` + self.debug_type(calculated_pointee_type) + ); + final_spirv_ptr_type + } else { + debug!( + "Byte GEP allowing recovered type for wrapper: {}", + // Use type based on recovery result + self.debug_type(base_pointee_ty) + ); + self.type_ptr_to(base_pointee_ty) + }; + + // Check if we can directly use the recovered path combined with the + // remaining indices. This is possible if: + // 1. The input type `ty` matches the type found by recovery + // (`base_pointee_ty`). This means the recovery didn't fundamentally + // change the type interpretation needed for the *next* steps + // (`indices`). + // OR + // 2. There are no further indices (`gep_indices_ids` is empty). In this + // case, the recovery path already leads to the final destination. 
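                // For instance (illustrative), if recovery produced `base_indices = [0, 0]`
                // and the remaining GEP indices are `[1]`, the single access chain emitted
                // below uses the combined index list `[0, 0, 1]` on the original pointer.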
+ if ty == base_pointee_ty || gep_indices_ids.is_empty() { + // Combine the recovered constant indices with the remaining dynamic/constant indices. + let combined_indices = base_indices .into_iter() + // Convert recovered `u32` indices to constant SPIR-V IDs. .map(|idx| self.constant_u32(self.span(), idx).def(self)) - .chain(indices) + // Chain the original subsequent indices (`indices`). + .chain(gep_indices_ids.iter().copied()) .collect(); debug!( - "emitting access chain with a pointer to type: {}", - self.debug_type(result_pointee_type) + "emitting access chain via recovery path with wrapper type: {}", + self.debug_type(result_wrapper_type) ); - + // Emit a single AccessChain using the original pointer `ptr_id` and the fully combined index list. + // Note: We don't pass `ptr_base_index` here because its effect is incorporated into `base_indices`. return self.emit_access_chain( - self.type_ptr_to(result_pointee_type), - ptr_id, - None, - indices, - is_inbounds, + result_wrapper_type, // The chosen result pointer type + ptr_id, // The original base pointer ID + None, // No separate base index needed + combined_indices, // The combined structured + original indices + is_inbounds, // Preserve original inbounds request + ); + } else { + // Recovery happened, but the recovered type `base_pointee_ty` doesn't match the input `ty`, + // AND there are more `indices` to process. Using the `base_indices` derived from + // `original_pointee_ty` would be incorrect for interpreting the subsequent `indices` + // which were intended to operate relative to `ty`. Fall back to the standard path. + debug!( + "Recovery type mismatch ({}) vs ({}) and GEP indices exist, falling back", + self.debug_type(ty), + self.debug_type(base_pointee_ty) ); } + } else { + // `recover_access_chain_from_offset` couldn't find a structured path for the constant offset. + debug!("`recover_access_chain_from_offset` returned None, falling back"); } } - let result_type = self.type_ptr_to(result_pointee_type); - debug!("result_type: {}", self.debug_type(result_type)); - debug!( - "original_pointee_type: {}", - self.debug_type(original_pointee_ty) - ); + // --- End Recovery Path --- - // Check if `ptr_id` is defined by an `OpAccessChain`, and if it is, - // grab its base pointer and indices. - // - // FIXME(eddyb) this could get ridiculously expensive, at the very least - // it could use `.rev()`, hoping the base pointer was recently defined? + // --- Attempt GEP Merging Path --- + + // Check if the base pointer `ptr` itself was the result of a previous + // AccessChain instruction. Merging is only attempted if the input type `ty` + // matches the pointer's actual underlying pointee type `original_pointee_ty`. + // If they differ, merging could be invalid. let maybe_original_access_chain = if ty == original_pointee_ty { - debug!( - "ty {} == original_pointee_ty {}", - self.debug_type(ty), - self.debug_type(original_pointee_ty) - ); - let emit = self.emit(); - let module = emit.module_ref(); - let func = &module.functions[emit.selected_function().unwrap()]; - let base_ptr_and_combined_indices = func - .all_inst_iter() - .find(|inst| inst.result_id == Some(ptr_id)) - .and_then(|ptr_def_inst| { - if matches!( - ptr_def_inst.class.opcode, - Op::AccessChain | Op::InBoundsAccessChain - ) { - let base_ptr = ptr_def_inst.operands[0].unwrap_id_ref(); - let indices = ptr_def_inst.operands[1..] 
- .iter() - .map(|op| op.unwrap_id_ref()) - .collect::>(); - Some((base_ptr, indices)) - } else { - None - } - }); - base_ptr_and_combined_indices + // Search the current function's instructions... + let search_result = { + let emit = self.emit(); + let module = emit.module_ref(); + emit.selected_function().and_then(|func_idx| { + module.functions.get(func_idx).and_then(|func| { + // Find the instruction that defined our base pointer `ptr_id`. + func.all_inst_iter() + .find(|inst| inst.result_id == Some(ptr_id)) + .and_then(|ptr_def_inst| { + // Check if that instruction was an `AccessChain` or `InBoundsAccessChain`. + if matches!( + ptr_def_inst.class.opcode, + Op::AccessChain | Op::InBoundsAccessChain + ) { + // If yes, extract its base pointer and its indices. + let base_ptr = ptr_def_inst.operands[0].unwrap_id_ref(); + let indices = ptr_def_inst.operands[1..] + .iter() + .map(|op| op.unwrap_id_ref()) + .collect::>(); + Some((base_ptr, indices)) + } else { + // The instruction defining ptr was not an `AccessChain`. + None + } + }) + }) + }) + }; + search_result } else { - debug!( - "ty {} != original_pointee_ty {}", - self.debug_type(ty), - self.debug_type(original_pointee_ty) - ); + // Input type `ty` doesn't match the pointer's actual type, cannot safely merge. None }; + + // If we found that `ptr` was defined by a previous `AccessChain`... if let Some((original_ptr, mut original_indices)) = maybe_original_access_chain { - debug!("has original access chain"); - - // Transform the following: - // OpAccessChain original_ptr [a, b, c] - // OpPtrAccessChain ptr base [d, e, f] - // into - // OpAccessChain original_ptr [a, b, c + base, d, e, f] - // to remove the need for OpPtrAccessChain - let last = original_indices.last_mut().unwrap(); - *last = self - .add(last.with_type(ptr_base_index.ty), ptr_base_index) - .def(self); - original_indices.extend(indices); - return self.emit_access_chain( - result_type, - original_ptr, - None, - original_indices, - is_inbounds, - ); + debug!("has original access chain, attempting to merge GEPs"); + + // Check if merging is possible. Requires: + // 1. The original AccessChain had at least one index. + // 2. The *last* index of the original AccessChain is a constant. + // 3. The *first* index (`ptr_base_index`) of the *current* GEP is a constant. + // Merging usually involves adding these two constant indices. + let can_merge = if let Some(&last_original_idx_id) = original_indices.last() { + // Check if both the last original index and the current base index are constant scalars. + self.builder + .lookup_const_scalar(last_original_idx_id.with_type(ptr_base_index.ty)) + .is_some() + && self.builder.lookup_const_scalar(ptr_base_index).is_some() + } else { + // Original access chain had no indices to merge with. + false + }; + + if can_merge { + let last_original_idx_id = original_indices.last_mut().unwrap(); + // Add the current `ptr_base_index` to the last index of the original chain. + // The result becomes the new last index. + *last_original_idx_id = self + .add( + // Ensure types match for add. + last_original_idx_id.with_type(ptr_base_index.ty), + ptr_base_index, + ) + // Define the result of the addition. + .def(self); + // Append the remaining indices (`indices`) from the current GEP operation. + original_indices.extend(gep_indices_ids); + + debug!( + "emitting merged access chain with pointer to type: {}", + self.debug_type(calculated_pointee_type) + ); + // Emit a *single* AccessChain using the *original* base pointer and the *merged* index list. 
+ // The result type *must* be the `final_spirv_ptr_type` calculated earlier based on the full chain of operations. + return self.emit_access_chain( + final_spirv_ptr_type, // Use the strictly calculated final type. + original_ptr, // Base pointer from the *original* AccessChain. + None, // No separate base index; it's merged. + original_indices, // The combined list of indices. + is_inbounds, // Preserve original inbounds request. + ); + } else { + // Cannot merge because one or both relevant indices are not constant, + // or the original chain was empty. + debug!( + "Last index or base offset is not constant, or no last index, cannot merge." + ); + } } else { - debug!("no original access chain"); + // The base pointer `ptr` was not the result of an AccessChain, or merging + // wasn't attempted due to type mismatch. + debug!("no original access chain to merge with"); } + // --- End GEP Merging Path --- - debug!("maybe_inbounds_gep calling pointercast"); + // --- Fallback / Default Path --- - // HACK(eddyb) temporary workaround for untyped pointers upstream. - // FIXME(eddyb) replace with untyped memory SPIR-V + `qptr` or similar. + // This path is taken if neither the Recovery nor the Merging path succeeded or applied. + // It performs a more direct translation of the GEP request. + + // HACK(eddyb): Workaround for potential upstream issues where pointers might lack precise type info. + // FIXME(eddyb): Ideally, this should use untyped memory features if available/necessary. + + // Before emitting the AccessChain, explicitly cast the base pointer `ptr` to + // ensure its pointee type matches the input `ty`. This is required because the + // SPIR-V `AccessChain` instruction implicitly uses the size of the base + // pointer's pointee type when applying the *first* index operand (our + // `ptr_base_index`). If `ty` and `original_pointee_ty` mismatched and we + // reached this fallback, this cast ensures SPIR-V validity. + debug!("maybe_inbounds_gep fallback path calling pointercast"); + // Cast ptr to point to `ty`. let ptr = self.pointercast(ptr, self.type_ptr_to(ty)); + // Get the ID of the (potentially newly casted) pointer. let ptr_id = ptr.def(self); + debug!( + "emitting access chain via fallback path with pointer type: {}", + self.debug_type(final_spirv_ptr_type) + ); + // Emit the `AccessChain` instruction. self.emit_access_chain( - result_type, - ptr_id, - Some(ptr_base_index), - indices, - is_inbounds, + final_spirv_ptr_type, // Result *must* be a pointer to the final calculated type. + ptr_id, // Use the (potentially casted) base pointer ID. + Some(ptr_base_index), // Provide the first index separately. + gep_indices_ids, // Provide the rest of the indices. + is_inbounds, // Preserve original inbounds request. ) } From b04e9bc8593e616f19e524683eccf6026a1d1810 Mon Sep 17 00:00:00 2001 From: Christian Legnitto Date: Sat, 12 Apr 2025 15:45:27 -0400 Subject: [PATCH 4/9] Convert debug! to trace! 
and remove duplicated messages --- .../src/builder/builder_methods.rs | 117 ++++++++---------- 1 file changed, 53 insertions(+), 64 deletions(-) diff --git a/crates/rustc_codegen_spirv/src/builder/builder_methods.rs b/crates/rustc_codegen_spirv/src/builder/builder_methods.rs index 43f4558894..d937ae4d0d 100644 --- a/crates/rustc_codegen_spirv/src/builder/builder_methods.rs +++ b/crates/rustc_codegen_spirv/src/builder/builder_methods.rs @@ -36,7 +36,7 @@ use std::iter::{self, empty}; use std::ops::RangeInclusive; use tracing::{Level, instrument, span}; -use tracing::{debug, warn}; +use tracing::{trace, warn}; macro_rules! simple_op { ( @@ -423,7 +423,6 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { ptr: SpirvValue, ty: ::Type, ) -> (SpirvValue, ::Type) { - debug!("currently in adjust_pointer_for_typed_access"); self.lookup_type(ty) .sizeof(self) .and_then(|size| self.adjust_pointer_for_sized_access(ptr, size)) @@ -451,7 +450,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { other => self.fatal(format!("`ptr` is non-pointer type: {other:?}")), }; - debug!( + trace!( "before nested adjust_pointer_for_sized_access. `leaf_ty`: {}", self.debug_type(leaf_ty) ); @@ -467,7 +466,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { leaf_ty = inner_ty; } - debug!( + trace!( "after nested adjust_pointer_for_sized_access. `leaf_ty`: {}", self.debug_type(leaf_ty) ); @@ -488,7 +487,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { .with_type(leaf_ptr_ty) }; - debug!( + trace!( "adjust_pointer_for_sized_access returning {} {}", self.debug_type(leaf_ptr.ty), self.debug_type(leaf_ty) @@ -513,14 +512,13 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { leaf_ty: Option<::Type>, ) -> Option<(SmallVec<[u32; 8]>, ::Type)> { assert_ne!(Some(ty), leaf_ty); - debug!("recovering access chain: ty: {:?}", self.debug_type(ty)); if let Some(leaf_ty) = leaf_ty { - debug!( + trace!( "recovering access chain: leaf_ty: {:?}", self.debug_type(leaf_ty) ); } else { - debug!("recovering access chain: leaf_ty: None"); + trace!("recovering access chain: leaf_ty: None"); } // HACK(eddyb) this has the correct ordering (`Sized(_) < Unsized`). @@ -536,7 +534,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { start..=end }; - debug!("leaf_size_range: {:?}", leaf_size_range); + trace!("leaf_size_range: {:?}", leaf_size_range); // NOTE(eddyb) `ty` and `ty_kind`/`ty_size` should be kept in sync. let mut ty_kind = self.lookup_type(ty); @@ -550,7 +548,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { field_offsets, .. } => { - debug!("recovering access chain from ADT"); + trace!("recovering access chain from ADT"); let (i, field_ty, field_ty_kind, field_ty_size, offset_in_field) = field_offsets .iter() .enumerate() @@ -580,25 +578,25 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { })?; ty = field_ty; - debug!("setting ty = field_ty: {:?}", self.debug_type(field_ty)); + trace!("setting ty = field_ty: {:?}", self.debug_type(field_ty)); ty_kind = field_ty_kind; - debug!("setting ty_kind = field_ty_kind: {:?}", field_ty_kind); + trace!("setting ty_kind = field_ty_kind: {:?}", field_ty_kind); ty_size = field_ty_size; - debug!("setting ty_size = field_ty_size: {:?}", field_ty_size); + trace!("setting ty_size = field_ty_size: {:?}", field_ty_size); indices.push(i as u32); offset = offset_in_field; - debug!("setting offset = offset_in_field: {:?}", offset_in_field); + trace!("setting offset = offset_in_field: {:?}", offset_in_field); } SpirvType::Vector { element, .. } | SpirvType::Array { element, .. } | SpirvType::RuntimeArray { element } | SpirvType::Matrix { element, .. 
} => { - debug!("recovering access chain from Vector, Array, RuntimeArray, or Matrix"); + trace!("recovering access chain from Vector, Array, RuntimeArray, or Matrix"); ty = element; - debug!("setting ty = element: {:?}", self.debug_type(element)); + trace!("setting ty = element: {:?}", self.debug_type(element)); ty_kind = self.lookup_type(ty); - debug!("looked up ty kind: {:?}", ty_kind); + trace!("looked up ty kind: {:?}", ty_kind); let stride = ty_kind.sizeof(self)?; ty_size = MaybeSized::Sized(stride); @@ -607,14 +605,14 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { offset = Size::from_bytes(offset.bytes() % stride.bytes()); } _ => { - debug!("recovering access chain from SOMETHING ELSE, RETURNING NONE"); + trace!("recovering access chain from SOMETHING ELSE, RETURNING NONE"); return None; } } // Avoid digging beyond the point the leaf could actually fit. if ty_size < *leaf_size_range.start() { - debug!("avoiding digging beyond the point the leaf could actually fit"); + trace!("avoiding digging beyond the point the leaf could actually fit"); return None; } @@ -622,8 +620,8 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { && leaf_size_range.contains(&ty_size) && leaf_ty.map_or(true, |leaf_ty| leaf_ty == ty) { - debug!("returning type: {:?}", self.debug_type(ty)); - debug!("returning indices with len: {:?}", indices.len()); + trace!("returning type: {:?}", self.debug_type(ty)); + trace!("returning indices with len: {:?}", indices.len()); return Some((indices, ty)); } } @@ -654,14 +652,12 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { // into fields or elements of `ty`. let (&ptr_base_index, indices) = combined_indices.split_first().unwrap(); - debug!("maybe_inbounds_gep: ty: {:?}", self.debug_type(ty)); - // Determine if this GEP operation is effectively byte-level addressing. // This check is based on the *provided* input type `ty`. If `ty` is i8 or u8, // it suggests the caller intends to perform byte-offset calculations, // which might allow for more flexible type recovery later. let is_byte_gep = matches!(self.lookup_type(ty), SpirvType::Integer(8, _)); - debug!("Is byte GEP (based on input type): {}", is_byte_gep); + trace!("Is byte GEP (based on input type): {}", is_byte_gep); // --- Calculate the final pointee type based on the GEP operation --- @@ -709,11 +705,11 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { // type. This is the *required* result type for the SPIR-V `AccessChain` // instruction. let final_spirv_ptr_type = self.type_ptr_to(calculated_pointee_type); - debug!( + trace!( "Calculated final SPIR-V pointee type: {}", self.debug_type(calculated_pointee_type) ); - debug!( + trace!( "Calculated final SPIR-V ptr type: {}", self.debug_type(final_spirv_ptr_type) ); @@ -773,7 +769,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { None, ) { // Recovery successful! Found a structured path (`base_indices`) to the target offset. 
- debug!( + trace!( "`recover_access_chain_from_offset` returned Some with base_pointee_ty: {}", self.debug_type(base_pointee_ty) ); @@ -789,14 +785,14 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { let result_wrapper_type = if !is_byte_gep || matches!(self.lookup_type(base_pointee_ty), SpirvType::Integer(8, _)) { - debug!( + trace!( "Using strictly calculated type for wrapper: {}", // Use type based on input `ty` + `indices` self.debug_type(calculated_pointee_type) ); final_spirv_ptr_type } else { - debug!( + trace!( "Byte GEP allowing recovered type for wrapper: {}", // Use type based on recovery result self.debug_type(base_pointee_ty) @@ -823,7 +819,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { .chain(gep_indices_ids.iter().copied()) .collect(); - debug!( + trace!( "emitting access chain via recovery path with wrapper type: {}", self.debug_type(result_wrapper_type) ); @@ -841,7 +837,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { // AND there are more `indices` to process. Using the `base_indices` derived from // `original_pointee_ty` would be incorrect for interpreting the subsequent `indices` // which were intended to operate relative to `ty`. Fall back to the standard path. - debug!( + trace!( "Recovery type mismatch ({}) vs ({}) and GEP indices exist, falling back", self.debug_type(ty), self.debug_type(base_pointee_ty) @@ -849,7 +845,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { } } else { // `recover_access_chain_from_offset` couldn't find a structured path for the constant offset. - debug!("`recover_access_chain_from_offset` returned None, falling back"); + trace!("`recover_access_chain_from_offset` returned None, falling back"); } } @@ -900,7 +896,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { // If we found that `ptr` was defined by a previous `AccessChain`... if let Some((original_ptr, mut original_indices)) = maybe_original_access_chain { - debug!("has original access chain, attempting to merge GEPs"); + trace!("has original access chain, attempting to merge GEPs"); // Check if merging is possible. Requires: // 1. The original AccessChain had at least one index. @@ -933,7 +929,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { // Append the remaining indices (`indices`) from the current GEP operation. original_indices.extend(gep_indices_ids); - debug!( + trace!( "emitting merged access chain with pointer to type: {}", self.debug_type(calculated_pointee_type) ); @@ -949,14 +945,14 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { } else { // Cannot merge because one or both relevant indices are not constant, // or the original chain was empty. - debug!( + trace!( "Last index or base offset is not constant, or no last index, cannot merge." ); } } else { // The base pointer `ptr` was not the result of an AccessChain, or merging // wasn't attempted due to type mismatch. - debug!("no original access chain to merge with"); + trace!("no original access chain to merge with"); } // --- End GEP Merging Path --- @@ -974,13 +970,13 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { // pointer's pointee type when applying the *first* index operand (our // `ptr_base_index`). If `ty` and `original_pointee_ty` mismatched and we // reached this fallback, this cast ensures SPIR-V validity. - debug!("maybe_inbounds_gep fallback path calling pointercast"); + trace!("maybe_inbounds_gep fallback path calling pointercast"); // Cast ptr to point to `ty`. let ptr = self.pointercast(ptr, self.type_ptr_to(ty)); // Get the ID of the (potentially newly casted) pointer. 
let ptr_id = ptr.def(self); - debug!( + trace!( "emitting access chain via fallback path with pointer type: {}", self.debug_type(final_spirv_ptr_type) ); @@ -2079,13 +2075,8 @@ impl<'a, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'tcx> { } } - #[instrument(level = "trace", skip(self), fields(dest_ty = ?self.debug_type(dest_ty)))] + #[instrument(level = "trace", skip(self), fields(val_type = ?self.debug_type(val.ty), dest_ty = ?self.debug_type(dest_ty)))] fn bitcast(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value { - debug!( - "bitcast val: {} -> dest: {}", - self.debug_type(val.ty), - self.debug_type(dest_ty) - ); if val.ty == dest_ty { val } else { @@ -2128,7 +2119,7 @@ impl<'a, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'tcx> { .unwrap() .with_type(in_leaf_ty); - debug!( + trace!( "unpacked newtype. val: {} -> in_leaf_ty: {}", self.debug_type(val.ty), self.debug_type(in_leaf_ty), @@ -2138,14 +2129,14 @@ impl<'a, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'tcx> { // Repack output newtypes, after bitcasting the leaf inside, instead. if let Some((indices, out_leaf_ty)) = unpack_newtype(dest_ty, dest_ty_kind) { - debug!( + trace!( "unpacked newtype: dest: {} -> out_leaf_ty: {}", self.debug_type(dest_ty), self.debug_type(out_leaf_ty), ); let out_leaf = self.bitcast(val, out_leaf_ty); let out_agg_undef = self.undef(dest_ty); - debug!("returning composite insert"); + trace!("returning composite insert"); return self .emit() .composite_insert( @@ -2164,11 +2155,11 @@ impl<'a, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'tcx> { // Reuse the pointer-specific logic in `pointercast` for `*T -> *U`. if val_is_ptr && dest_is_ptr { - debug!("val and dest are both pointers"); + trace!("val and dest are both pointers"); return self.pointercast(val, dest_ty); } - debug!( + trace!( "before emitting: val ty: {} -> dest ty: {}", self.debug_type(val.ty), self.debug_type(dest_ty) @@ -2259,19 +2250,19 @@ impl<'a, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'tcx> { // HACK(eddyb) reuse the special-casing in `const_bitcast`, which relies // on adding a pointer type to an untyped pointer (to some const data). if let SpirvValueKind::IllegalConst(_) = ptr.kind { - debug!("illegal const"); + trace!("illegal const"); return self.const_bitcast(ptr, dest_ty); } if ptr.ty == dest_ty { - debug!("ptr.ty == dest_ty"); + trace!("ptr.ty == dest_ty"); return ptr; } // Strip a previous `pointercast`, to reveal the original pointer type. 
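        // (After stripping, a cast such as `*Foo` from tests/ui/lang/issue-46.rs to a
        // pointer to its first field `[[u32; 2]; 1]` can be recovered below as an access
        // chain with index [0], instead of becoming an illegal pointer cast.)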
let ptr = ptr.strip_ptrcasts(); - debug!( + trace!( "ptr type after strippint pointer cases: {}", self.debug_type(ptr.ty), ); @@ -2300,8 +2291,8 @@ impl<'a, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'tcx> { dest_pointee_size..=dest_pointee_size, Some(dest_pointee), ) { - debug!("`recover_access_chain_from_offset` returned something"); - debug!( + trace!("`recover_access_chain_from_offset` returned something"); + trace!( "ptr_pointee: {}, dest_pointee {}", self.debug_type(ptr_pointee), self.debug_type(dest_pointee), @@ -2315,8 +2306,8 @@ impl<'a, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'tcx> { .unwrap() .with_type(dest_ty) } else { - debug!("`recover_access_chain_from_offset` returned `None`"); - debug!( + trace!("`recover_access_chain_from_offset` returned `None`"); + trace!( "ptr_pointee: {}, dest_pointee {}", self.debug_type(ptr_pointee), self.debug_type(dest_pointee), @@ -2612,7 +2603,7 @@ impl<'a, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'tcx> { } let typed_copy_dst_src = const_size.and_then(|const_size| { - debug!( + trace!( "adjusting pointers: src: {} -> dst: {}", self.debug_type(src.ty), self.debug_type(dst.ty), @@ -2622,7 +2613,7 @@ impl<'a, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'tcx> { match (dst_adj, src_adj) { // HACK(eddyb) fill in missing `dst`/`src` with the other side. (Some((dst, access_ty)), None) => { - debug!( + trace!( "DESTINATION adjusted memcpy calling pointercast: dst ty: {}, access ty: {}", self.debug_type(dst.ty), self.debug_type(access_ty) @@ -2630,7 +2621,7 @@ impl<'a, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'tcx> { Some((dst, self.pointercast(src, self.type_ptr_to(access_ty)))) } (None, Some((src, access_ty))) => { - debug!( + trace!( "SOURCE adjusted memcpy calling pointercast: dst ty: {} -> access ty: {}, src ty: {}", self.debug_type(dst.ty), self.debug_type(access_ty), @@ -2641,7 +2632,7 @@ impl<'a, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'tcx> { (Some((dst, dst_access_ty)), Some((src, src_access_ty))) if dst_access_ty == src_access_ty => { - debug!("BOTH adjusted memcpy calling pointercast"); + trace!("BOTH adjusted memcpy calling pointercast"); Some((dst, src)) } (None, None) | (Some(_), Some(_)) => None, @@ -2650,10 +2641,10 @@ impl<'a, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'tcx> { if let Some((dst, src)) = typed_copy_dst_src { if let Some(const_value) = src.const_fold_load(self) { - debug!("storing const value"); + trace!("storing const value"); self.store(const_value, dst, Align::from_bytes(0).unwrap()); } else { - debug!("copying memory using OpCopyMemory"); + trace!("copying memory using OpCopyMemory"); self.emit() .copy_memory(dst.def(self), src.def(self), None, None, empty()) .unwrap(); @@ -2817,7 +2808,6 @@ impl<'a, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'tcx> { other => self.fatal(format!("insert_value not implemented on type {other:?}")), }; - debug!("insert value"); // HACK(eddyb) temporary workaround for untyped pointers upstream. // FIXME(eddyb) replace with untyped memory SPIR-V + `qptr` or similar. let elt = self.bitcast(elt, field_type); @@ -3112,7 +3102,6 @@ impl<'a, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'tcx> { ), }; - debug!("call"); // HACK(eddyb) temporary workaround for untyped pointers upstream. // FIXME(eddyb) replace with untyped memory SPIR-V + `qptr` or similar. 
         let args: SmallVec<[_; 8]> = args

From 0530d6591670fbfd7952f9bd85157cc5a7fb7d2e Mon Sep 17 00:00:00 2001
From: Christian Legnitto
Date: Sat, 12 Apr 2025 16:02:41 -0400
Subject: [PATCH 5/9] Add back fixme

---
 crates/rustc_codegen_spirv/src/builder/builder_methods.rs | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/crates/rustc_codegen_spirv/src/builder/builder_methods.rs b/crates/rustc_codegen_spirv/src/builder/builder_methods.rs
index d937ae4d0d..4176762456 100644
--- a/crates/rustc_codegen_spirv/src/builder/builder_methods.rs
+++ b/crates/rustc_codegen_spirv/src/builder/builder_methods.rs
@@ -859,6 +859,8 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
         // If they differ, merging could be invalid.
         let maybe_original_access_chain = if ty == original_pointee_ty {
             // Search the current function's instructions...
+            // FIXME(eddyb) this could get ridiculously expensive, at the very least
+            // it could use `.rev()`, hoping the base pointer was recently defined?
             let search_result = {
                 let emit = self.emit();
                 let module = emit.module_ref();

From f03fae5bcd734a29903f33a02f42377a82626328 Mon Sep 17 00:00:00 2001
From: Christian Legnitto
Date: Sat, 12 Apr 2025 16:11:58 -0400
Subject: [PATCH 6/9] Fix clippy issue

---
 crates/rustc_codegen_spirv/src/builder/builder_methods.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/crates/rustc_codegen_spirv/src/builder/builder_methods.rs b/crates/rustc_codegen_spirv/src/builder/builder_methods.rs
index 4176762456..3f1536ac00 100644
--- a/crates/rustc_codegen_spirv/src/builder/builder_methods.rs
+++ b/crates/rustc_codegen_spirv/src/builder/builder_methods.rs
@@ -627,7 +627,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
         }
     }
 
-    #[instrument(level = "trace", skip(self), fields(ty = ?self.debug_type(ty), ptr, combined_indices = ?combined_indices.into_iter().map(|x| (self.debug_type(x.ty), x.kind)).collect::<Vec<_>>(), is_inbounds))]
+    #[instrument(level = "trace", skip(self), fields(ty = ?self.debug_type(ty), ptr, combined_indices = ?combined_indices.iter().map(|x| (self.debug_type(x.ty), x.kind)).collect::<Vec<_>>(), is_inbounds))]
     fn maybe_inbounds_gep(
         &mut self,
         // Represents the type of the element that `ptr` is assumed to point to

From 0fc8de0399f5907710a87e1feb6dd74f1d450cf6 Mon Sep 17 00:00:00 2001
From: Christian Legnitto
Date: Sat, 12 Apr 2025 16:17:51 -0400
Subject: [PATCH 7/9] Add back pointer to spec / docs about first GEP offset

---
 crates/rustc_codegen_spirv/src/builder/builder_methods.rs | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/crates/rustc_codegen_spirv/src/builder/builder_methods.rs b/crates/rustc_codegen_spirv/src/builder/builder_methods.rs
index 3f1536ac00..c9c05817b4 100644
--- a/crates/rustc_codegen_spirv/src/builder/builder_methods.rs
+++ b/crates/rustc_codegen_spirv/src/builder/builder_methods.rs
@@ -650,6 +650,9 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
         // the index applied directly to `ptr`, effectively an offset multiplier based
         // on the size of `ty`. `indices` are the subsequent indices used to drill down
         // into fields or elements of `ty`.
+        // https://llvm.org/docs/GetElementPtr.html
+        // "An OpAccessChain instruction is the equivalent of an LLVM getelementptr instruction where the first index element is zero."
+        // https://github.com/gpuweb/gpuweb/issues/33
         let (&ptr_base_index, indices) = combined_indices.split_first().unwrap();
 
         // Determine if this GEP operation is effectively byte-level addressing.
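The references restored in PATCH 7/9 pin down the key constraint in `maybe_inbounds_gep`: an LLVM GEP's leading index moves the base pointer itself, in multiples of `size_of::<T>()`, whereas SPIR-V's `OpAccessChain` only drills into the pointee, i.e. it corresponds to a GEP whose leading index is zero. A minimal, self-contained sketch of that split (the helper `split_gep_indices` and the plain `u64` indices are invented for illustration; the real code works on `SpirvValue` operands and handles the non-zero case via byte-level GEPs and pointer adjustment):

    // Sketch only: separate a GEP's pointer-level index from the indices that an
    // OpAccessChain can express on its own.
    fn split_gep_indices(combined: &[u64]) -> (u64, &[u64]) {
        let (&ptr_base_index, field_indices) = combined
            .split_first()
            .expect("a GEP always has at least one index");
        (ptr_base_index, field_indices)
    }

    fn main() {
        // `getelementptr %T, ptr %p, i64 0, i32 2, i32 1` has a leading zero index,
        // so it maps directly onto `OpAccessChain %p %c2 %c1`.
        let (base, chain) = split_gep_indices(&[0, 2, 1]);
        assert_eq!(base, 0);
        assert_eq!(chain, &[2, 1]);

        // A non-zero leading index (`i64 3, ...`) first offsets the pointer by
        // `3 * size_of::<T>()` bytes, which OpAccessChain alone cannot express.
        let (base, _) = split_gep_indices(&[3, 0]);
        assert_eq!(base, 3);
    }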
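That leading index is also what the GEP-merging path traced in PATCH 4/9 ("has original access chain, attempting to merge GEPs") folds away: merging only happens when the `OpAccessChain` that produced `ptr` has at least one index, and both its last index and the new GEP's leading index are constants; the two constants are added and the remaining GEP indices are appended. A rough sketch of that rule on plain integer indices (`merge_access_chains` is hypothetical; the real code also requires the pointee types to match and works with SPIR-V ids rather than raw integers):

    // Sketch only: `AccessChain(p, [.., last])` followed by `GEP(base, rest..)`
    // becomes `AccessChain(p, [.., last + base, rest..])` when both are constants.
    fn merge_access_chains(
        mut original: Vec<u32>, // indices of the AccessChain that defined `ptr`
        gep_base: u32,          // the new GEP's constant leading index
        gep_rest: &[u32],       // the new GEP's remaining constant indices
    ) -> Option<Vec<u32>> {
        let last = original.last_mut()?; // the original chain must be non-empty
        *last = last.checked_add(gep_base)?; // fold the two constant indices together
        original.extend_from_slice(gep_rest);
        Some(original)
    }

    fn main() {
        // A chain ending in index 4 plus a GEP with leading index 1: 4 + 1 = 5,
        // and the GEP's trailing index is appended.
        assert_eq!(merge_access_chains(vec![0, 4], 1, &[2]), Some(vec![0, 5, 2]));
        // An empty original chain (or, in the real code, a non-constant index)
        // cannot be merged into.
        assert_eq!(merge_access_chains(vec![], 3, &[]), None);
    }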
From e38c1a4b7e1720444cd355956c88ed478fd1db7d Mon Sep 17 00:00:00 2001 From: Christian Legnitto Date: Sat, 12 Apr 2025 16:40:45 -0400 Subject: [PATCH 8/9] Missed one debug -> trace --- crates/rustc_codegen_spirv/src/builder/builder_methods.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/rustc_codegen_spirv/src/builder/builder_methods.rs b/crates/rustc_codegen_spirv/src/builder/builder_methods.rs index c9c05817b4..3a8b4c1801 100644 --- a/crates/rustc_codegen_spirv/src/builder/builder_methods.rs +++ b/crates/rustc_codegen_spirv/src/builder/builder_methods.rs @@ -996,7 +996,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { } #[instrument( - level = "debug", + level = "trace", skip(self), fields( result_type = ?self.debug_type(result_type), From 16b61ce9c871afa7d305d0b35e165585bde77546 Mon Sep 17 00:00:00 2001 From: Christian Legnitto Date: Sat, 12 Apr 2025 16:42:41 -0400 Subject: [PATCH 9/9] Another debug -> trace --- crates/rustc_codegen_spirv/src/codegen_cx/constant.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/rustc_codegen_spirv/src/codegen_cx/constant.rs b/crates/rustc_codegen_spirv/src/codegen_cx/constant.rs index b2427fedef..1461f23246 100644 --- a/crates/rustc_codegen_spirv/src/codegen_cx/constant.rs +++ b/crates/rustc_codegen_spirv/src/codegen_cx/constant.rs @@ -367,7 +367,7 @@ impl<'tcx> CodegenCx<'tcx> { } pub fn create_const_alloc(&self, alloc: ConstAllocation<'tcx>, ty: Word) -> SpirvValue { - tracing::debug!( + tracing::trace!( "Creating const alloc of type {} with {} bytes", self.debug_type(ty), alloc.inner().len() @@ -379,7 +379,7 @@ impl<'tcx> CodegenCx<'tcx> { alloc.inner().len(), "create_const_alloc must consume all bytes of an Allocation" ); - tracing::debug!("Done creating alloc of type {}", self.debug_type(ty)); + tracing::trace!("Done creating alloc of type {}", self.debug_type(ty)); result }
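PATCH 8/9 and PATCH 9/9 move the last remaining `debug`-level logging from this series down to `trace`, so the extra output stays hidden unless trace-level logging is enabled for the backend. As a rough, standalone illustration of why the level matters (this is not rust-gpu's actual subscriber setup; it only assumes the `tracing` and `tracing-subscriber` crates), spans declared with `#[instrument(level = "trace", ...)]` and `trace!` events are emitted only when the subscriber's maximum level is `TRACE`:

    // Minimal sketch: an instrumented method plus a subscriber whose max level
    // decides whether the trace spans/events are printed at all.
    use tracing::{Level, instrument, trace};

    struct Builder;

    impl Builder {
        // Mirrors the pattern used throughout the series: skip `self`, record a
        // human-readable field instead of relying on `Debug` for the receiver.
        #[instrument(level = "trace", skip(self), fields(ty = %ty))]
        fn lower(&self, ty: &str) {
            trace!("lowering {}", ty);
        }
    }

    fn main() {
        // With `Level::DEBUG` here, neither the span nor the event would appear.
        tracing_subscriber::fmt().with_max_level(Level::TRACE).init();
        Builder.lower("*u32");
    }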