| field | value | date |
|---|---|---|
| author | Guillaume Gomez <guillaume1.gomez@gmail.com> | 2025-06-18 15:11:44 +0200 |
| committer | Guillaume Gomez <guillaume1.gomez@gmail.com> | 2025-06-18 15:11:44 +0200 |
| commit | c48d8d4d800df8a59d412c5c4299c8fadcd039e9 (patch) | |
| tree | cba780e8e9940952265aa1587419e557994e3423 /compiler/rustc_codegen_gcc/src/common.rs | |
| parent | 6f935a044d1ddeb6160494a6320d008d7c311aef (diff) | |
| parent | fda0bb9588912a3e0606e880ca9f6e913cf8a5a4 (diff) | |
Merge commit 'fda0bb9588912a3e0606e880ca9f6e913cf8a5a4' into subtree-update_cg_gcc_2025-06-18
Diffstat (limited to 'compiler/rustc_codegen_gcc/src/common.rs')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | compiler/rustc_codegen_gcc/src/common.rs | 102 |
1 file changed, 90 insertions, 12 deletions
```diff
diff --git a/compiler/rustc_codegen_gcc/src/common.rs b/compiler/rustc_codegen_gcc/src/common.rs
index 918195364ff..58ff2f1f8f0 100644
--- a/compiler/rustc_codegen_gcc/src/common.rs
+++ b/compiler/rustc_codegen_gcc/src/common.rs
@@ -9,7 +9,6 @@ use rustc_middle::mir::Mutability;
 use rustc_middle::mir::interpret::{ConstAllocation, GlobalAlloc, Scalar};
 use rustc_middle::ty::layout::LayoutOf;
 
-use crate::consts::const_alloc_to_gcc;
 use crate::context::CodegenCx;
 use crate::type_of::LayoutGccExt;
 
@@ -46,12 +45,65 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
 }
 
 pub fn bytes_in_context<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, bytes: &[u8]) -> RValue<'gcc> {
-    let context = &cx.context;
-    let byte_type = context.new_type::<u8>();
-    let typ = context.new_array_type(None, byte_type, bytes.len() as u64);
-    let elements: Vec<_> =
-        bytes.iter().map(|&byte| context.new_rvalue_from_int(byte_type, byte as i32)).collect();
-    context.new_array_constructor(None, typ, &elements)
+    // Instead of always using an array of bytes, use an array of larger integers of target endianness
+    // if possible. This reduces the amount of `rvalues` we use, which reduces memory usage significantly.
+    //
+    // FIXME(FractalFir): Consider using `global_set_initializer` instead. Before this is done, we need to confirm that
+    // `global_set_initializer` is more memory efficient than the current solution.
+    // `global_set_initializer` calls `global_set_initializer_rvalue` under the hood - does it generate an array of rvalues,
+    // or is it using a more efficient representation?
+    match bytes.len() % 8 {
+        0 => {
+            let context = &cx.context;
+            let byte_type = context.new_type::<u64>();
+            let typ = context.new_array_type(None, byte_type, bytes.len() as u64 / 8);
+            let elements: Vec<_> = bytes
+                .chunks_exact(8)
+                .map(|arr| {
+                    let arr: [u8; 8] = arr.try_into().unwrap();
+                    context.new_rvalue_from_long(
+                        byte_type,
+                        // Since we are representing arbitrary byte runs as integers, we need to follow the target
+                        // endianness.
+                        match cx.sess().target.options.endian {
+                            rustc_abi::Endian::Little => u64::from_le_bytes(arr) as i64,
+                            rustc_abi::Endian::Big => u64::from_be_bytes(arr) as i64,
+                        },
+                    )
+                })
+                .collect();
+            context.new_array_constructor(None, typ, &elements)
+        }
+        4 => {
+            let context = &cx.context;
+            let byte_type = context.new_type::<u32>();
+            let typ = context.new_array_type(None, byte_type, bytes.len() as u64 / 4);
+            let elements: Vec<_> = bytes
+                .chunks_exact(4)
+                .map(|arr| {
+                    let arr: [u8; 4] = arr.try_into().unwrap();
+                    context.new_rvalue_from_int(
+                        byte_type,
+                        match cx.sess().target.options.endian {
+                            rustc_abi::Endian::Little => u32::from_le_bytes(arr) as i32,
+                            rustc_abi::Endian::Big => u32::from_be_bytes(arr) as i32,
+                        },
+                    )
+                })
+                .collect();
+            context.new_array_constructor(None, typ, &elements)
+        }
+        _ => {
+            let context = cx.context;
+            let byte_type = context.new_type::<u8>();
+            let typ = context.new_array_type(None, byte_type, bytes.len() as u64);
+            let elements: Vec<_> = bytes
+                .iter()
+                .map(|&byte| context.new_rvalue_from_int(byte_type, byte as i32))
+                .collect();
+            context.new_array_constructor(None, typ, &elements)
+        }
+    }
 }
 
 pub fn type_is_pointer(typ: Type<'_>) -> bool {
@@ -185,14 +237,15 @@ impl<'gcc, 'tcx> ConstCodegenMethods for CodegenCx<'gcc, 'tcx> {
                 // FIXME(antoyo): there's some issues with using the u128 code that follows, so hard-code
                 // the paths for floating-point values.
-                if ty == self.float_type {
+                // TODO: Remove this code?
+                /*if ty == self.float_type {
                     return self
                         .context
                         .new_rvalue_from_double(ty, f32::from_bits(data as u32) as f64);
                 }
                 if ty == self.double_type {
                     return self.context.new_rvalue_from_double(ty, f64::from_bits(data as u64));
-                }
+                }*/
 
                 let value = self.const_uint_big(self.type_ix(bitsize), data);
                 let bytesize = layout.size(self).bytes();
@@ -212,7 +265,20 @@
                 let alloc_id = prov.alloc_id();
                 let base_addr = match self.tcx.global_alloc(alloc_id) {
                     GlobalAlloc::Memory(alloc) => {
-                        let init = const_alloc_to_gcc(self, alloc);
+                        // For ZSTs directly codegen an aligned pointer.
+                        // This avoids generating a zero-sized constant value and actually needing a
+                        // real address at runtime.
+                        if alloc.inner().len() == 0 {
+                            assert_eq!(offset.bytes(), 0);
+                            let val = self.const_usize(alloc.inner().align.bytes());
+                            return if matches!(layout.primitive(), Pointer(_)) {
+                                self.context.new_cast(None, val, ty)
+                            } else {
+                                self.const_bitcast(val, ty)
+                            };
+                        }
+
+                        let init = self.const_data_from_alloc(alloc);
                         let alloc = alloc.inner();
                         let value = match alloc.mutability {
                             Mutability::Mut => self.static_addr_of_mut(init, alloc.align, None),
@@ -234,7 +300,7 @@
                                 }),
                             )))
                             .unwrap_memory();
-                        let init = const_alloc_to_gcc(self, alloc);
+                        let init = self.const_data_from_alloc(alloc);
                         self.static_addr_of(init, alloc.inner().align, None)
                     }
                     GlobalAlloc::Static(def_id) => {
@@ -257,7 +323,19 @@
     }
 
     fn const_data_from_alloc(&self, alloc: ConstAllocation<'_>) -> Self::Value {
-        const_alloc_to_gcc(self, alloc)
+        // We ignore the alignment for the purpose of deduping RValues
+        // The alignment is not handled / used in any way by `const_alloc_to_gcc`,
+        // so it is OK to overwrite it here.
+        let mut mock_alloc = alloc.inner().clone();
+        mock_alloc.align = rustc_abi::Align::MAX;
+        // Check if the rvalue is already in the cache - if so, just return it directly.
+        if let Some(res) = self.const_cache.borrow().get(&mock_alloc) {
+            return *res;
+        }
+        // Rvalue not in the cache - convert and add it.
+        let res = crate::consts::const_alloc_to_gcc_uncached(self, alloc);
+        self.const_cache.borrow_mut().insert(mock_alloc, res);
+        res
     }
 
     fn const_ptr_byte_offset(&self, base_addr: Self::Value, offset: abi::Size) -> Self::Value {
```
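The `bytes_in_context` change above packs runs of bytes into wider integers so that one rvalue covers 8 (or 4) bytes, and the packing has to follow the target's byte order (`cx.sess().target.options.endian`). A minimal standalone sketch of that packing step follows; the `Endian` enum and `pack_u64_words` helper are hypothetical stand-ins, not rustc_codegen_gcc APIs:

```rust
// Standalone sketch, not code from this commit: pack a byte buffer whose length is a
// multiple of 8 into u64 words, honouring a chosen byte order, the same way the new
// `bytes_in_context` builds one wide rvalue per 8-byte chunk instead of 8 byte rvalues.
#[derive(Clone, Copy)]
enum Endian {
    Little,
    Big,
}

fn pack_u64_words(bytes: &[u8], endian: Endian) -> Option<Vec<u64>> {
    // Only the "length divisible by 8" case is packed here; other lengths would fall
    // back to smaller element types, as the `match bytes.len() % 8` in the diff does.
    if bytes.len() % 8 != 0 {
        return None;
    }
    let words = bytes
        .chunks_exact(8)
        .map(|chunk| {
            let arr: [u8; 8] = chunk.try_into().unwrap();
            // The constant's in-memory byte layout must match the target, so the
            // integer value we materialize depends on the target's endianness.
            match endian {
                Endian::Little => u64::from_le_bytes(arr),
                Endian::Big => u64::from_be_bytes(arr),
            }
        })
        .collect();
    Some(words)
}

fn main() {
    let bytes: Vec<u8> = (0u8..16).collect();
    // Same bytes, different word values depending on byte order:
    // little-endian gives 0x0706050403020100, big-endian gives 0x0001020304050607.
    println!("{:#x?}", pack_u64_words(&bytes, Endian::Little).unwrap());
    println!("{:#x?}", pack_u64_words(&bytes, Endian::Big).unwrap());
}
```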
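The rewritten `const_data_from_alloc` deduplicates constant allocations: the cache key is a clone of the allocation with its alignment overwritten (alignment does not affect the generated rvalue), so two allocations that differ only in alignment share one converted value. Below is a minimal sketch of that caching pattern using only std types; `Cx`, `Alloc`, `Rvalue`, and `lower_alloc` are hypothetical stand-ins, not rustc_codegen_gcc APIs:

```rust
// Standalone sketch, not code from this commit: cache a lowered value per allocation,
// normalizing away the field (alignment) that does not influence the result.
use std::cell::RefCell;
use std::collections::HashMap;

#[derive(Clone, PartialEq, Eq, Hash)]
struct Alloc {
    bytes: Vec<u8>,
    align: u64,
}

// Opaque handle standing in for `RValue<'gcc>`.
#[derive(Clone, Copy, Debug, PartialEq)]
struct Rvalue(usize);

struct Cx {
    const_cache: RefCell<HashMap<Alloc, Rvalue>>,
    next_id: RefCell<usize>,
}

impl Cx {
    // The expensive conversion we want to run at most once per distinct constant.
    fn lower_alloc(&self, _alloc: &Alloc) -> Rvalue {
        let mut id = self.next_id.borrow_mut();
        *id += 1;
        Rvalue(*id)
    }

    fn const_data_from_alloc(&self, alloc: &Alloc) -> Rvalue {
        // Normalize the key: alignment is irrelevant to the lowered value, so force it
        // to a fixed value before the lookup (the diff uses `rustc_abi::Align::MAX`).
        let mut key = alloc.clone();
        key.align = u64::MAX;
        if let Some(res) = self.const_cache.borrow().get(&key) {
            return *res;
        }
        let res = self.lower_alloc(alloc);
        self.const_cache.borrow_mut().insert(key, res);
        res
    }
}

fn main() {
    let cx = Cx { const_cache: RefCell::new(HashMap::new()), next_id: RefCell::new(0) };
    let a = Alloc { bytes: vec![1, 2, 3], align: 1 };
    let b = Alloc { bytes: vec![1, 2, 3], align: 8 }; // same bytes, different alignment
    // Both calls yield the same cached handle; the conversion runs only once.
    assert_eq!(cx.const_data_from_alloc(&a), cx.const_data_from_alloc(&b));
}
```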
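The new ZST branch in `scalar_to_backend` never materializes storage for a zero-sized allocation; it just returns the allocation's alignment as the pointer value. A small sketch of why that is sound in plain Rust (a pointer to a zero-sized value only has to be non-null and suitably aligned; `dangling_for_align` is a made-up helper, not an API from the commit):

```rust
// Standalone sketch, not code from this commit: use the alignment itself as the address
// of a zero-sized "allocation", so no real storage is ever needed.
fn dangling_for_align(align: usize) -> *const () {
    assert!(align.is_power_of_two());
    // The address only has to be non-null and a multiple of `align`; it is never
    // dereferenced because the pointee is zero-sized.
    align as *const ()
}

fn main() {
    let align = std::mem::align_of::<u64>(); // pretend the ZST allocation required this
    let p = dangling_for_align(align);
    assert!(!p.is_null());
    assert_eq!(p as usize % align, 0);
    println!("{:p}", p);
}
```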
