From 2b22d0f0d2b9d0d71025065db93058e34f846600 Mon Sep 17 00:00:00 2001 From: Daniel Paoliello Date: Thu, 3 Jul 2025 09:17:48 -0700 Subject: Make __rust_alloc_error_handler_should_panic a function --- compiler/rustc_codegen_llvm/src/allocator.rs | 45 +++++++++++++++++++++++----- 1 file changed, 37 insertions(+), 8 deletions(-) (limited to 'compiler/rustc_codegen_llvm/src') diff --git a/compiler/rustc_codegen_llvm/src/allocator.rs b/compiler/rustc_codegen_llvm/src/allocator.rs index 9dca63cfc8d..2b5090ed6db 100644 --- a/compiler/rustc_codegen_llvm/src/allocator.rs +++ b/compiler/rustc_codegen_llvm/src/allocator.rs @@ -11,7 +11,7 @@ use rustc_symbol_mangling::mangle_internal_symbol; use crate::builder::SBuilder; use crate::declare::declare_simple_fn; -use crate::llvm::{self, False, True, Type}; +use crate::llvm::{self, False, True, Type, Value}; use crate::{SimpleCx, attributes, debuginfo}; pub(crate) unsafe fn codegen( @@ -73,13 +73,14 @@ pub(crate) unsafe fn codegen( ); unsafe { - // __rust_alloc_error_handler_should_panic - let name = mangle_internal_symbol(tcx, OomStrategy::SYMBOL); - let ll_g = cx.declare_global(&name, i8); - llvm::set_visibility(ll_g, llvm::Visibility::from_generic(tcx.sess.default_visibility())); - let val = tcx.sess.opts.unstable_opts.oom.should_panic(); - let llval = llvm::LLVMConstInt(i8, val as u64, False); - llvm::set_initializer(ll_g, llval); + // __rust_alloc_error_handler_should_panic_v2 + create_const_value_function( + tcx, + &cx, + &mangle_internal_symbol(tcx, OomStrategy::SYMBOL), + &i8, + &llvm::LLVMConstInt(i8, tcx.sess.opts.unstable_opts.oom.should_panic() as u64, False), + ); // __rust_no_alloc_shim_is_unstable_v2 create_wrapper_function( @@ -100,6 +101,34 @@ pub(crate) unsafe fn codegen( } } +fn create_const_value_function( + tcx: TyCtxt<'_>, + cx: &SimpleCx<'_>, + name: &str, + output: &Type, + value: &Value, +) { + let ty = cx.type_func(&[], output); + let llfn = declare_simple_fn( + &cx, + name, + llvm::CallConv::CCallConv, + llvm::UnnamedAddr::Global, + llvm::Visibility::from_generic(tcx.sess.default_visibility()), + ty, + ); + + attributes::apply_to_llfn( + llfn, + llvm::AttributePlace::Function, + &[llvm::AttributeKind::AlwaysInline.create_attr(cx.llcx)], + ); + + let llbb = unsafe { llvm::LLVMAppendBasicBlockInContext(cx.llcx, llfn, c"entry".as_ptr()) }; + let mut bx = SBuilder::build(&cx, llbb); + bx.ret(value); +} + fn create_wrapper_function( tcx: TyCtxt<'_>, cx: &SimpleCx<'_>, -- cgit 1.4.1-3-g733a5 From 226b0fbe11812c71c8002b10a40063571cf52b3f Mon Sep 17 00:00:00 2001 From: Folkert de Vries Date: Sat, 5 Jul 2025 08:26:32 +0200 Subject: use `is_multiple_of` instead of manual modulo --- compiler/rustc_borrowck/src/polonius/legacy/location.rs | 2 +- compiler/rustc_codegen_llvm/src/abi.rs | 2 +- compiler/rustc_const_eval/src/interpret/memory.rs | 4 ++-- compiler/rustc_query_system/src/query/plumbing.rs | 2 +- compiler/rustc_target/src/callconv/sparc64.rs | 6 +++--- compiler/rustc_ty_utils/src/layout/invariant.rs | 2 +- library/core/src/slice/mod.rs | 8 ++++---- library/core/src/slice/sort/shared/smallsort.rs | 2 +- library/core/src/str/count.rs | 2 +- library/core/src/str/validations.rs | 2 +- 10 files changed, 16 insertions(+), 16 deletions(-) (limited to 'compiler/rustc_codegen_llvm/src') diff --git a/compiler/rustc_borrowck/src/polonius/legacy/location.rs b/compiler/rustc_borrowck/src/polonius/legacy/location.rs index 5f816bb9bbd..618119a6a3d 100644 --- a/compiler/rustc_borrowck/src/polonius/legacy/location.rs +++ 
b/compiler/rustc_borrowck/src/polonius/legacy/location.rs @@ -109,6 +109,6 @@ impl PoloniusLocationTable { impl LocationIndex { fn is_start(self) -> bool { // even indices are start points; odd indices are mid points - (self.index() % 2) == 0 + self.index().is_multiple_of(2) } } diff --git a/compiler/rustc_codegen_llvm/src/abi.rs b/compiler/rustc_codegen_llvm/src/abi.rs index 4b07c8aef91..009e7e2487b 100644 --- a/compiler/rustc_codegen_llvm/src/abi.rs +++ b/compiler/rustc_codegen_llvm/src/abi.rs @@ -146,7 +146,7 @@ impl LlvmType for CastTarget { "total size {:?} cannot be divided into units of zero size", self.rest.total ); - if self.rest.total.bytes() % self.rest.unit.size.bytes() != 0 { + if !self.rest.total.bytes().is_multiple_of(self.rest.unit.size.bytes()) { assert_eq!(self.rest.unit.kind, RegKind::Integer, "only int regs can be split"); } self.rest.total.bytes().div_ceil(self.rest.unit.size.bytes()) diff --git a/compiler/rustc_const_eval/src/interpret/memory.rs b/compiler/rustc_const_eval/src/interpret/memory.rs index ff822b52a8d..c97d53a45de 100644 --- a/compiler/rustc_const_eval/src/interpret/memory.rs +++ b/compiler/rustc_const_eval/src/interpret/memory.rs @@ -537,7 +537,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { #[inline] fn is_offset_misaligned(offset: u64, align: Align) -> Option<Misalignment> { - if offset % align.bytes() == 0 { + if offset.is_multiple_of(align.bytes()) { None } else { // The biggest power of two through which `offset` is divisible. @@ -1554,7 +1554,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { // If the allocation is N-aligned, and the offset is not divisible by N, // then `base + offset` has a non-zero remainder after division by `N`, // which means `base + offset` cannot be null. - if offset.bytes() % info.align.bytes() != 0 { + if !offset.bytes().is_multiple_of(info.align.bytes()) { return interp_ok(false); } // We don't know enough, this might be null. diff --git a/compiler/rustc_query_system/src/query/plumbing.rs b/compiler/rustc_query_system/src/query/plumbing.rs index 3c1fc731784..06e59eb4ccc 100644 --- a/compiler/rustc_query_system/src/query/plumbing.rs +++ b/compiler/rustc_query_system/src/query/plumbing.rs @@ -597,7 +597,7 @@ where // from disk. Re-hashing results is fairly expensive, so we can't // currently afford to verify every hash. This subset should still // give us some coverage of potential bugs though. - let try_verify = prev_fingerprint.split().1.as_u64() % 32 == 0; + let try_verify = prev_fingerprint.split().1.as_u64().is_multiple_of(32); if std::intrinsics::unlikely( try_verify || qcx.dep_context().sess().opts.unstable_opts.incremental_verify_ich, ) { diff --git a/compiler/rustc_target/src/callconv/sparc64.rs b/compiler/rustc_target/src/callconv/sparc64.rs index 186826c08fc..ecc9067ced3 100644 --- a/compiler/rustc_target/src/callconv/sparc64.rs +++ b/compiler/rustc_target/src/callconv/sparc64.rs @@ -90,7 +90,7 @@ where _ => {} } - if (offset.bytes() % 4) != 0 + if !offset.bytes().is_multiple_of(4) && matches!(scalar2.primitive(), Primitive::Float(Float::F32 | Float::F64)) { offset += Size::from_bytes(4 - (offset.bytes() % 4)); @@ -181,7 +181,7 @@ where // Structure { float, int, int } doesn't like to be handled like // { float, long int }. Other way around it doesn't mind.
if data.last_offset < arg.layout.size - && (data.last_offset.bytes() % 8) != 0 + && !data.last_offset.bytes().is_multiple_of(8) && data.prefix_index < data.prefix.len() { data.prefix[data.prefix_index] = Some(Reg::i32()); @@ -190,7 +190,7 @@ where } let mut rest_size = arg.layout.size - data.last_offset; - if (rest_size.bytes() % 8) != 0 && data.prefix_index < data.prefix.len() { + if !rest_size.bytes().is_multiple_of(8) && data.prefix_index < data.prefix.len() { data.prefix[data.prefix_index] = Some(Reg::i32()); rest_size = rest_size - Reg::i32().size; } diff --git a/compiler/rustc_ty_utils/src/layout/invariant.rs b/compiler/rustc_ty_utils/src/layout/invariant.rs index 4b65c05d0e9..1311ee31182 100644 --- a/compiler/rustc_ty_utils/src/layout/invariant.rs +++ b/compiler/rustc_ty_utils/src/layout/invariant.rs @@ -8,7 +8,7 @@ use rustc_middle::ty::layout::{HasTyCtxt, LayoutCx, TyAndLayout}; pub(super) fn layout_sanity_check<'tcx>(cx: &LayoutCx<'tcx>, layout: &TyAndLayout<'tcx>) { let tcx = cx.tcx(); - if layout.size.bytes() % layout.align.abi.bytes() != 0 { + if !layout.size.bytes().is_multiple_of(layout.align.abi.bytes()) { bug!("size is not a multiple of align, in the following layout:\n{layout:#?}"); } if layout.size.bytes() >= tcx.data_layout.obj_size_bound() { diff --git a/library/core/src/slice/mod.rs b/library/core/src/slice/mod.rs index 3a3f44c6b85..dc09ba8d788 100644 --- a/library/core/src/slice/mod.rs +++ b/library/core/src/slice/mod.rs @@ -1316,7 +1316,7 @@ impl [T] { assert_unsafe_precondition!( check_language_ub, "slice::as_chunks_unchecked requires `N != 0` and the slice to split exactly into `N`-element chunks", - (n: usize = N, len: usize = self.len()) => n != 0 && len % n == 0, + (n: usize = N, len: usize = self.len()) => n != 0 && len.is_multiple_of(n), ); // SAFETY: Caller must guarantee that `N` is nonzero and exactly divides the slice length let new_len = unsafe { exact_div(self.len(), N) }; @@ -1512,7 +1512,7 @@ impl [T] { assert_unsafe_precondition!( check_language_ub, "slice::as_chunks_unchecked requires `N != 0` and the slice to split exactly into `N`-element chunks", - (n: usize = N, len: usize = self.len()) => n != 0 && len % n == 0 + (n: usize = N, len: usize = self.len()) => n != 0 && len.is_multiple_of(n) ); // SAFETY: Caller must guarantee that `N` is nonzero and exactly divides the slice length let new_len = unsafe { exact_div(self.len(), N) }; @@ -4866,7 +4866,7 @@ impl [T] { let byte_offset = elem_start.wrapping_sub(self_start); - if byte_offset % size_of::<T>() != 0 { + if !byte_offset.is_multiple_of(size_of::<T>()) { return None; } @@ -4920,7 +4920,7 @@ impl [T] { let byte_start = subslice_start.wrapping_sub(self_start); - if byte_start % size_of::<T>() != 0 { + if !byte_start.is_multiple_of(size_of::<T>()) { return None; } diff --git a/library/core/src/slice/sort/shared/smallsort.rs b/library/core/src/slice/sort/shared/smallsort.rs index 4280f7570db..400daba16c1 100644 --- a/library/core/src/slice/sort/shared/smallsort.rs +++ b/library/core/src/slice/sort/shared/smallsort.rs @@ -823,7 +823,7 @@ unsafe fn bidirectional_merge<T, F: FnMut(&T, &T) -> bool>( let right_end = right_rev.wrapping_add(1); // Odd length, so one element is left unconsumed in the input.
- if len % 2 != 0 { + if !len.is_multiple_of(2) { let left_nonempty = left < left_end; let last_src = if left_nonempty { left } else { right }; ptr::copy_nonoverlapping(last_src, dst, 1); diff --git a/library/core/src/str/count.rs b/library/core/src/str/count.rs index 452403b23de..f59ad3e66b4 100644 --- a/library/core/src/str/count.rs +++ b/library/core/src/str/count.rs @@ -52,7 +52,7 @@ fn do_count_chars(s: &str) -> usize { // Check the properties of `CHUNK_SIZE` and `UNROLL_INNER` that are required // for correctness. const _: () = assert!(CHUNK_SIZE < 256); - const _: () = assert!(CHUNK_SIZE % UNROLL_INNER == 0); + const _: () = assert!(CHUNK_SIZE.is_multiple_of(UNROLL_INNER)); // SAFETY: transmuting `[u8]` to `[usize]` is safe except for size // differences which are handled by `align_to`. diff --git a/library/core/src/str/validations.rs b/library/core/src/str/validations.rs index 8174e4ff97d..b54d6478e58 100644 --- a/library/core/src/str/validations.rs +++ b/library/core/src/str/validations.rs @@ -219,7 +219,7 @@ pub(super) const fn run_utf8_validation(v: &[u8]) -> Result<(), Utf8Error> { // Ascii case, try to skip forward quickly. // When the pointer is aligned, read 2 words of data per iteration // until we find a word containing a non-ascii byte. - if align != usize::MAX && align.wrapping_sub(index) % USIZE_BYTES == 0 { + if align != usize::MAX && align.wrapping_sub(index).is_multiple_of(USIZE_BYTES) { let ptr = v.as_ptr(); while index < blocks_end { // SAFETY: since `align - index` and `ascii_block_size` are -- cgit 1.4.1-3-g733a5 From ed3711ea29398b09483e4e2a3930567e9ba81d93 Mon Sep 17 00:00:00 2001 From: Folkert de Vries Date: Sat, 5 Jul 2025 08:36:27 +0200 Subject: use `div_ceil` instead of manual logic --- compiler/rustc_abi/src/lib.rs | 3 +-- compiler/rustc_codegen_llvm/src/va_arg.rs | 4 ++-- compiler/rustc_index/src/bit_set.rs | 4 ++-- compiler/rustc_passes/src/liveness/rwu_table.rs | 2 +- compiler/rustc_serialize/src/leb128.rs | 2 +- compiler/rustc_span/src/edit_distance.rs | 2 +- compiler/rustc_target/src/callconv/x86.rs | 2 +- compiler/rustc_target/src/callconv/x86_64.rs | 2 +- compiler/rustc_target/src/callconv/xtensa.rs | 2 +- library/core/src/slice/sort/stable/drift.rs | 4 ++-- library/core/src/str/iter.rs | 6 +++--- library/portable-simd/crates/core_simd/src/lane_count.rs | 2 +- 12 files changed, 17 insertions(+), 18 deletions(-) (limited to 'compiler/rustc_codegen_llvm/src') diff --git a/compiler/rustc_abi/src/lib.rs b/compiler/rustc_abi/src/lib.rs index 0df8921c9b7..a438545c76f 100644 --- a/compiler/rustc_abi/src/lib.rs +++ b/compiler/rustc_abi/src/lib.rs @@ -527,8 +527,7 @@ impl Size { /// not a multiple of 8. pub fn from_bits(bits: impl TryInto<u64>) -> Size { let bits = bits.try_into().ok().unwrap(); - // Avoid potential overflow from `bits + 7`.
- Size { raw: bits / 8 + ((bits % 8) + 7) / 8 } + Size { raw: bits.div_ceil(8) } } #[inline] diff --git a/compiler/rustc_codegen_llvm/src/va_arg.rs b/compiler/rustc_codegen_llvm/src/va_arg.rs index 4fe4c9bcbf2..486dc894a4e 100644 --- a/compiler/rustc_codegen_llvm/src/va_arg.rs +++ b/compiler/rustc_codegen_llvm/src/va_arg.rs @@ -172,10 +172,10 @@ fn emit_aapcs_va_arg<'ll, 'tcx>( let gr_type = target_ty.is_any_ptr() || target_ty.is_integral(); let (reg_off, reg_top, slot_size) = if gr_type { - let nreg = (layout.size.bytes() + 7) / 8; + let nreg = layout.size.bytes().div_ceil(8); (gr_offs, gr_top, nreg * 8) } else { - let nreg = (layout.size.bytes() + 15) / 16; + let nreg = layout.size.bytes().div_ceil(16); (vr_offs, vr_top, nreg * 16) }; diff --git a/compiler/rustc_index/src/bit_set.rs b/compiler/rustc_index/src/bit_set.rs index a4885aabe1f..645d95b1dba 100644 --- a/compiler/rustc_index/src/bit_set.rs +++ b/compiler/rustc_index/src/bit_set.rs @@ -1744,13 +1744,13 @@ impl<R: Idx, C: Idx> SparseBitMatrix<R, C> { #[inline] fn num_words<T: Idx>(domain_size: T) -> usize { - (domain_size.index() + WORD_BITS - 1) / WORD_BITS + domain_size.index().div_ceil(WORD_BITS) } #[inline] fn num_chunks<T: Idx>(domain_size: T) -> usize { assert!(domain_size.index() > 0); - (domain_size.index() + CHUNK_BITS - 1) / CHUNK_BITS + domain_size.index().div_ceil(CHUNK_BITS) } #[inline] diff --git a/compiler/rustc_passes/src/liveness/rwu_table.rs b/compiler/rustc_passes/src/liveness/rwu_table.rs index 4c1f6ea141e..a1177946f86 100644 --- a/compiler/rustc_passes/src/liveness/rwu_table.rs +++ b/compiler/rustc_passes/src/liveness/rwu_table.rs @@ -44,7 +44,7 @@ impl RWUTable { const WORD_RWU_COUNT: usize = Self::WORD_BITS / Self::RWU_BITS; pub(super) fn new(live_nodes: usize, vars: usize) -> RWUTable { - let live_node_words = (vars + Self::WORD_RWU_COUNT - 1) / Self::WORD_RWU_COUNT; + let live_node_words = vars.div_ceil(Self::WORD_RWU_COUNT); Self { live_nodes, vars, live_node_words, words: vec![0u8; live_node_words * live_nodes] } } diff --git a/compiler/rustc_serialize/src/leb128.rs b/compiler/rustc_serialize/src/leb128.rs index 954c1f728f2..da328dcea03 100644 --- a/compiler/rustc_serialize/src/leb128.rs +++ b/compiler/rustc_serialize/src/leb128.rs @@ -7,7 +7,7 @@ use crate::serialize::Decoder; /// Returns the length of the longest LEB128 encoding for `T`, assuming `T` is an integer type pub const fn max_leb128_len<T>() -> usize { // The longest LEB128 encoding for an integer uses 7 bits per byte. - (size_of::<T>() * 8 + 6) / 7 + (size_of::<T>() * 8).div_ceil(7) } /// Returns the length of the longest LEB128 encoding of all supported integer types.
diff --git a/compiler/rustc_span/src/edit_distance.rs b/compiler/rustc_span/src/edit_distance.rs index 4f3202b694c..416e9daa8fb 100644 --- a/compiler/rustc_span/src/edit_distance.rs +++ b/compiler/rustc_span/src/edit_distance.rs @@ -130,7 +130,7 @@ pub fn edit_distance_with_substrings(a: &str, b: &str, limit: usize) -> Option( continue; } - let size_in_regs = (arg.layout.size.bits() + 31) / 32; + let size_in_regs = arg.layout.size.bits().div_ceil(32); if size_in_regs == 0 { continue; diff --git a/compiler/rustc_target/src/callconv/x86_64.rs b/compiler/rustc_target/src/callconv/x86_64.rs index 700ee73c8fd..d8db7ed6e4c 100644 --- a/compiler/rustc_target/src/callconv/x86_64.rs +++ b/compiler/rustc_target/src/callconv/x86_64.rs @@ -95,7 +95,7 @@ where Ok(()) } - let n = ((arg.layout.size.bytes() + 7) / 8) as usize; + let n = arg.layout.size.bytes().div_ceil(8) as usize; if n > MAX_EIGHTBYTES { return Err(Memory); } diff --git a/compiler/rustc_target/src/callconv/xtensa.rs b/compiler/rustc_target/src/callconv/xtensa.rs index b687f0e20c6..a73a70a1a0c 100644 --- a/compiler/rustc_target/src/callconv/xtensa.rs +++ b/compiler/rustc_target/src/callconv/xtensa.rs @@ -54,7 +54,7 @@ where // Determine the number of GPRs needed to pass the current argument // according to the ABI. 2*XLen-aligned varargs are passed in "aligned" // register pairs, so may consume 3 registers. - let mut needed_arg_gprs = (size + 32 - 1) / 32; + let mut needed_arg_gprs = size.div_ceil(32); if needed_align == 64 { needed_arg_gprs += *arg_gprs_left % 2; } diff --git a/library/core/src/slice/sort/stable/drift.rs b/library/core/src/slice/sort/stable/drift.rs index cf1df1e91a5..1edffe095a8 100644 --- a/library/core/src/slice/sort/stable/drift.rs +++ b/library/core/src/slice/sort/stable/drift.rs @@ -158,7 +158,7 @@ fn merge_tree_scale_factor(n: usize) -> u64 { panic!("Platform not supported"); } - ((1 << 62) + n as u64 - 1) / n as u64 + (1u64 << 62).div_ceil(n as u64) } // Note: merge_tree_depth output is < 64 when left < right as f*x and f*y must @@ -182,7 +182,7 @@ fn sqrt_approx(n: usize) -> usize { // Finally we note that the exponentiation / division can be done directly // with shifts. We OR with 1 to avoid zero-checks in the integer log. let ilog = (n | 1).ilog2(); - let shift = (1 + ilog) / 2; + let shift = ilog.div_ceil(2); ((1 << shift) + (n >> shift)) / 2 } diff --git a/library/core/src/str/iter.rs b/library/core/src/str/iter.rs index 425c4eaee28..bcf886484ad 100644 --- a/library/core/src/str/iter.rs +++ b/library/core/src/str/iter.rs @@ -102,7 +102,7 @@ impl<'a> Iterator for Chars<'a> { // `(len + 3)` can't overflow, because we know that the `slice::Iter` // belongs to a slice in memory which has a maximum length of // `isize::MAX` (that's well below `usize::MAX`). - ((len + 3) / 4, Some(len)) + (len.div_ceil(4), Some(len)) } #[inline] @@ -1532,11 +1532,11 @@ impl<'a> Iterator for EncodeUtf16<'a> { // belongs to a slice in memory which has a maximum length of // `isize::MAX` (that's well below `usize::MAX`) if self.extra == 0 { - ((len + 2) / 3, Some(len)) + (len.div_ceil(3), Some(len)) } else { // We're in the middle of a surrogate pair, so add the remaining // surrogate to the bounds. 
- ((len + 2) / 3 + 1, Some(len + 1)) + (len.div_ceil(3) + 1, Some(len + 1)) } } } diff --git a/library/portable-simd/crates/core_simd/src/lane_count.rs b/library/portable-simd/crates/core_simd/src/lane_count.rs index 280b27bc9bc..bbdfd5f5f3e 100644 --- a/library/portable-simd/crates/core_simd/src/lane_count.rs +++ b/library/portable-simd/crates/core_simd/src/lane_count.rs @@ -8,7 +8,7 @@ pub struct LaneCount<const N: usize>; impl<const N: usize> LaneCount<N> { /// The number of bytes in a bitmask with this many lanes. - pub const BITMASK_LEN: usize = (N + 7) / 8; + pub const BITMASK_LEN: usize = N.div_ceil(8); } /// Statically guarantees that a lane count is marked as supported. -- cgit 1.4.1-3-g733a5 From 93f1201c0616672d71e640a0ad600d029448c40a Mon Sep 17 00:00:00 2001 From: Edoardo Marangoni Date: Sun, 29 Jun 2025 12:11:51 +0200 Subject: compiler: Parse `p-` specs in datalayout string, allow definition of custom default data address space --- compiler/rustc_abi/src/lib.rs | 295 ++++++++++++++++++--- compiler/rustc_ast_lowering/src/format.rs | 4 +- compiler/rustc_codegen_cranelift/src/abi/mod.rs | 2 +- .../rustc_codegen_cranelift/src/abi/pass_mode.rs | 2 +- compiler/rustc_codegen_cranelift/src/common.rs | 2 +- compiler/rustc_codegen_cranelift/src/constant.rs | 2 +- compiler/rustc_codegen_gcc/src/common.rs | 2 +- compiler/rustc_codegen_gcc/src/consts.rs | 4 +- compiler/rustc_codegen_gcc/src/intrinsic/mod.rs | 2 +- compiler/rustc_codegen_gcc/src/intrinsic/simd.rs | 2 +- compiler/rustc_codegen_llvm/src/common.rs | 2 +- compiler/rustc_codegen_llvm/src/consts.rs | 11 +- compiler/rustc_codegen_llvm/src/context.rs | 2 +- .../rustc_codegen_llvm/src/debuginfo/metadata.rs | 14 +- compiler/rustc_codegen_llvm/src/intrinsic.rs | 20 +- compiler/rustc_codegen_llvm/src/lib.rs | 2 +- compiler/rustc_codegen_llvm/src/type_.rs | 4 +- compiler/rustc_codegen_llvm/src/va_arg.rs | 54 ++-- compiler/rustc_codegen_ssa/src/back/write.rs | 2 +- compiler/rustc_codegen_ssa/src/base.rs | 6 +- compiler/rustc_codegen_ssa/src/meth.rs | 8 +- compiler/rustc_codegen_ssa/src/mir/naked_asm.rs | 2 +- compiler/rustc_codegen_ssa/src/mir/rvalue.rs | 2 +- compiler/rustc_const_eval/src/interpret/memory.rs | 4 +- compiler/rustc_errors/messages.ftl | 3 + compiler/rustc_errors/src/diagnostic_impls.rs | 4 + compiler/rustc_middle/src/mir/consts.rs | 2 +- .../rustc_middle/src/mir/interpret/allocation.rs | 6 +- .../src/mir/interpret/allocation/provenance_map.rs | 15 +- compiler/rustc_middle/src/mir/interpret/mod.rs | 4 +- compiler/rustc_middle/src/mir/interpret/pointer.rs | 2 +- compiler/rustc_middle/src/mir/interpret/value.rs | 8 +- compiler/rustc_middle/src/mir/pretty.rs | 2 +- compiler/rustc_middle/src/ty/consts/int.rs | 6 +- compiler/rustc_middle/src/ty/layout.rs | 2 +- compiler/rustc_middle/src/ty/print/pretty.rs | 2 +- compiler/rustc_middle/src/ty/vtable.rs | 4 +- compiler/rustc_mir_build/src/thir/constant.rs | 2 +- compiler/rustc_session/src/config/cfg.rs | 2 +- compiler/rustc_smir/src/rustc_smir/alloc.rs | 9 +- .../rustc_smir/src/rustc_smir/context/impls.rs | 2 +- compiler/rustc_target/src/callconv/loongarch.rs | 4 +- compiler/rustc_target/src/callconv/mips.rs | 2 +- compiler/rustc_target/src/callconv/mod.rs | 4 +- compiler/rustc_target/src/callconv/riscv.rs | 4 +- compiler/rustc_target/src/callconv/sparc.rs | 2 +- compiler/rustc_target/src/callconv/x86.rs | 2 +- compiler/rustc_target/src/spec/mod.rs | 16 +- compiler/rustc_transmute/src/layout/tree.rs | 2 +- compiler/rustc_ty_utils/src/layout.rs | 4 +- .../clippy_lints/src/casts/fn_to_numeric_cast.rs | 2 +-
.../casts/fn_to_numeric_cast_with_truncation.rs | 2 +- src/tools/clippy/clippy_lints/src/casts/utils.rs | 2 +- src/tools/clippy/clippy_lints/src/enum_clike.rs | 2 +- src/tools/clippy/clippy_utils/src/consts.rs | 2 +- .../rust-analyzer/crates/hir-ty/src/layout.rs | 4 +- .../crates/hir-ty/src/layout/target.rs | 5 +- .../rust-analyzer/crates/hir-ty/src/mir/eval.rs | 2 +- 58 files changed, 416 insertions(+), 170 deletions(-) (limited to 'compiler/rustc_codegen_llvm/src') diff --git a/compiler/rustc_abi/src/lib.rs b/compiler/rustc_abi/src/lib.rs index 0df8921c9b7..891ac854f49 100644 --- a/compiler/rustc_abi/src/lib.rs +++ b/compiler/rustc_abi/src/lib.rs @@ -221,6 +221,20 @@ impl ReprOptions { /// * Cranelift stores the base-2 log of the lane count in a 4 bit integer. pub const MAX_SIMD_LANES: u64 = 1 << 0xF; +/// How pointers are represented in a given address space +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub struct PointerSpec { + /// The size of the bitwise representation of the pointer. + pointer_size: Size, + /// The alignment of pointers for this address space + pointer_align: AbiAlign, + /// The size of the value a pointer can be offset by in this address space. + pointer_offset: Size, + /// Pointers into this address space contain extra metadata + /// FIXME(workingjubilee): Consider adequately reflecting this in the compiler? + _is_fat: bool, +} + /// Parsed [Data layout](https://llvm.org/docs/LangRef.html#data-layout) /// for a target, which contains everything needed to compute layouts. #[derive(Debug, PartialEq, Eq)] @@ -236,13 +250,22 @@ pub struct TargetDataLayout { pub f32_align: AbiAlign, pub f64_align: AbiAlign, pub f128_align: AbiAlign, - pub pointer_size: Size, - pub pointer_align: AbiAlign, pub aggregate_align: AbiAlign, /// Alignments for vector types. pub vector_align: Vec<(Size, AbiAlign)>, + pub default_address_space: AddressSpace, + pub default_address_space_pointer_spec: PointerSpec, + + /// Address space information of all known address spaces. + /// + /// # Note + /// + /// This vector does not contain the [`PointerSpec`] relative to the default address space, + /// which instead lives in [`Self::default_address_space_pointer_spec`]. 
+ address_space_info: Vec<(AddressSpace, PointerSpec)>, + pub instruction_address_space: AddressSpace, /// Minimum size of #[repr(C)] enums (default c_int::BITS, usually 32) @@ -267,14 +290,20 @@ impl Default for TargetDataLayout { f32_align: AbiAlign::new(align(32)), f64_align: AbiAlign::new(align(64)), f128_align: AbiAlign::new(align(128)), - pointer_size: Size::from_bits(64), - pointer_align: AbiAlign::new(align(64)), aggregate_align: AbiAlign { abi: align(8) }, vector_align: vec![ (Size::from_bits(64), AbiAlign::new(align(64))), (Size::from_bits(128), AbiAlign::new(align(128))), ], - instruction_address_space: AddressSpace::DATA, + default_address_space: AddressSpace::ZERO, + default_address_space_pointer_spec: PointerSpec { + pointer_size: Size::from_bits(64), + pointer_align: AbiAlign::new(align(64)), + pointer_offset: Size::from_bits(64), + _is_fat: false, + }, + address_space_info: vec![], + instruction_address_space: AddressSpace::ZERO, c_enum_min_size: Integer::I32, } } @@ -288,6 +317,7 @@ pub enum TargetDataLayoutErrors<'a> { InconsistentTargetArchitecture { dl: &'a str, target: &'a str }, InconsistentTargetPointerWidth { pointer_size: u64, target: u32 }, InvalidBitsSize { err: String }, + UnknownPointerSpecification { err: String }, } impl TargetDataLayout { @@ -298,6 +328,7 @@ impl TargetDataLayout { /// determined from llvm string. pub fn parse_from_llvm_datalayout_string<'a>( input: &'a str, + default_address_space: AddressSpace, ) -> Result> { // Parse an address space index from a string. let parse_address_space = |s: &'a str, cause: &'a str| { @@ -321,19 +352,27 @@ impl TargetDataLayout { |s: &'a str, cause: &'a str| parse_bits(s, "size", cause).map(Size::from_bits); // Parse an alignment string. - let parse_align = |s: &[&'a str], cause: &'a str| { - if s.is_empty() { - return Err(TargetDataLayoutErrors::MissingAlignment { cause }); - } + let parse_align_str = |s: &'a str, cause: &'a str| { let align_from_bits = |bits| { Align::from_bits(bits) .map_err(|err| TargetDataLayoutErrors::InvalidAlignment { cause, err }) }; - let abi = parse_bits(s[0], "alignment", cause)?; + let abi = parse_bits(s, "alignment", cause)?; Ok(AbiAlign::new(align_from_bits(abi)?)) }; + // Parse an alignment sequence, possibly in the form `[:]`, + // ignoring the secondary alignment specifications. + let parse_align_seq = |s: &[&'a str], cause: &'a str| { + if s.is_empty() { + return Err(TargetDataLayoutErrors::MissingAlignment { cause }); + } + parse_align_str(s[0], cause) + }; + let mut dl = TargetDataLayout::default(); + dl.default_address_space = default_address_space; + let mut i128_align_src = 64; for spec in input.split('-') { let spec_parts = spec.split(':').collect::>(); @@ -344,24 +383,107 @@ impl TargetDataLayout { [p] if p.starts_with('P') => { dl.instruction_address_space = parse_address_space(&p[1..], "P")? } - ["a", a @ ..] => dl.aggregate_align = parse_align(a, "a")?, - ["f16", a @ ..] => dl.f16_align = parse_align(a, "f16")?, - ["f32", a @ ..] => dl.f32_align = parse_align(a, "f32")?, - ["f64", a @ ..] => dl.f64_align = parse_align(a, "f64")?, - ["f128", a @ ..] => dl.f128_align = parse_align(a, "f128")?, - // FIXME(erikdesjardins): we should be parsing nonzero address spaces - // this will require replacing TargetDataLayout::{pointer_size,pointer_align} - // with e.g. `fn pointer_size_in(AddressSpace)` - [p @ "p", s, a @ ..] | [p @ "p0", s, a @ ..] => { - dl.pointer_size = parse_size(s, p)?; - dl.pointer_align = parse_align(a, p)?; + ["a", a @ ..] 
=> dl.aggregate_align = parse_align_seq(a, "a")?, + ["f16", a @ ..] => dl.f16_align = parse_align_seq(a, "f16")?, + ["f32", a @ ..] => dl.f32_align = parse_align_seq(a, "f32")?, + ["f64", a @ ..] => dl.f64_align = parse_align_seq(a, "f64")?, + ["f128", a @ ..] => dl.f128_align = parse_align_seq(a, "f128")?, + [p, s, a @ ..] if p.starts_with("p") => { + let mut p = p.strip_prefix('p').unwrap(); + let mut _is_fat = false; + + // Some targets, such as CHERI, use the 'f' suffix in the p- spec to signal that + // they use 'fat' pointers. The resulting prefix may look like `pf`. + + if p.starts_with('f') { + p = p.strip_prefix('f').unwrap(); + _is_fat = true; + } + + // However, we currently don't take into account further specifications: + // an error is emitted instead. + if p.starts_with(char::is_alphabetic) { + return Err(TargetDataLayoutErrors::UnknownPointerSpecification { + err: p.to_string(), + }); + } + + let addr_space = if !p.is_empty() { + parse_address_space(p, "p-")? + } else { + AddressSpace::ZERO + }; + + let pointer_size = parse_size(s, "p-")?; + let pointer_align = parse_align_seq(a, "p-")?; + let info = PointerSpec { + pointer_offset: pointer_size, + pointer_size, + pointer_align, + _is_fat, + }; + if addr_space == default_address_space { + dl.default_address_space_pointer_spec = info; + } else { + match dl.address_space_info.iter_mut().find(|(a, _)| *a == addr_space) { + Some(e) => e.1 = info, + None => { + dl.address_space_info.push((addr_space, info)); + } + } + } + } + [p, s, a, _pr, i] if p.starts_with("p") => { + let mut p = p.strip_prefix('p').unwrap(); + let mut _is_fat = false; + + // Some targets, such as CHERI, use the 'f' suffix in the p- spec to signal that + // they use 'fat' pointers. The resulting prefix may look like `pf`. + + if p.starts_with('f') { + p = p.strip_prefix('f').unwrap(); + _is_fat = true; + } + + // However, we currently don't take into account further specifications: + // an error is emitted instead. + if p.starts_with(char::is_alphabetic) { + return Err(TargetDataLayoutErrors::UnknownPointerSpecification { + err: p.to_string(), + }); + } + + let addr_space = if !p.is_empty() { + parse_address_space(p, "p")? + } else { + AddressSpace::ZERO + }; + + let info = PointerSpec { + pointer_size: parse_size(s, "p-")?, + pointer_align: parse_align_str(a, "p-")?, + pointer_offset: parse_size(i, "p-")?, + _is_fat, + }; + + if addr_space == default_address_space { + dl.default_address_space_pointer_spec = info; + } else { + match dl.address_space_info.iter_mut().find(|(a, _)| *a == addr_space) { + Some(e) => e.1 = info, + None => { + dl.address_space_info.push((addr_space, info)); + } + } + } } + [s, a @ ..] if s.starts_with('i') => { let Ok(bits) = s[1..].parse::() else { parse_size(&s[1..], "i")?; // For the user error. continue; }; - let a = parse_align(a, s)?; + let a = parse_align_seq(a, s)?; match bits { 1 => dl.i1_align = a, 8 => dl.i8_align = a, @@ -379,7 +501,7 @@ impl TargetDataLayout { } [s, a @ ..] if s.starts_with('v') => { let v_size = parse_size(&s[1..], "v")?; - let a = parse_align(a, s)?; + let a = parse_align_seq(a, s)?; if let Some(v) = dl.vector_align.iter_mut().find(|v| v.0 == v_size) { v.1 = a; continue; @@ -390,10 +512,27 @@ impl TargetDataLayout { _ => {} // Ignore everything else. } } + + // Inherit, if not given, address space information for specific LLVM elements from the + // default data address space. 
+ if (dl.instruction_address_space != dl.default_address_space) + && dl + .address_space_info + .iter() + .find(|(a, _)| *a == dl.instruction_address_space) + .is_none() + { + dl.address_space_info.push(( + dl.instruction_address_space, + dl.default_address_space_pointer_spec.clone(), + )); + } + Ok(dl) } - /// Returns **exclusive** upper bound on object size in bytes. + /// Returns **exclusive** upper bound on object size in bytes, in the default data address + /// space. /// /// The theoretical maximum object size is defined as the maximum positive `isize` value. /// This ensures that the `offset` semantics remain well-defined by allowing it to correctly @@ -404,7 +543,26 @@ impl TargetDataLayout { /// so we adopt such a more-constrained size bound due to its technical limitations. #[inline] pub fn obj_size_bound(&self) -> u64 { - match self.pointer_size.bits() { + match self.pointer_size().bits() { + 16 => 1 << 15, + 32 => 1 << 31, + 64 => 1 << 61, + bits => panic!("obj_size_bound: unknown pointer bit size {bits}"), + } + } + + /// Returns **exclusive** upper bound on object size in bytes. + /// + /// The theoretical maximum object size is defined as the maximum positive `isize` value. + /// This ensures that the `offset` semantics remain well-defined by allowing it to correctly + /// index every address within an object along with one byte past the end, along with allowing + /// `isize` to store the difference between any two pointers into an object. + /// + /// LLVM uses a 64-bit integer to represent object size in *bits*, but we care only for bytes, + /// so we adopt such a more-constrained size bound due to its technical limitations. + #[inline] + pub fn obj_size_bound_in(&self, address_space: AddressSpace) -> u64 { + match self.pointer_size_in(address_space).bits() { 16 => 1 << 15, 32 => 1 << 31, 64 => 1 << 61, @@ -415,7 +573,18 @@ impl TargetDataLayout { #[inline] pub fn ptr_sized_integer(&self) -> Integer { use Integer::*; - match self.pointer_size.bits() { + match self.pointer_offset().bits() { + 16 => I16, + 32 => I32, + 64 => I64, + bits => panic!("ptr_sized_integer: unknown pointer bit size {bits}"), + } + } + + #[inline] + pub fn ptr_sized_integer_in(&self, address_space: AddressSpace) -> Integer { + use Integer::*; + match self.pointer_offset_in(address_space).bits() { 16 => I16, 32 => I32, 64 => I64, @@ -439,6 +608,66 @@ impl TargetDataLayout { Align::from_bytes(vec_size.bytes().next_power_of_two()).unwrap(), )) } + + /// Get the pointer size in the default data address space. + #[inline] + pub fn pointer_size(&self) -> Size { + self.default_address_space_pointer_spec.pointer_size + } + + /// Get the pointer size in a specific address space. + #[inline] + pub fn pointer_size_in(&self, c: AddressSpace) -> Size { + if c == self.default_address_space { + return self.default_address_space_pointer_spec.pointer_size; + } + + if let Some(e) = self.address_space_info.iter().find(|(a, _)| a == &c) { + e.1.pointer_size + } else { + panic!("Use of unknown address space {c:?}"); + } + } + + /// Get the pointer index in the default data address space. + #[inline] + pub fn pointer_offset(&self) -> Size { + self.default_address_space_pointer_spec.pointer_offset + } + + /// Get the pointer index in a specific address space. 
+ #[inline] + pub fn pointer_offset_in(&self, c: AddressSpace) -> Size { + if c == self.default_address_space { + return self.default_address_space_pointer_spec.pointer_offset; + } + + if let Some(e) = self.address_space_info.iter().find(|(a, _)| a == &c) { + e.1.pointer_offset + } else { + panic!("Use of unknown address space {c:?}"); + } + } + + /// Get the pointer alignment in the default data address space. + #[inline] + pub fn pointer_align(&self) -> AbiAlign { + self.default_address_space_pointer_spec.pointer_align + } + + /// Get the pointer alignment in a specific address space. + #[inline] + pub fn pointer_align_in(&self, c: AddressSpace) -> AbiAlign { + if c == self.default_address_space { + return self.default_address_space_pointer_spec.pointer_align; + } + + if let Some(e) = self.address_space_info.iter().find(|(a, _)| a == &c) { + e.1.pointer_align + } else { + panic!("Use of unknown address space {c:?}"); + } + } } pub trait HasDataLayout { @@ -1101,10 +1330,7 @@ impl Primitive { match self { Int(i, _) => i.size(), Float(f) => f.size(), - // FIXME(erikdesjardins): ignoring address space is technically wrong, pointers in - // different address spaces can have different sizes - // (but TargetDataLayout doesn't currently parse that part of the DL string) - Pointer(_) => dl.pointer_size, + Pointer(a) => dl.pointer_size_in(a), } } @@ -1115,10 +1341,7 @@ impl Primitive { match self { Int(i, _) => i.align(dl), Float(f) => f.align(dl), - // FIXME(erikdesjardins): ignoring address space is technically wrong, pointers in - // different address spaces can have different alignments - // (but TargetDataLayout doesn't currently parse that part of the DL string) - Pointer(_) => dl.pointer_align, + Pointer(a) => dl.pointer_align_in(a), } } } @@ -1422,8 +1645,8 @@ impl FieldsShape { pub struct AddressSpace(pub u32); impl AddressSpace { - /// The default address space, corresponding to data space. - pub const DATA: Self = AddressSpace(0); + /// LLVM's `0` address space. + pub const ZERO: Self = AddressSpace(0); } /// The way we represent values to the backend diff --git a/compiler/rustc_ast_lowering/src/format.rs b/compiler/rustc_ast_lowering/src/format.rs index 943cde90dd2..5b1dcab87b9 100644 --- a/compiler/rustc_ast_lowering/src/format.rs +++ b/compiler/rustc_ast_lowering/src/format.rs @@ -55,7 +55,7 @@ impl<'hir> LoweringContext<'_, 'hir> { /// Get the maximum value of int_ty. It is platform-dependent due to the byte size of isize fn int_ty_max(&self, int_ty: IntTy) -> u128 { match int_ty { - IntTy::Isize => self.tcx.data_layout.pointer_size.signed_int_max() as u128, + IntTy::Isize => self.tcx.data_layout.pointer_size().signed_int_max() as u128, IntTy::I8 => i8::MAX as u128, IntTy::I16 => i16::MAX as u128, IntTy::I32 => i32::MAX as u128, @@ -67,7 +67,7 @@ impl<'hir> LoweringContext<'_, 'hir> { /// Get the maximum value of uint_ty. 
It is platform-dependent due to the byte size of usize fn uint_ty_max(&self, uint_ty: UintTy) -> u128 { match uint_ty { - UintTy::Usize => self.tcx.data_layout.pointer_size.unsigned_int_max(), + UintTy::Usize => self.tcx.data_layout.pointer_size().unsigned_int_max(), UintTy::U8 => u8::MAX as u128, UintTy::U16 => u16::MAX as u128, UintTy::U32 => u32::MAX as u128, diff --git a/compiler/rustc_codegen_cranelift/src/abi/mod.rs b/compiler/rustc_codegen_cranelift/src/abi/mod.rs index 8965e4a944d..7d0731c77bd 100644 --- a/compiler/rustc_codegen_cranelift/src/abi/mod.rs +++ b/compiler/rustc_codegen_cranelift/src/abi/mod.rs @@ -786,7 +786,7 @@ pub(crate) fn codegen_drop<'tcx>( pub(crate) fn lib_call_arg_param(tcx: TyCtxt<'_>, ty: Type, is_signed: bool) -> AbiParam { let param = AbiParam::new(ty); - if ty.is_int() && u64::from(ty.bits()) < tcx.data_layout.pointer_size.bits() { + if ty.is_int() && u64::from(ty.bits()) < tcx.data_layout.pointer_size().bits() { match (&*tcx.sess.target.arch, &*tcx.sess.target.vendor) { ("x86_64", _) | ("aarch64", "apple") => match (ty, is_signed) { (types::I8 | types::I16, true) => param.sext(), diff --git a/compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs b/compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs index cd0afee0cfb..2031842062d 100644 --- a/compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs +++ b/compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs @@ -127,7 +127,7 @@ impl<'tcx> ArgAbiExt<'tcx> for ArgAbi<'tcx, Ty<'tcx>> { PassMode::Indirect { attrs, meta_attrs: None, on_stack } => { if on_stack { // Abi requires aligning struct size to pointer size - let size = self.layout.size.align_to(tcx.data_layout.pointer_align.abi); + let size = self.layout.size.align_to(tcx.data_layout.pointer_align().abi); let size = u32::try_from(size.bytes()).unwrap(); smallvec![apply_attrs_to_abi_param( AbiParam::special(pointer_ty(tcx), ArgumentPurpose::StructArgument(size),), diff --git a/compiler/rustc_codegen_cranelift/src/common.rs b/compiler/rustc_codegen_cranelift/src/common.rs index 2f11b2d2dcc..2fbe5c02802 100644 --- a/compiler/rustc_codegen_cranelift/src/common.rs +++ b/compiler/rustc_codegen_cranelift/src/common.rs @@ -15,7 +15,7 @@ use crate::debuginfo::FunctionDebugContext; use crate::prelude::*; pub(crate) fn pointer_ty(tcx: TyCtxt<'_>) -> types::Type { - match tcx.data_layout.pointer_size.bits() { + match tcx.data_layout.pointer_size().bits() { 16 => types::I16, 32 => types::I32, 64 => types::I64, diff --git a/compiler/rustc_codegen_cranelift/src/constant.rs b/compiler/rustc_codegen_cranelift/src/constant.rs index ee43eb736e6..ed06423b260 100644 --- a/compiler/rustc_codegen_cranelift/src/constant.rs +++ b/compiler/rustc_codegen_cranelift/src/constant.rs @@ -443,7 +443,7 @@ fn define_all_allocs(tcx: TyCtxt<'_>, module: &mut dyn Module, cx: &mut Constant let addend = { let endianness = tcx.data_layout.endian; let offset = offset.bytes() as usize; - let ptr_size = tcx.data_layout.pointer_size; + let ptr_size = tcx.data_layout.pointer_size(); let bytes = &alloc.inspect_with_uninit_and_ptr_outside_interpreter( offset..offset + ptr_size.bytes() as usize, ); diff --git a/compiler/rustc_codegen_gcc/src/common.rs b/compiler/rustc_codegen_gcc/src/common.rs index dd582834fac..32713eb56c6 100644 --- a/compiler/rustc_codegen_gcc/src/common.rs +++ b/compiler/rustc_codegen_gcc/src/common.rs @@ -162,7 +162,7 @@ impl<'gcc, 'tcx> ConstCodegenMethods for CodegenCx<'gcc, 'tcx> { } fn const_usize(&self, i: u64) -> RValue<'gcc> { - let bit_size = 
self.data_layout().pointer_size.bits(); + let bit_size = self.data_layout().pointer_size().bits(); if bit_size < 64 { // make sure it doesn't overflow assert!(i < (1 << bit_size)); diff --git a/compiler/rustc_codegen_gcc/src/consts.rs b/compiler/rustc_codegen_gcc/src/consts.rs index b43f9b24c6a..c04c75e1b11 100644 --- a/compiler/rustc_codegen_gcc/src/consts.rs +++ b/compiler/rustc_codegen_gcc/src/consts.rs @@ -294,7 +294,7 @@ pub(crate) fn const_alloc_to_gcc_uncached<'gcc>( let alloc = alloc.inner(); let mut llvals = Vec::with_capacity(alloc.provenance().ptrs().len() + 1); let dl = cx.data_layout(); - let pointer_size = dl.pointer_size.bytes() as usize; + let pointer_size = dl.pointer_size().bytes() as usize; let mut next_offset = 0; for &(offset, prov) in alloc.provenance().ptrs().iter() { @@ -331,7 +331,7 @@ pub(crate) fn const_alloc_to_gcc_uncached<'gcc>( ), abi::Scalar::Initialized { value: Primitive::Pointer(address_space), - valid_range: WrappingRange::full(dl.pointer_size), + valid_range: WrappingRange::full(dl.pointer_size()), }, cx.type_i8p_ext(address_space), )); diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs index 497605978fe..0753ac1aeb8 100644 --- a/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs +++ b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs @@ -541,7 +541,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tc // For rusty ABIs, small aggregates are actually passed // as `RegKind::Integer` (see `FnAbi::adjust_for_abi`), // so we re-use that same threshold here. - layout.size() <= self.data_layout().pointer_size * 2 + layout.size() <= self.data_layout().pointer_size() * 2 } }; diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs b/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs index 2e508813fc3..350915a277e 100644 --- a/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs +++ b/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs @@ -1184,7 +1184,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( let lhs = args[0].immediate(); let rhs = args[1].immediate(); let is_add = name == sym::simd_saturating_add; - let ptr_bits = bx.tcx().data_layout.pointer_size.bits() as _; + let ptr_bits = bx.tcx().data_layout.pointer_size().bits() as _; let (signed, elem_width, elem_ty) = match *in_elem.kind() { ty::Int(i) => (true, i.bit_width().unwrap_or(ptr_bits) / 8, bx.cx.type_int_from_ty(i)), ty::Uint(i) => { diff --git a/compiler/rustc_codegen_llvm/src/common.rs b/compiler/rustc_codegen_llvm/src/common.rs index 7cfab25bc50..92f38565eef 100644 --- a/compiler/rustc_codegen_llvm/src/common.rs +++ b/compiler/rustc_codegen_llvm/src/common.rs @@ -175,7 +175,7 @@ impl<'ll, 'tcx> ConstCodegenMethods for CodegenCx<'ll, 'tcx> { } fn const_usize(&self, i: u64) -> &'ll Value { - let bit_size = self.data_layout().pointer_size.bits(); + let bit_size = self.data_layout().pointer_size().bits(); if bit_size < 64 { // make sure it doesn't overflow assert!(i < (1 << bit_size)); diff --git a/compiler/rustc_codegen_llvm/src/consts.rs b/compiler/rustc_codegen_llvm/src/consts.rs index a4492d76c3c..28f5282c6b0 100644 --- a/compiler/rustc_codegen_llvm/src/consts.rs +++ b/compiler/rustc_codegen_llvm/src/consts.rs @@ -43,7 +43,8 @@ pub(crate) fn const_alloc_to_llvm<'ll>( } let mut llvals = Vec::with_capacity(alloc.provenance().ptrs().len() + 1); let dl = cx.data_layout(); - let pointer_size = dl.pointer_size.bytes() as usize; + let pointer_size = dl.pointer_size(); + let pointer_size_bytes = pointer_size.bytes() as 
usize; // Note: this function may call `inspect_with_uninit_and_ptr_outside_interpreter`, so `range` // must be within the bounds of `alloc` and not contain or overlap a pointer provenance. @@ -100,7 +101,9 @@ pub(crate) fn const_alloc_to_llvm<'ll>( // This `inspect` is okay since it is within the bounds of the allocation, it doesn't // affect interpreter execution (we inspect the result after interpreter execution), // and we properly interpret the provenance as a relocation pointer offset. - alloc.inspect_with_uninit_and_ptr_outside_interpreter(offset..(offset + pointer_size)), + alloc.inspect_with_uninit_and_ptr_outside_interpreter( + offset..(offset + pointer_size_bytes), + ), ) .expect("const_alloc_to_llvm: could not read relocation pointer") as u64; @@ -111,11 +114,11 @@ pub(crate) fn const_alloc_to_llvm<'ll>( InterpScalar::from_pointer(Pointer::new(prov, Size::from_bytes(ptr_offset)), &cx.tcx), Scalar::Initialized { value: Primitive::Pointer(address_space), - valid_range: WrappingRange::full(dl.pointer_size), + valid_range: WrappingRange::full(pointer_size), }, cx.type_ptr_ext(address_space), )); - next_offset = offset + pointer_size; + next_offset = offset + pointer_size_bytes; } if alloc.len() >= next_offset { let range = next_offset..alloc.len(); diff --git a/compiler/rustc_codegen_llvm/src/context.rs b/compiler/rustc_codegen_llvm/src/context.rs index 0324dff6ff2..90582e23b04 100644 --- a/compiler/rustc_codegen_llvm/src/context.rs +++ b/compiler/rustc_codegen_llvm/src/context.rs @@ -605,7 +605,7 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> { GenericCx( FullCx { tcx, - scx: SimpleCx::new(llmod, llcx, tcx.data_layout.pointer_size), + scx: SimpleCx::new(llmod, llcx, tcx.data_layout.pointer_size()), use_dll_storage_attrs, tls_model, codegen_unit, diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs index 7f3e486ca31..9b4736e50e6 100644 --- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs +++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs @@ -159,13 +159,15 @@ fn build_pointer_or_reference_di_node<'ll, 'tcx>( return_if_di_node_created_in_meantime!(cx, unique_type_id); let data_layout = &cx.tcx.data_layout; + let pointer_size = data_layout.pointer_size(); + let pointer_align = data_layout.pointer_align(); let ptr_type_debuginfo_name = compute_debuginfo_type_name(cx.tcx, ptr_type, true); match wide_pointer_kind(cx, pointee_type) { None => { // This is a thin pointer. Create a regular pointer type and give it the correct name. assert_eq!( - (data_layout.pointer_size, data_layout.pointer_align.abi), + (pointer_size, pointer_align.abi), cx.size_and_align_of(ptr_type), "ptr_type={ptr_type}, pointee_type={pointee_type}", ); @@ -174,8 +176,8 @@ fn build_pointer_or_reference_di_node<'ll, 'tcx>( llvm::LLVMRustDIBuilderCreatePointerType( DIB(cx), pointee_type_di_node, - data_layout.pointer_size.bits(), - data_layout.pointer_align.abi.bits() as u32, + pointer_size.bits(), + pointer_align.abi.bits() as u32, 0, // Ignore DWARF address space. ptr_type_debuginfo_name.as_c_char_ptr(), ptr_type_debuginfo_name.len(), @@ -319,7 +321,9 @@ fn build_subroutine_type_di_node<'ll, 'tcx>( let name = compute_debuginfo_type_name(cx.tcx, fn_ty, false); let (size, align) = match fn_ty.kind() { ty::FnDef(..) => (Size::ZERO, Align::ONE), - ty::FnPtr(..) => (cx.tcx.data_layout.pointer_size, cx.tcx.data_layout.pointer_align.abi), + ty::FnPtr(..) 
=> { + (cx.tcx.data_layout.pointer_size(), cx.tcx.data_layout.pointer_align().abi) + } _ => unreachable!(), }; let di_node = unsafe { @@ -504,7 +508,7 @@ fn recursion_marker_type_di_node<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>) -> &'ll D create_basic_type( cx, "", - cx.tcx.data_layout.pointer_size, + cx.tcx.data_layout.pointer_size(), dwarf_const::DW_ATE_unsigned, ) }) diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs index 35922c100cd..fcc0d378f06 100644 --- a/compiler/rustc_codegen_llvm/src/intrinsic.rs +++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs @@ -458,7 +458,7 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> { // For rusty ABIs, small aggregates are actually passed // as `RegKind::Integer` (see `FnAbi::adjust_for_abi`), // so we re-use that same threshold here. - layout.size() <= self.data_layout().pointer_size * 2 + layout.size() <= self.data_layout().pointer_size() * 2 } }; @@ -758,8 +758,8 @@ fn codegen_msvc_try<'ll, 'tcx>( // } // // More information can be found in libstd's seh.rs implementation. - let ptr_size = bx.tcx().data_layout.pointer_size; - let ptr_align = bx.tcx().data_layout.pointer_align.abi; + let ptr_size = bx.tcx().data_layout.pointer_size(); + let ptr_align = bx.tcx().data_layout.pointer_align().abi; let slot = bx.alloca(ptr_size, ptr_align); let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void()); bx.invoke(try_func_ty, None, None, try_func, &[data], normal, catchswitch, None, None); @@ -1031,8 +1031,8 @@ fn codegen_emcc_try<'ll, 'tcx>( // We need to pass two values to catch_func (ptr and is_rust_panic), so // create an alloca and pass a pointer to that. - let ptr_size = bx.tcx().data_layout.pointer_size; - let ptr_align = bx.tcx().data_layout.pointer_align.abi; + let ptr_size = bx.tcx().data_layout.pointer_size(); + let ptr_align = bx.tcx().data_layout.pointer_align().abi; let i8_align = bx.tcx().data_layout.i8_align.abi; // Required in order for there to be no padding between the fields. assert!(i8_align <= ptr_align); @@ -1158,9 +1158,11 @@ fn generic_simd_intrinsic<'ll, 'tcx>( macro_rules! 
require_int_or_uint_ty { ($ty: expr, $diag: expr) => { match $ty { - ty::Int(i) => i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size.bits()), + ty::Int(i) => { + i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size().bits()) + } ty::Uint(i) => { - i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size.bits()) + i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size().bits()) } _ => { return_error!($diag); @@ -2014,10 +2016,10 @@ fn generic_simd_intrinsic<'ll, 'tcx>( } else { let bitwidth = match in_elem.kind() { ty::Int(i) => { - i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size.bits()) + i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size().bits()) } ty::Uint(i) => { - i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size.bits()) + i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size().bits()) } _ => return_error!(InvalidMonomorphization::UnsupportedSymbol { span, diff --git a/compiler/rustc_codegen_llvm/src/lib.rs b/compiler/rustc_codegen_llvm/src/lib.rs index cdfffbe47bf..63ca51b006d 100644 --- a/compiler/rustc_codegen_llvm/src/lib.rs +++ b/compiler/rustc_codegen_llvm/src/lib.rs @@ -113,7 +113,7 @@ impl ExtraBackendMethods for LlvmCodegenBackend { ) -> ModuleLlvm { let module_llvm = ModuleLlvm::new_metadata(tcx, module_name); let cx = - SimpleCx::new(module_llvm.llmod(), &module_llvm.llcx, tcx.data_layout.pointer_size); + SimpleCx::new(module_llvm.llmod(), &module_llvm.llcx, tcx.data_layout.pointer_size()); unsafe { allocator::codegen(tcx, cx, module_name, kind, alloc_error_handler_kind); } diff --git a/compiler/rustc_codegen_llvm/src/type_.rs b/compiler/rustc_codegen_llvm/src/type_.rs index 453eca2bbe1..ee472e75ed4 100644 --- a/compiler/rustc_codegen_llvm/src/type_.rs +++ b/compiler/rustc_codegen_llvm/src/type_.rs @@ -208,7 +208,7 @@ impl<'ll, CX: Borrow>> BaseTypeCodegenMethods for GenericCx<'ll, CX> { } fn type_ptr(&self) -> &'ll Type { - self.type_ptr_ext(AddressSpace::DATA) + self.type_ptr_ext(AddressSpace::ZERO) } fn type_ptr_ext(&self, address_space: AddressSpace) -> &'ll Type { @@ -258,7 +258,7 @@ impl Type { } pub(crate) fn ptr_llcx(llcx: &llvm::Context) -> &Type { - unsafe { llvm::LLVMPointerTypeInContext(llcx, AddressSpace::DATA.0) } + unsafe { llvm::LLVMPointerTypeInContext(llcx, AddressSpace::ZERO.0) } } } diff --git a/compiler/rustc_codegen_llvm/src/va_arg.rs b/compiler/rustc_codegen_llvm/src/va_arg.rs index 4fe4c9bcbf2..b52847f5bcf 100644 --- a/compiler/rustc_codegen_llvm/src/va_arg.rs +++ b/compiler/rustc_codegen_llvm/src/va_arg.rs @@ -45,7 +45,8 @@ fn emit_direct_ptr_va_arg<'ll, 'tcx>( let va_list_ty = bx.type_ptr(); let va_list_addr = list.immediate(); - let ptr = bx.load(va_list_ty, va_list_addr, bx.tcx().data_layout.pointer_align.abi); + let ptr_align_abi = bx.tcx().data_layout.pointer_align().abi; + let ptr = bx.load(va_list_ty, va_list_addr, ptr_align_abi); let (addr, addr_align) = if allow_higher_align && align > slot_size { (round_pointer_up_to_alignment(bx, ptr, align, bx.type_ptr()), align) @@ -56,7 +57,7 @@ fn emit_direct_ptr_va_arg<'ll, 'tcx>( let aligned_size = size.align_to(slot_size).bytes() as i32; let full_direct_size = bx.cx().const_i32(aligned_size); let next = bx.inbounds_ptradd(addr, full_direct_size); - bx.store(next, va_list_addr, bx.tcx().data_layout.pointer_align.abi); + bx.store(next, va_list_addr, ptr_align_abi); if size.bytes() < slot_size.bytes() && bx.tcx().sess.target.endian == Endian::Big @@ -108,8 +109,8 @@ fn emit_ptr_va_arg<'ll, 'tcx>( let (llty, size, align) = if 
indirect { ( bx.cx.layout_of(Ty::new_imm_ptr(bx.cx.tcx, target_ty)).llvm_type(bx.cx), - bx.cx.data_layout().pointer_size, - bx.cx.data_layout().pointer_align, + bx.cx.data_layout().pointer_size(), + bx.cx.data_layout().pointer_align(), ) } else { (layout.llvm_type(bx.cx), layout.size, layout.align) @@ -204,7 +205,7 @@ fn emit_aapcs_va_arg<'ll, 'tcx>( bx.switch_to_block(in_reg); let top_type = bx.type_ptr(); - let top = bx.load(top_type, reg_top, dl.pointer_align.abi); + let top = bx.load(top_type, reg_top, dl.pointer_align().abi); // reg_value = *(@top + reg_off_v); let mut reg_addr = bx.ptradd(top, reg_off_v); @@ -297,6 +298,7 @@ fn emit_powerpc_va_arg<'ll, 'tcx>( let max_regs = 8u8; let use_regs = bx.icmp(IntPredicate::IntULT, num_regs, bx.const_u8(max_regs)); + let ptr_align_abi = bx.tcx().data_layout.pointer_align().abi; let in_reg = bx.append_sibling_block("va_arg.in_reg"); let in_mem = bx.append_sibling_block("va_arg.in_mem"); @@ -308,7 +310,7 @@ fn emit_powerpc_va_arg<'ll, 'tcx>( bx.switch_to_block(in_reg); let reg_safe_area_ptr = bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(1 + 1 + 2 + 4)); - let mut reg_addr = bx.load(bx.type_ptr(), reg_safe_area_ptr, dl.pointer_align.abi); + let mut reg_addr = bx.load(bx.type_ptr(), reg_safe_area_ptr, ptr_align_abi); // Floating-point registers start after the general-purpose registers. if !is_int && !is_soft_float_abi { @@ -342,11 +344,11 @@ fn emit_powerpc_va_arg<'ll, 'tcx>( let size = if !is_indirect { layout.layout.size.align_to(overflow_area_align) } else { - dl.pointer_size + dl.pointer_size() }; let overflow_area_ptr = bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(1 + 1 + 2)); - let mut overflow_area = bx.load(bx.type_ptr(), overflow_area_ptr, dl.pointer_align.abi); + let mut overflow_area = bx.load(bx.type_ptr(), overflow_area_ptr, ptr_align_abi); // Round up address of argument to alignment if layout.layout.align.abi > overflow_area_align { @@ -362,7 +364,7 @@ fn emit_powerpc_va_arg<'ll, 'tcx>( // Increase the overflow area. overflow_area = bx.inbounds_ptradd(overflow_area, bx.const_usize(size.bytes())); - bx.store(overflow_area, overflow_area_ptr, dl.pointer_align.abi); + bx.store(overflow_area, overflow_area_ptr, ptr_align_abi); bx.br(end); @@ -373,11 +375,8 @@ fn emit_powerpc_va_arg<'ll, 'tcx>( bx.switch_to_block(end); let val_addr = bx.phi(bx.type_ptr(), &[reg_addr, mem_addr], &[in_reg, in_mem]); let val_type = layout.llvm_type(bx); - let val_addr = if is_indirect { - bx.load(bx.cx.type_ptr(), val_addr, dl.pointer_align.abi) - } else { - val_addr - }; + let val_addr = + if is_indirect { bx.load(bx.cx.type_ptr(), val_addr, ptr_align_abi) } else { val_addr }; bx.load(val_type, val_addr, layout.align.abi) } @@ -414,6 +413,7 @@ fn emit_s390x_va_arg<'ll, 'tcx>( let in_reg = bx.append_sibling_block("va_arg.in_reg"); let in_mem = bx.append_sibling_block("va_arg.in_mem"); let end = bx.append_sibling_block("va_arg.end"); + let ptr_align_abi = dl.pointer_align().abi; // FIXME: vector ABI not yet supported. let target_ty_size = bx.cx.size_of(target_ty).bytes(); @@ -435,7 +435,7 @@ fn emit_s390x_va_arg<'ll, 'tcx>( bx.switch_to_block(in_reg); // Work out the address of the value in the register save area. 
- let reg_ptr_v = bx.load(bx.type_ptr(), reg_save_area, dl.pointer_align.abi); + let reg_ptr_v = bx.load(bx.type_ptr(), reg_save_area, ptr_align_abi); let scaled_reg_count = bx.mul(reg_count_v, bx.const_u64(8)); let reg_off = bx.add(scaled_reg_count, bx.const_u64(reg_save_index * 8 + reg_padding)); let reg_addr = bx.ptradd(reg_ptr_v, reg_off); @@ -449,15 +449,14 @@ fn emit_s390x_va_arg<'ll, 'tcx>( bx.switch_to_block(in_mem); // Work out the address of the value in the argument overflow area. - let arg_ptr_v = - bx.load(bx.type_ptr(), overflow_arg_area, bx.tcx().data_layout.pointer_align.abi); + let arg_ptr_v = bx.load(bx.type_ptr(), overflow_arg_area, ptr_align_abi); let arg_off = bx.const_u64(padding); let mem_addr = bx.ptradd(arg_ptr_v, arg_off); // Update the argument overflow area pointer. let arg_size = bx.cx().const_u64(padded_size); let new_arg_ptr_v = bx.inbounds_ptradd(arg_ptr_v, arg_size); - bx.store(new_arg_ptr_v, overflow_arg_area, dl.pointer_align.abi); + bx.store(new_arg_ptr_v, overflow_arg_area, ptr_align_abi); bx.br(end); // Return the appropriate result. @@ -465,7 +464,7 @@ fn emit_s390x_va_arg<'ll, 'tcx>( let val_addr = bx.phi(bx.type_ptr(), &[reg_addr, mem_addr], &[in_reg, in_mem]); let val_type = layout.llvm_type(bx); let val_addr = - if indirect { bx.load(bx.cx.type_ptr(), val_addr, dl.pointer_align.abi) } else { val_addr }; + if indirect { bx.load(bx.cx.type_ptr(), val_addr, ptr_align_abi) } else { val_addr }; bx.load(val_type, val_addr, layout.align.abi) } @@ -607,7 +606,7 @@ fn emit_x86_64_sysv64_va_arg<'ll, 'tcx>( // loads than necessary. Can we clean this up? let reg_save_area_ptr = bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(2 * unsigned_int_offset + ptr_offset)); - let reg_save_area_v = bx.load(bx.type_ptr(), reg_save_area_ptr, dl.pointer_align.abi); + let reg_save_area_v = bx.load(bx.type_ptr(), reg_save_area_ptr, dl.pointer_align().abi); let reg_addr = match layout.layout.backend_repr() { BackendRepr::Scalar(scalar) => match scalar.primitive() { @@ -749,10 +748,11 @@ fn x86_64_sysv64_va_arg_from_memory<'ll, 'tcx>( layout: TyAndLayout<'tcx, Ty<'tcx>>, ) -> &'ll Value { let dl = bx.cx.data_layout(); + let ptr_align_abi = dl.data_layout().pointer_align().abi; let overflow_arg_area_ptr = bx.inbounds_ptradd(va_list_addr, bx.const_usize(8)); - let overflow_arg_area_v = bx.load(bx.type_ptr(), overflow_arg_area_ptr, dl.pointer_align.abi); + let overflow_arg_area_v = bx.load(bx.type_ptr(), overflow_arg_area_ptr, ptr_align_abi); // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16 // byte boundary if alignment needed by type exceeds 8 byte boundary. 
// It isn't stated explicitly in the standard, but in practice we use @@ -771,7 +771,7 @@ fn x86_64_sysv64_va_arg_from_memory<'ll, 'tcx>( let size_in_bytes = layout.layout.size().bytes(); let offset = bx.const_i32(size_in_bytes.next_multiple_of(8) as i32); let overflow_arg_area = bx.inbounds_ptradd(overflow_arg_area_v, offset); - bx.store(overflow_arg_area, overflow_arg_area_ptr, dl.pointer_align.abi); + bx.store(overflow_arg_area, overflow_arg_area_ptr, ptr_align_abi); mem_addr } @@ -803,6 +803,7 @@ fn emit_xtensa_va_arg<'ll, 'tcx>( let from_stack = bx.append_sibling_block("va_arg.from_stack"); let from_regsave = bx.append_sibling_block("va_arg.from_regsave"); let end = bx.append_sibling_block("va_arg.end"); + let ptr_align_abi = bx.tcx().data_layout.pointer_align().abi; // (*va).va_ndx let va_reg_offset = 4; @@ -825,12 +826,11 @@ fn emit_xtensa_va_arg<'ll, 'tcx>( bx.switch_to_block(from_regsave); // update va_ndx - bx.store(offset_next, offset_ptr, bx.tcx().data_layout.pointer_align.abi); + bx.store(offset_next, offset_ptr, ptr_align_abi); // (*va).va_reg let regsave_area_ptr = bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(va_reg_offset)); - let regsave_area = - bx.load(bx.type_ptr(), regsave_area_ptr, bx.tcx().data_layout.pointer_align.abi); + let regsave_area = bx.load(bx.type_ptr(), regsave_area_ptr, ptr_align_abi); let regsave_value_ptr = bx.inbounds_ptradd(regsave_area, offset); bx.br(end); @@ -849,11 +849,11 @@ fn emit_xtensa_va_arg<'ll, 'tcx>( // va_ndx = offset_next_corrected; let offset_next_corrected = bx.add(offset_next, bx.const_i32(slot_size)); // update va_ndx - bx.store(offset_next_corrected, offset_ptr, bx.tcx().data_layout.pointer_align.abi); + bx.store(offset_next_corrected, offset_ptr, ptr_align_abi); // let stack_value_ptr = unsafe { (*va).va_stk.byte_add(offset_corrected) }; let stack_area_ptr = bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(0)); - let stack_area = bx.load(bx.type_ptr(), stack_area_ptr, bx.tcx().data_layout.pointer_align.abi); + let stack_area = bx.load(bx.type_ptr(), stack_area_ptr, ptr_align_abi); let stack_value_ptr = bx.inbounds_ptradd(stack_area, offset_corrected); bx.br(end); diff --git a/compiler/rustc_codegen_ssa/src/back/write.rs b/compiler/rustc_codegen_ssa/src/back/write.rs index 8330e4f7af0..d2a64ec2993 100644 --- a/compiler/rustc_codegen_ssa/src/back/write.rs +++ b/compiler/rustc_codegen_ssa/src/back/write.rs @@ -1208,7 +1208,7 @@ fn start_executing_work( split_debuginfo: tcx.sess.split_debuginfo(), split_dwarf_kind: tcx.sess.opts.unstable_opts.split_dwarf_kind, parallel: backend.supports_parallel() && !sess.opts.unstable_opts.no_parallel_backend, - pointer_size: tcx.data_layout.pointer_size, + pointer_size: tcx.data_layout.pointer_size(), invocation_temp: sess.invocation_temp.clone(), }; diff --git a/compiler/rustc_codegen_ssa/src/base.rs b/compiler/rustc_codegen_ssa/src/base.rs index 102d4ea2fa6..18581f854b6 100644 --- a/compiler/rustc_codegen_ssa/src/base.rs +++ b/compiler/rustc_codegen_ssa/src/base.rs @@ -200,7 +200,7 @@ fn unsized_info<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( let vptr_entry_idx = cx.tcx().supertrait_vtable_slot((source, target)); if let Some(entry_idx) = vptr_entry_idx { - let ptr_size = bx.data_layout().pointer_size; + let ptr_size = bx.data_layout().pointer_size(); let vtable_byte_offset = u64::try_from(entry_idx).unwrap() * ptr_size.bytes(); load_vtable(bx, old_info, bx.type_ptr(), vtable_byte_offset, source, true) } else { @@ -577,8 +577,8 @@ fn get_argc_argv<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(bx: 
&mut Bx) -> (Bx::Va // Params for UEFI let param_handle = bx.get_param(0); let param_system_table = bx.get_param(1); - let ptr_size = bx.tcx().data_layout.pointer_size; - let ptr_align = bx.tcx().data_layout.pointer_align.abi; + let ptr_size = bx.tcx().data_layout.pointer_size(); + let ptr_align = bx.tcx().data_layout.pointer_align().abi; let arg_argc = bx.const_int(bx.cx().type_isize(), 2); let arg_argv = bx.alloca(2 * ptr_size, ptr_align); bx.store(param_handle, arg_argv, ptr_align); diff --git a/compiler/rustc_codegen_ssa/src/meth.rs b/compiler/rustc_codegen_ssa/src/meth.rs index 3a11ce6befb..2aa5c3c27ea 100644 --- a/compiler/rustc_codegen_ssa/src/meth.rs +++ b/compiler/rustc_codegen_ssa/src/meth.rs @@ -27,7 +27,7 @@ impl<'a, 'tcx> VirtualIndex { debug!("get_fn({llvtable:?}, {ty:?}, {self:?})"); let llty = bx.fn_ptr_backend_type(fn_abi); - let ptr_size = bx.data_layout().pointer_size; + let ptr_size = bx.data_layout().pointer_size(); let vtable_byte_offset = self.0 * ptr_size.bytes(); load_vtable(bx, llvtable, llty, vtable_byte_offset, ty, nonnull) @@ -63,7 +63,7 @@ impl<'a, 'tcx> VirtualIndex { debug!("get_int({:?}, {:?})", llvtable, self); let llty = bx.type_isize(); - let ptr_size = bx.data_layout().pointer_size; + let ptr_size = bx.data_layout().pointer_size(); let vtable_byte_offset = self.0 * ptr_size.bytes(); load_vtable(bx, llvtable, llty, vtable_byte_offset, ty, false) @@ -115,7 +115,7 @@ pub(crate) fn get_vtable<'tcx, Cx: CodegenMethods<'tcx>>( let vtable_alloc_id = tcx.vtable_allocation((ty, trait_ref)); let vtable_allocation = tcx.global_alloc(vtable_alloc_id).unwrap_memory(); let vtable_const = cx.const_data_from_alloc(vtable_allocation); - let align = cx.data_layout().pointer_align.abi; + let align = cx.data_layout().pointer_align().abi; let vtable = cx.static_addr_of(vtable_const, align, Some("vtable")); cx.apply_vcall_visibility_metadata(ty, trait_ref, vtable); @@ -133,7 +133,7 @@ pub(crate) fn load_vtable<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( ty: Ty<'tcx>, nonnull: bool, ) -> Bx::Value { - let ptr_align = bx.data_layout().pointer_align.abi; + let ptr_align = bx.data_layout().pointer_align().abi; if bx.cx().sess().opts.unstable_opts.virtual_function_elimination && bx.cx().sess().lto() == Lto::Fat diff --git a/compiler/rustc_codegen_ssa/src/mir/naked_asm.rs b/compiler/rustc_codegen_ssa/src/mir/naked_asm.rs index 9da4b8cc8fd..beaf8950978 100644 --- a/compiler/rustc_codegen_ssa/src/mir/naked_asm.rs +++ b/compiler/rustc_codegen_ssa/src/mir/naked_asm.rs @@ -326,7 +326,7 @@ fn prefix_and_suffix<'tcx>( fn wasm_functype<'tcx>(tcx: TyCtxt<'tcx>, fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> String { let mut signature = String::with_capacity(64); - let ptr_type = match tcx.data_layout.pointer_size.bits() { + let ptr_type = match tcx.data_layout.pointer_size().bits() { 32 => "i32", 64 => "i64", other => bug!("wasm pointer size cannot be {other} bits"), diff --git a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs index a8f9cbbe19b..bb9a8b4e49a 100644 --- a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs +++ b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs @@ -1189,7 +1189,7 @@ fn assume_scalar_range<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( let range = scalar.valid_range(bx.cx()); bx.assume_integer_range(imm, backend_ty, range); } - abi::Primitive::Pointer(abi::AddressSpace::DATA) + abi::Primitive::Pointer(abi::AddressSpace::ZERO) if !scalar.valid_range(bx.cx()).contains(0) => { bx.assume_nonnull(imm); diff --git 
a/compiler/rustc_const_eval/src/interpret/memory.rs b/compiler/rustc_const_eval/src/interpret/memory.rs index ff822b52a8d..541eede59db 100644 --- a/compiler/rustc_const_eval/src/interpret/memory.rs +++ b/compiler/rustc_const_eval/src/interpret/memory.rs @@ -1233,7 +1233,7 @@ impl<'a, 'tcx, Prov: Provenance, Extra, Bytes: AllocBytes> /// `offset` is relative to this allocation reference, not the base of the allocation. pub fn write_ptr_sized(&mut self, offset: Size, val: Scalar) -> InterpResult<'tcx> { - self.write_scalar(alloc_range(offset, self.tcx.data_layout().pointer_size), val) + self.write_scalar(alloc_range(offset, self.tcx.data_layout().pointer_size()), val) } /// Mark the given sub-range (relative to this allocation reference) as uninitialized. @@ -1285,7 +1285,7 @@ impl<'a, 'tcx, Prov: Provenance, Extra, Bytes: AllocBytes> AllocRef<'a, 'tcx, Pr /// `offset` is relative to this allocation reference, not the base of the allocation. pub fn read_pointer(&self, offset: Size) -> InterpResult<'tcx, Scalar> { self.read_scalar( - alloc_range(offset, self.tcx.data_layout().pointer_size), + alloc_range(offset, self.tcx.data_layout().pointer_size()), /*read_provenance*/ true, ) } diff --git a/compiler/rustc_errors/messages.ftl b/compiler/rustc_errors/messages.ftl index d68dba0be5e..ad2e206260d 100644 --- a/compiler/rustc_errors/messages.ftl +++ b/compiler/rustc_errors/messages.ftl @@ -41,5 +41,8 @@ errors_target_invalid_bits = errors_target_invalid_bits_size = {$err} +errors_target_invalid_datalayout_pointer_spec = + unknown pointer specification `{$err}` in datalayout string + errors_target_missing_alignment = missing alignment for `{$cause}` in "data-layout" diff --git a/compiler/rustc_errors/src/diagnostic_impls.rs b/compiler/rustc_errors/src/diagnostic_impls.rs index 8b59ba9984c..eeb9ac28808 100644 --- a/compiler/rustc_errors/src/diagnostic_impls.rs +++ b/compiler/rustc_errors/src/diagnostic_impls.rs @@ -374,6 +374,10 @@ impl Diagnostic<'_, G> for TargetDataLayoutErrors<'_> { TargetDataLayoutErrors::InvalidBitsSize { err } => { Diag::new(dcx, level, fluent::errors_target_invalid_bits_size).with_arg("err", err) } + TargetDataLayoutErrors::UnknownPointerSpecification { err } => { + Diag::new(dcx, level, fluent::errors_target_invalid_datalayout_pointer_spec) + .with_arg("err", err) + } } } } diff --git a/compiler/rustc_middle/src/mir/consts.rs b/compiler/rustc_middle/src/mir/consts.rs index 16edc240544..fb941977528 100644 --- a/compiler/rustc_middle/src/mir/consts.rs +++ b/compiler/rustc_middle/src/mir/consts.rs @@ -142,7 +142,7 @@ impl<'tcx> ConstValue<'tcx> { // The reference itself is stored behind an indirection. // Load the reference, and then load the actual slice contents. let a = tcx.global_alloc(alloc_id).unwrap_memory().inner(); - let ptr_size = tcx.data_layout.pointer_size; + let ptr_size = tcx.data_layout.pointer_size(); if a.size() < offset + 2 * ptr_size { // (partially) dangling reference return None; diff --git a/compiler/rustc_middle/src/mir/interpret/allocation.rs b/compiler/rustc_middle/src/mir/interpret/allocation.rs index d2cadc96b63..f039849d1bb 100644 --- a/compiler/rustc_middle/src/mir/interpret/allocation.rs +++ b/compiler/rustc_middle/src/mir/interpret/allocation.rs @@ -519,7 +519,7 @@ impl Allocation { let mut bytes = alloc_bytes(&*self.bytes, self.align)?; // Adjust provenance of pointers stored in this allocation. 
let mut new_provenance = Vec::with_capacity(self.provenance.ptrs().len()); - let ptr_size = cx.data_layout().pointer_size.bytes_usize(); + let ptr_size = cx.data_layout().pointer_size().bytes_usize(); let endian = cx.data_layout().endian; for &(offset, alloc_id) in self.provenance.ptrs().iter() { let idx = offset.bytes_usize(); @@ -709,7 +709,7 @@ impl Allocation let bits = read_target_uint(cx.data_layout().endian, bytes).unwrap(); if read_provenance { - assert_eq!(range.size, cx.data_layout().pointer_size); + assert_eq!(range.size, cx.data_layout().pointer_size()); // When reading data with provenance, the easy case is finding provenance exactly where we // are reading, then we can put data and provenance back together and return that. @@ -782,7 +782,7 @@ impl Allocation // See if we have to also store some provenance. if let Some(provenance) = provenance { - assert_eq!(range.size, cx.data_layout().pointer_size); + assert_eq!(range.size, cx.data_layout().pointer_size()); self.provenance.insert_ptr(range.start, provenance, cx); } diff --git a/compiler/rustc_middle/src/mir/interpret/allocation/provenance_map.rs b/compiler/rustc_middle/src/mir/interpret/allocation/provenance_map.rs index 63608947eb3..9c6e1664386 100644 --- a/compiler/rustc_middle/src/mir/interpret/allocation/provenance_map.rs +++ b/compiler/rustc_middle/src/mir/interpret/allocation/provenance_map.rs @@ -71,7 +71,7 @@ impl ProvenanceMap { // We have to go back `pointer_size - 1` bytes, as that one would still overlap with // the beginning of this range. let adjusted_start = Size::from_bytes( - range.start.bytes().saturating_sub(cx.data_layout().pointer_size.bytes() - 1), + range.start.bytes().saturating_sub(cx.data_layout().pointer_size().bytes() - 1), ); adjusted_start..range.end() } @@ -142,7 +142,7 @@ impl ProvenanceMap { } pub fn insert_ptr(&mut self, offset: Size, prov: Prov, cx: &impl HasDataLayout) { - debug_assert!(self.range_empty(alloc_range(offset, cx.data_layout().pointer_size), cx)); + debug_assert!(self.range_empty(alloc_range(offset, cx.data_layout().pointer_size()), cx)); self.ptrs.insert(offset, prov); } @@ -160,6 +160,8 @@ impl ProvenanceMap { debug_assert!(self.bytes.is_none()); } + let pointer_size = cx.data_layout().pointer_size(); + // For the ptr-sized part, find the first (inclusive) and last (exclusive) byte of // provenance that overlaps with the given range. let (first, last) = { @@ -172,10 +174,7 @@ impl ProvenanceMap { // This redoes some of the work of `range_get_ptrs_is_empty`, but this path is much // colder than the early return above, so it's worth it. let provenance = self.range_ptrs_get(range, cx); - ( - provenance.first().unwrap().0, - provenance.last().unwrap().0 + cx.data_layout().pointer_size, - ) + (provenance.first().unwrap().0, provenance.last().unwrap().0 + pointer_size) }; // We need to handle clearing the provenance from parts of a pointer. @@ -192,7 +191,7 @@ impl ProvenanceMap { } } if last > end { - let begin_of_last = last - cx.data_layout().pointer_size; + let begin_of_last = last - pointer_size; if !Prov::OFFSET_IS_ADDR { // We can't split up the provenance into less than a pointer. 
return Err(AllocError::OverwritePartialPointer(begin_of_last)); @@ -255,7 +254,7 @@ impl ProvenanceMap { // shift offsets from source allocation to destination allocation (offset - src.start) + dest_offset // `Size` operations }; - let ptr_size = cx.data_layout().pointer_size; + let ptr_size = cx.data_layout().pointer_size(); // # Pointer-sized provenances // Get the provenances that are entirely within this range. diff --git a/compiler/rustc_middle/src/mir/interpret/mod.rs b/compiler/rustc_middle/src/mir/interpret/mod.rs index da9e5bdbadd..0b2645013ba 100644 --- a/compiler/rustc_middle/src/mir/interpret/mod.rs +++ b/compiler/rustc_middle/src/mir/interpret/mod.rs @@ -297,7 +297,7 @@ impl<'tcx> GlobalAlloc<'tcx> { match self { GlobalAlloc::Function { .. } => cx.data_layout().instruction_address_space, GlobalAlloc::Static(..) | GlobalAlloc::Memory(..) | GlobalAlloc::VTable(..) => { - AddressSpace::DATA + AddressSpace::ZERO } } } @@ -380,7 +380,7 @@ impl<'tcx> GlobalAlloc<'tcx> { GlobalAlloc::Function { .. } => (Size::ZERO, Align::ONE), GlobalAlloc::VTable(..) => { // No data to be accessed here. But vtables are pointer-aligned. - return (Size::ZERO, tcx.data_layout.pointer_align.abi); + return (Size::ZERO, tcx.data_layout.pointer_align().abi); } } } diff --git a/compiler/rustc_middle/src/mir/interpret/pointer.rs b/compiler/rustc_middle/src/mir/interpret/pointer.rs index 0ff14f15c13..e25ff7651f6 100644 --- a/compiler/rustc_middle/src/mir/interpret/pointer.rs +++ b/compiler/rustc_middle/src/mir/interpret/pointer.rs @@ -16,7 +16,7 @@ pub trait PointerArithmetic: HasDataLayout { #[inline(always)] fn pointer_size(&self) -> Size { - self.data_layout().pointer_size + self.data_layout().pointer_size() } #[inline(always)] diff --git a/compiler/rustc_middle/src/mir/interpret/value.rs b/compiler/rustc_middle/src/mir/interpret/value.rs index 8092f634dc8..90df29bb7e3 100644 --- a/compiler/rustc_middle/src/mir/interpret/value.rs +++ b/compiler/rustc_middle/src/mir/interpret/value.rs @@ -167,7 +167,7 @@ impl Scalar { #[inline] pub fn from_target_usize(i: u64, cx: &impl HasDataLayout) -> Self { - Self::from_uint(i, cx.data_layout().pointer_size) + Self::from_uint(i, cx.data_layout().pointer_offset()) } #[inline] @@ -205,7 +205,7 @@ impl Scalar { #[inline] pub fn from_target_isize(i: i64, cx: &impl HasDataLayout) -> Self { - Self::from_int(i, cx.data_layout().pointer_size) + Self::from_int(i, cx.data_layout().pointer_offset()) } #[inline] @@ -393,7 +393,7 @@ impl<'tcx, Prov: Provenance> Scalar { /// Converts the scalar to produce a machine-pointer-sized unsigned integer. /// Fails if the scalar is a pointer. pub fn to_target_usize(self, cx: &impl HasDataLayout) -> InterpResult<'tcx, u64> { - let b = self.to_uint(cx.data_layout().pointer_size)?; + let b = self.to_uint(cx.data_layout().pointer_size())?; interp_ok(u64::try_from(b).unwrap()) } @@ -433,7 +433,7 @@ impl<'tcx, Prov: Provenance> Scalar { /// Converts the scalar to produce a machine-pointer-sized signed integer. /// Fails if the scalar is a pointer. 
pub fn to_target_isize(self, cx: &impl HasDataLayout) -> InterpResult<'tcx, i64> { - let b = self.to_int(cx.data_layout().pointer_size)?; + let b = self.to_int(cx.data_layout().pointer_size())?; interp_ok(i64::try_from(b).unwrap()) } diff --git a/compiler/rustc_middle/src/mir/pretty.rs b/compiler/rustc_middle/src/mir/pretty.rs index 6b262a27500..e9f3fb6ac8d 100644 --- a/compiler/rustc_middle/src/mir/pretty.rs +++ b/compiler/rustc_middle/src/mir/pretty.rs @@ -1753,7 +1753,7 @@ pub fn write_allocation_bytes<'tcx, Prov: Provenance, Extra, Bytes: AllocBytes>( let mut i = Size::ZERO; let mut line_start = Size::ZERO; - let ptr_size = tcx.data_layout.pointer_size; + let ptr_size = tcx.data_layout.pointer_size(); let mut ascii = String::new(); diff --git a/compiler/rustc_middle/src/ty/consts/int.rs b/compiler/rustc_middle/src/ty/consts/int.rs index b087ae25486..6ee76b94507 100644 --- a/compiler/rustc_middle/src/ty/consts/int.rs +++ b/compiler/rustc_middle/src/ty/consts/int.rs @@ -252,7 +252,7 @@ impl ScalarInt { #[inline] pub fn try_from_target_usize(i: impl Into, tcx: TyCtxt<'_>) -> Option { - Self::try_from_uint(i, tcx.data_layout.pointer_size) + Self::try_from_uint(i, tcx.data_layout.pointer_size()) } /// Try to convert this ScalarInt to the raw underlying bits. @@ -328,7 +328,7 @@ impl ScalarInt { #[inline] pub fn to_target_usize(&self, tcx: TyCtxt<'_>) -> u64 { - self.to_uint(tcx.data_layout.pointer_size).try_into().unwrap() + self.to_uint(tcx.data_layout.pointer_size()).try_into().unwrap() } #[inline] @@ -402,7 +402,7 @@ impl ScalarInt { #[inline] pub fn to_target_isize(&self, tcx: TyCtxt<'_>) -> i64 { - self.to_int(tcx.data_layout.pointer_size).try_into().unwrap() + self.to_int(tcx.data_layout.pointer_size()).try_into().unwrap() } #[inline] diff --git a/compiler/rustc_middle/src/ty/layout.rs b/compiler/rustc_middle/src/ty/layout.rs index 09379d9d805..809717513c7 100644 --- a/compiler/rustc_middle/src/ty/layout.rs +++ b/compiler/rustc_middle/src/ty/layout.rs @@ -1067,7 +1067,7 @@ where if let Some(variant) = data_variant { // FIXME(erikdesjardins): handle non-default addrspace ptr sizes // (requires passing in the expected address space from the caller) - let ptr_end = offset + Primitive::Pointer(AddressSpace::DATA).size(cx); + let ptr_end = offset + Primitive::Pointer(AddressSpace::ZERO).size(cx); for i in 0..variant.fields.count() { let field_start = variant.fields.offset(i); if field_start <= offset { diff --git a/compiler/rustc_middle/src/ty/print/pretty.rs b/compiler/rustc_middle/src/ty/print/pretty.rs index b4c4f48a0a6..19f2258fe99 100644 --- a/compiler/rustc_middle/src/ty/print/pretty.rs +++ b/compiler/rustc_middle/src/ty/print/pretty.rs @@ -1848,7 +1848,7 @@ pub trait PrettyPrinter<'tcx>: Printer<'tcx> + fmt::Write { } // Pointer types ty::Ref(..) | ty::RawPtr(_, _) | ty::FnPtr(..) 
=> { - let data = int.to_bits(self.tcx().data_layout.pointer_size); + let data = int.to_bits(self.tcx().data_layout.pointer_size()); self.typed_value( |this| { write!(this, "0x{data:x}")?; diff --git a/compiler/rustc_middle/src/ty/vtable.rs b/compiler/rustc_middle/src/ty/vtable.rs index 74b6a840a2e..6fc19c82342 100644 --- a/compiler/rustc_middle/src/ty/vtable.rs +++ b/compiler/rustc_middle/src/ty/vtable.rs @@ -106,8 +106,8 @@ pub(super) fn vtable_allocation_provider<'tcx>( let size = layout.size.bytes(); let align = layout.align.abi.bytes(); - let ptr_size = tcx.data_layout.pointer_size; - let ptr_align = tcx.data_layout.pointer_align.abi; + let ptr_size = tcx.data_layout.pointer_size(); + let ptr_align = tcx.data_layout.pointer_align().abi; let vtable_size = ptr_size * u64::try_from(vtable_entries.len()).unwrap(); let mut vtable = Allocation::new(vtable_size, ptr_align, AllocInit::Uninit, ()); diff --git a/compiler/rustc_mir_build/src/thir/constant.rs b/compiler/rustc_mir_build/src/thir/constant.rs index 8e218a380e9..52e6f2d3e1a 100644 --- a/compiler/rustc_mir_build/src/thir/constant.rs +++ b/compiler/rustc_mir_build/src/thir/constant.rs @@ -20,7 +20,7 @@ pub(crate) fn lit_to_const<'tcx>( let trunc = |n, width: ty::UintTy| { let width = width - .normalize(tcx.data_layout.pointer_size.bits().try_into().unwrap()) + .normalize(tcx.data_layout.pointer_size().bits().try_into().unwrap()) .bit_width() .unwrap(); let width = Size::from_bits(width); diff --git a/compiler/rustc_session/src/config/cfg.rs b/compiler/rustc_session/src/config/cfg.rs index cbfe9e0da6a..62891eb4f26 100644 --- a/compiler/rustc_session/src/config/cfg.rs +++ b/compiler/rustc_session/src/config/cfg.rs @@ -278,7 +278,7 @@ pub(crate) fn default_configuration(sess: &Session) -> Cfg { }; insert_atomic(sym::integer(i), align); if sess.target.pointer_width as u64 == i { - insert_atomic(sym::ptr, layout.pointer_align.abi); + insert_atomic(sym::ptr, layout.pointer_align().abi); } } } diff --git a/compiler/rustc_smir/src/rustc_smir/alloc.rs b/compiler/rustc_smir/src/rustc_smir/alloc.rs index ecaf3571896..acd805e3574 100644 --- a/compiler/rustc_smir/src/rustc_smir/alloc.rs +++ b/compiler/rustc_smir/src/rustc_smir/alloc.rs @@ -47,15 +47,12 @@ pub fn try_new_slice<'tcx, B: Bridge>( let scalar_ptr = Scalar::from_pointer(ptr, &cx.tcx); let scalar_meta: Scalar = Scalar::from_target_usize(meta, &cx.tcx); let mut allocation = Allocation::new(layout.size, layout.align.abi, AllocInit::Uninit, ()); + let ptr_size = cx.tcx.data_layout.pointer_size(); allocation - .write_scalar(&cx.tcx, alloc_range(Size::ZERO, cx.tcx.data_layout.pointer_size), scalar_ptr) + .write_scalar(&cx.tcx, alloc_range(Size::ZERO, ptr_size), scalar_ptr) .map_err(|e| B::Error::from_internal(e))?; allocation - .write_scalar( - &cx.tcx, - alloc_range(cx.tcx.data_layout.pointer_size, scalar_meta.size()), - scalar_meta, - ) + .write_scalar(&cx.tcx, alloc_range(ptr_size, scalar_meta.size()), scalar_meta) .map_err(|e| B::Error::from_internal(e))?; Ok(allocation) diff --git a/compiler/rustc_smir/src/rustc_smir/context/impls.rs b/compiler/rustc_smir/src/rustc_smir/context/impls.rs index 89ae47143ef..56021d93714 100644 --- a/compiler/rustc_smir/src/rustc_smir/context/impls.rs +++ b/compiler/rustc_smir/src/rustc_smir/context/impls.rs @@ -112,7 +112,7 @@ impl<'tcx, B: Bridge> SmirCtxt<'tcx, B> { } pub fn target_pointer_size(&self) -> usize { - self.tcx.data_layout.pointer_size.bits().try_into().unwrap() + self.tcx.data_layout.pointer_size().bits().try_into().unwrap() } pub fn 
entry_fn(&self) -> Option { diff --git a/compiler/rustc_target/src/callconv/loongarch.rs b/compiler/rustc_target/src/callconv/loongarch.rs index c779720f97b..27b41cc09ed 100644 --- a/compiler/rustc_target/src/callconv/loongarch.rs +++ b/compiler/rustc_target/src/callconv/loongarch.rs @@ -334,7 +334,7 @@ where Ty: TyAbiInterface<'a, C> + Copy, C: HasDataLayout + HasTargetSpec, { - let xlen = cx.data_layout().pointer_size.bits(); + let xlen = cx.data_layout().pointer_size().bits(); let flen = match &cx.target_spec().llvm_abiname[..] { "ilp32f" | "lp64f" => 32, "ilp32d" | "lp64d" => 64, @@ -369,7 +369,7 @@ where Ty: TyAbiInterface<'a, C> + Copy, C: HasDataLayout + HasTargetSpec, { - let grlen = cx.data_layout().pointer_size.bits(); + let grlen = cx.data_layout().pointer_size().bits(); for arg in fn_abi.args.iter_mut() { if arg.is_ignore() { diff --git a/compiler/rustc_target/src/callconv/mips.rs b/compiler/rustc_target/src/callconv/mips.rs index 6162267a0d0..48a01da865b 100644 --- a/compiler/rustc_target/src/callconv/mips.rs +++ b/compiler/rustc_target/src/callconv/mips.rs @@ -10,7 +10,7 @@ where ret.extend_integer_width_to(32); } else { ret.make_indirect(); - *offset += cx.data_layout().pointer_size; + *offset += cx.data_layout().pointer_size(); } } diff --git a/compiler/rustc_target/src/callconv/mod.rs b/compiler/rustc_target/src/callconv/mod.rs index 71cc2a45563..ab3271220eb 100644 --- a/compiler/rustc_target/src/callconv/mod.rs +++ b/compiler/rustc_target/src/callconv/mod.rs @@ -733,7 +733,7 @@ impl<'a, Ty> FnAbi<'a, Ty> { } if arg_idx.is_none() - && arg.layout.size > Primitive::Pointer(AddressSpace::DATA).size(cx) * 2 + && arg.layout.size > Primitive::Pointer(AddressSpace::ZERO).size(cx) * 2 && !matches!(arg.layout.backend_repr, BackendRepr::SimdVector { .. 
}) { // Return values larger than 2 registers using a return area @@ -792,7 +792,7 @@ impl<'a, Ty> FnAbi<'a, Ty> { let size = arg.layout.size; if arg.layout.is_sized() - && size <= Primitive::Pointer(AddressSpace::DATA).size(cx) + && size <= Primitive::Pointer(AddressSpace::ZERO).size(cx) { // We want to pass small aggregates as immediates, but using // an LLVM aggregate type for this leads to bad optimizations, diff --git a/compiler/rustc_target/src/callconv/riscv.rs b/compiler/rustc_target/src/callconv/riscv.rs index 6a2038f9381..a06f54d60e7 100644 --- a/compiler/rustc_target/src/callconv/riscv.rs +++ b/compiler/rustc_target/src/callconv/riscv.rs @@ -418,7 +418,7 @@ where "ilp32d" | "lp64d" => 64, _ => 0, }; - let xlen = cx.data_layout().pointer_size.bits(); + let xlen = cx.data_layout().pointer_size().bits(); let mut avail_gprs = 8; let mut avail_fprs = 8; @@ -448,7 +448,7 @@ where Ty: TyAbiInterface<'a, C> + Copy, C: HasDataLayout + HasTargetSpec, { - let xlen = cx.data_layout().pointer_size.bits(); + let xlen = cx.data_layout().pointer_size().bits(); for arg in fn_abi.args.iter_mut() { if arg.is_ignore() { diff --git a/compiler/rustc_target/src/callconv/sparc.rs b/compiler/rustc_target/src/callconv/sparc.rs index 6162267a0d0..48a01da865b 100644 --- a/compiler/rustc_target/src/callconv/sparc.rs +++ b/compiler/rustc_target/src/callconv/sparc.rs @@ -10,7 +10,7 @@ where ret.extend_integer_width_to(32); } else { ret.make_indirect(); - *offset += cx.data_layout().pointer_size; + *offset += cx.data_layout().pointer_size(); } } diff --git a/compiler/rustc_target/src/callconv/x86.rs b/compiler/rustc_target/src/callconv/x86.rs index 8328f818f9b..943ca808c27 100644 --- a/compiler/rustc_target/src/callconv/x86.rs +++ b/compiler/rustc_target/src/callconv/x86.rs @@ -219,7 +219,7 @@ where // SSE ABI. We prefer this over integer registers as float scalars need to be in SSE // registers for float operations, so that's the best place to pass them around. fn_abi.ret.cast_to(Reg { kind: RegKind::Vector, size: fn_abi.ret.layout.size }); - } else if fn_abi.ret.layout.size <= Primitive::Pointer(AddressSpace::DATA).size(cx) { + } else if fn_abi.ret.layout.size <= Primitive::Pointer(AddressSpace::ZERO).size(cx) { // Same size or smaller than pointer, return in an integer register. fn_abi.ret.cast_to(Reg { kind: RegKind::Integer, size: fn_abi.ret.layout.size }); } else { diff --git a/compiler/rustc_target/src/spec/mod.rs b/compiler/rustc_target/src/spec/mod.rs index 7a49f004072..5346b206a17 100644 --- a/compiler/rustc_target/src/spec/mod.rs +++ b/compiler/rustc_target/src/spec/mod.rs @@ -2198,7 +2198,10 @@ pub struct TargetMetadata { impl Target { pub fn parse_data_layout(&self) -> Result> { - let mut dl = TargetDataLayout::parse_from_llvm_datalayout_string(&self.data_layout)?; + let mut dl = TargetDataLayout::parse_from_llvm_datalayout_string( + &self.data_layout, + self.options.default_address_space, + )?; // Perform consistency checks against the Target information. 
if dl.endian != self.endian { @@ -2209,9 +2212,10 @@ impl Target { } let target_pointer_width: u64 = self.pointer_width.into(); - if dl.pointer_size.bits() != target_pointer_width { + let dl_pointer_size: u64 = dl.pointer_size().bits(); + if dl_pointer_size != target_pointer_width { return Err(TargetDataLayoutErrors::InconsistentTargetPointerWidth { - pointer_size: dl.pointer_size.bits(), + pointer_size: dl_pointer_size, target: self.pointer_width, }); } @@ -2650,6 +2654,11 @@ pub struct TargetOptions { /// Whether the target supports XRay instrumentation. pub supports_xray: bool, + /// The default address space for this target. When using LLVM as a backend, most targets simply + /// use LLVM's default address space (0). Some other targets, such as CHERI targets, use a + /// custom default address space (in this specific case, `200`). + pub default_address_space: rustc_abi::AddressSpace, + /// Whether the targets supports -Z small-data-threshold small_data_threshold_support: SmallDataThresholdSupport, } @@ -2878,6 +2887,7 @@ impl Default for TargetOptions { entry_name: "main".into(), entry_abi: CanonAbi::C, supports_xray: false, + default_address_space: rustc_abi::AddressSpace::ZERO, small_data_threshold_support: SmallDataThresholdSupport::DefaultForArch, } } diff --git a/compiler/rustc_transmute/src/layout/tree.rs b/compiler/rustc_transmute/src/layout/tree.rs index 150f5d118e0..3f83b4d50aa 100644 --- a/compiler/rustc_transmute/src/layout/tree.rs +++ b/compiler/rustc_transmute/src/layout/tree.rs @@ -296,7 +296,7 @@ pub(crate) mod rustc { } let target = cx.data_layout(); - let pointer_size = target.pointer_size; + let pointer_size = target.pointer_size(); match ty.kind() { ty::Bool => Ok(Self::bool()), diff --git a/compiler/rustc_ty_utils/src/layout.rs b/compiler/rustc_ty_utils/src/layout.rs index a225b712d4b..163e2b30883 100644 --- a/compiler/rustc_ty_utils/src/layout.rs +++ b/compiler/rustc_ty_utils/src/layout.rs @@ -379,7 +379,7 @@ fn layout_of_uncached<'tcx>( // Potentially-wide pointers. ty::Ref(_, pointee, _) | ty::RawPtr(pointee, _) => { - let mut data_ptr = scalar_unit(Pointer(AddressSpace::DATA)); + let mut data_ptr = scalar_unit(Pointer(AddressSpace::ZERO)); if !ty.is_raw_ptr() { data_ptr.valid_range_mut().start = 1; } @@ -435,7 +435,7 @@ fn layout_of_uncached<'tcx>( } ty::Slice(_) | ty::Str => scalar_unit(Int(dl.ptr_sized_integer(), false)), ty::Dynamic(..) => { - let mut vtable = scalar_unit(Pointer(AddressSpace::DATA)); + let mut vtable = scalar_unit(Pointer(AddressSpace::ZERO)); vtable.valid_range_mut().start = 1; vtable } diff --git a/src/tools/clippy/clippy_lints/src/casts/fn_to_numeric_cast.rs b/src/tools/clippy/clippy_lints/src/casts/fn_to_numeric_cast.rs index 105477093b5..55e27a05f3c 100644 --- a/src/tools/clippy/clippy_lints/src/casts/fn_to_numeric_cast.rs +++ b/src/tools/clippy/clippy_lints/src/casts/fn_to_numeric_cast.rs @@ -17,7 +17,7 @@ pub(super) fn check(cx: &LateContext<'_>, expr: &Expr<'_>, cast_expr: &Expr<'_>, ty::FnDef(..) | ty::FnPtr(..) 
=> { let mut applicability = Applicability::MaybeIncorrect; - if to_nbits >= cx.tcx.data_layout.pointer_size.bits() && !cast_to.is_usize() { + if to_nbits >= cx.tcx.data_layout.pointer_size().bits() && !cast_to.is_usize() { let from_snippet = snippet_with_applicability(cx, cast_expr.span, "x", &mut applicability); span_lint_and_sugg( cx, diff --git a/src/tools/clippy/clippy_lints/src/casts/fn_to_numeric_cast_with_truncation.rs b/src/tools/clippy/clippy_lints/src/casts/fn_to_numeric_cast_with_truncation.rs index 700b7d0d426..4da79205e20 100644 --- a/src/tools/clippy/clippy_lints/src/casts/fn_to_numeric_cast_with_truncation.rs +++ b/src/tools/clippy/clippy_lints/src/casts/fn_to_numeric_cast_with_truncation.rs @@ -17,7 +17,7 @@ pub(super) fn check(cx: &LateContext<'_>, expr: &Expr<'_>, cast_expr: &Expr<'_>, let mut applicability = Applicability::MaybeIncorrect; let from_snippet = snippet_with_applicability(cx, cast_expr.span, "x", &mut applicability); - if to_nbits < cx.tcx.data_layout.pointer_size.bits() { + if to_nbits < cx.tcx.data_layout.pointer_size().bits() { span_lint_and_sugg( cx, FN_TO_NUMERIC_CAST_WITH_TRUNCATION, diff --git a/src/tools/clippy/clippy_lints/src/casts/utils.rs b/src/tools/clippy/clippy_lints/src/casts/utils.rs index 318a1646477..d846d78b9ee 100644 --- a/src/tools/clippy/clippy_lints/src/casts/utils.rs +++ b/src/tools/clippy/clippy_lints/src/casts/utils.rs @@ -5,7 +5,7 @@ use rustc_middle::ty::{self, AdtDef, IntTy, Ty, TyCtxt, UintTy, VariantDiscr}; /// integral type. pub(super) fn int_ty_to_nbits(tcx: TyCtxt<'_>, ty: Ty<'_>) -> Option { match ty.kind() { - ty::Int(IntTy::Isize) | ty::Uint(UintTy::Usize) => Some(tcx.data_layout.pointer_size.bits()), + ty::Int(IntTy::Isize) | ty::Uint(UintTy::Usize) => Some(tcx.data_layout.pointer_size().bits()), ty::Int(i) => i.bit_width(), ty::Uint(i) => i.bit_width(), _ => None, diff --git a/src/tools/clippy/clippy_lints/src/enum_clike.rs b/src/tools/clippy/clippy_lints/src/enum_clike.rs index 098571a5351..c828fc57f76 100644 --- a/src/tools/clippy/clippy_lints/src/enum_clike.rs +++ b/src/tools/clippy/clippy_lints/src/enum_clike.rs @@ -35,7 +35,7 @@ declare_lint_pass!(UnportableVariant => [ENUM_CLIKE_UNPORTABLE_VARIANT]); impl<'tcx> LateLintPass<'tcx> for UnportableVariant { #[expect(clippy::cast_possible_wrap)] fn check_item(&mut self, cx: &LateContext<'tcx>, item: &'tcx Item<'_>) { - if cx.tcx.data_layout.pointer_size.bits() != 64 { + if cx.tcx.data_layout.pointer_size().bits() != 64 { return; } if let ItemKind::Enum(_, _, def) = &item.kind { diff --git a/src/tools/clippy/clippy_utils/src/consts.rs b/src/tools/clippy/clippy_utils/src/consts.rs index 09299c869dc..ba0376e4d40 100644 --- a/src/tools/clippy/clippy_utils/src/consts.rs +++ b/src/tools/clippy/clippy_utils/src/consts.rs @@ -918,7 +918,7 @@ fn mir_is_empty<'tcx>(tcx: TyCtxt<'tcx>, result: mir::Const<'tcx>) -> Option { - let mut data_ptr = scalar_unit(dl, Primitive::Pointer(AddressSpace::DATA)); + let mut data_ptr = scalar_unit(dl, Primitive::Pointer(AddressSpace::ZERO)); if matches!(ty.kind(Interner), TyKind::Ref(..)) { data_ptr.valid_range_mut().start = 1; } @@ -285,7 +285,7 @@ pub fn layout_of_ty_query( scalar_unit(dl, Primitive::Int(dl.ptr_sized_integer(), false)) } TyKind::Dyn(..) 
=> { - let mut vtable = scalar_unit(dl, Primitive::Pointer(AddressSpace::DATA)); + let mut vtable = scalar_unit(dl, Primitive::Pointer(AddressSpace::ZERO)); vtable.valid_range_mut().start = 1; vtable } diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/layout/target.rs b/src/tools/rust-analyzer/crates/hir-ty/src/layout/target.rs index e1e1c44996c..88c33ecccad 100644 --- a/src/tools/rust-analyzer/crates/hir-ty/src/layout/target.rs +++ b/src/tools/rust-analyzer/crates/hir-ty/src/layout/target.rs @@ -2,7 +2,7 @@ use base_db::Crate; use hir_def::layout::TargetDataLayout; -use rustc_abi::{AlignFromBytesError, TargetDataLayoutErrors}; +use rustc_abi::{AlignFromBytesError, TargetDataLayoutErrors, AddressSpace}; use triomphe::Arc; use crate::db::HirDatabase; @@ -12,7 +12,7 @@ pub fn target_data_layout_query( krate: Crate, ) -> Result, Arc> { match &krate.workspace_data(db).data_layout { - Ok(it) => match TargetDataLayout::parse_from_llvm_datalayout_string(it) { + Ok(it) => match TargetDataLayout::parse_from_llvm_datalayout_string(it, AddressSpace::ZERO) { Ok(it) => Ok(Arc::new(it)), Err(e) => { Err(match e { @@ -39,6 +39,7 @@ pub fn target_data_layout_query( target, } => format!(r#"inconsistent target specification: "data-layout" claims pointers are {pointer_size}-bit, while "target-pointer-width" is `{target}`"#), TargetDataLayoutErrors::InvalidBitsSize { err } => err, + TargetDataLayoutErrors::UnknownPointerSpecification { err } => format!(r#"use of unknown pointer specifer in "data-layout": {err}"#), }.into()) } }, diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/mir/eval.rs b/src/tools/rust-analyzer/crates/hir-ty/src/mir/eval.rs index 1ec55a82092..55fada14363 100644 --- a/src/tools/rust-analyzer/crates/hir-ty/src/mir/eval.rs +++ b/src/tools/rust-analyzer/crates/hir-ty/src/mir/eval.rs @@ -630,7 +630,7 @@ impl Evaluator<'_> { Ok(target_data_layout) => target_data_layout, Err(e) => return Err(MirEvalError::TargetDataLayoutNotAvailable(e)), }; - let cached_ptr_size = target_data_layout.pointer_size.bytes_usize(); + let cached_ptr_size = target_data_layout.pointer_size().bytes_usize(); Ok(Evaluator { target_data_layout, stack: vec![0], -- cgit 1.4.1-3-g733a5 From 1eff043e7aab5f5634c1e29f5cf161a6437bcfa3 Mon Sep 17 00:00:00 2001 From: Martin Nordholts Date: Mon, 7 Jul 2025 09:22:28 +0200 Subject: rustc_codegen_llvm: Remove reference to non-existing `no_landing_pads()` Removing this reference was forgotten in eb4725fc54056. Grepping for no_landing_pads returns no hits after this. --- compiler/rustc_codegen_llvm/src/attributes.rs | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) (limited to 'compiler/rustc_codegen_llvm/src') diff --git a/compiler/rustc_codegen_llvm/src/attributes.rs b/compiler/rustc_codegen_llvm/src/attributes.rs index adb53e0b66c..5a065f31c30 100644 --- a/compiler/rustc_codegen_llvm/src/attributes.rs +++ b/compiler/rustc_codegen_llvm/src/attributes.rs @@ -377,12 +377,11 @@ pub(crate) fn llfn_attrs_from_instance<'ll, 'tcx>( // that no exceptions passes by it. This is normally the case for the // ELF x86-64 abi, but it can be disabled for some compilation units. // - // Typically when we're compiling with `-C panic=abort` (which implies this - // `no_landing_pads` check) we don't need `uwtable` because we can't - // generate any exceptions! On Windows, however, exceptions include other - // events such as illegal instructions, segfaults, etc. 
This means that on - // Windows we end up still needing the `uwtable` attribute even if the `-C - // panic=abort` flag is passed. + // Typically when we're compiling with `-C panic=abort` we don't need + // `uwtable` because we can't generate any exceptions! On Windows, however, + // exceptions include other events such as illegal instructions, segfaults, + // etc. This means that on Windows we end up still needing the `uwtable` + // attribute even if the `-C panic=abort` flag is passed. // // You can also find more info on why Windows always requires uwtables here: // https://bugzilla.mozilla.org/show_bug.cgi?id=1302078 -- cgit 1.4.1-3-g733a5 From aa364cac52f6f4b79dfd3eeb7189bbd259184d57 Mon Sep 17 00:00:00 2001 From: Martin Nordholts Date: Mon, 7 Jul 2025 09:04:16 +0200 Subject: compiler: Deduplicate `must_emit_unwind_tables()` comments There is one comment at a call site and one comment in the function definition that are mostly saying the same thing. Fold the call site comment into the function definition comment to reduce duplication. There are actually some inaccuracies in the comments but let's deduplicate before we address the inaccuracies. --- compiler/rustc_codegen_llvm/src/attributes.rs | 15 --------------- compiler/rustc_session/src/session.rs | 9 ++++++++- 2 files changed, 8 insertions(+), 16 deletions(-) (limited to 'compiler/rustc_codegen_llvm/src') diff --git a/compiler/rustc_codegen_llvm/src/attributes.rs b/compiler/rustc_codegen_llvm/src/attributes.rs index 5a065f31c30..1ea5a062254 100644 --- a/compiler/rustc_codegen_llvm/src/attributes.rs +++ b/compiler/rustc_codegen_llvm/src/attributes.rs @@ -370,21 +370,6 @@ pub(crate) fn llfn_attrs_from_instance<'ll, 'tcx>( }; to_add.extend(inline_attr(cx, inline)); - // The `uwtable` attribute according to LLVM is: - // - // This attribute indicates that the ABI being targeted requires that an - // unwind table entry be produced for this function even if we can show - // that no exceptions passes by it. This is normally the case for the - // ELF x86-64 abi, but it can be disabled for some compilation units. - // - // Typically when we're compiling with `-C panic=abort` we don't need - // `uwtable` because we can't generate any exceptions! On Windows, however, - // exceptions include other events such as illegal instructions, segfaults, - // etc. This means that on Windows we end up still needing the `uwtable` - // attribute even if the `-C panic=abort` flag is passed. - // - // You can also find more info on why Windows always requires uwtables here: - // https://bugzilla.mozilla.org/show_bug.cgi?id=1302078 if cx.sess().must_emit_unwind_tables() { to_add.push(uwtable_attr(cx.llcx, cx.sess().opts.unstable_opts.use_sync_unwind)); } diff --git a/compiler/rustc_session/src/session.rs b/compiler/rustc_session/src/session.rs index 8386fe8dab0..85bd8340c3c 100644 --- a/compiler/rustc_session/src/session.rs +++ b/compiler/rustc_session/src/session.rs @@ -776,8 +776,15 @@ impl Session { pub fn must_emit_unwind_tables(&self) -> bool { // This is used to control the emission of the `uwtable` attribute on - // LLVM functions. + // LLVM functions. The `uwtable` attribute according to LLVM is: // + // This attribute indicates that the ABI being targeted requires that an + // unwind table entry be produced for this function even if we can show + // that no exceptions passes by it. This is normally the case for the + // ELF x86-64 abi, but it can be disabled for some compilation units. 
+ // + // Typically when we're compiling with `-C panic=abort` we don't need + // `uwtable` because we can't generate any exceptions! // Unwind tables are needed when compiling with `-C panic=unwind`, but // LLVM won't omit unwind tables unless the function is also marked as // `nounwind`, so users are allowed to disable `uwtable` emission. -- cgit 1.4.1-3-g733a5 From 3b48407f93abd565b24d138cc7df56ee37855d82 Mon Sep 17 00:00:00 2001 From: Yotam Ofek Date: Sat, 28 Jun 2025 23:40:02 +0000 Subject: Remove unused allow attrs --- compiler/rustc_codegen_gcc/src/lib.rs | 1 - compiler/rustc_codegen_llvm/src/llvm/enzyme_ffi.rs | 1 - compiler/rustc_errors/src/lib.rs | 1 - compiler/rustc_infer/src/lib.rs | 2 -- compiler/rustc_lint/src/early/diagnostics.rs | 3 --- compiler/rustc_lint/src/lints.rs | 1 - compiler/rustc_middle/src/arena.rs | 2 -- compiler/rustc_parse/src/lib.rs | 1 - compiler/rustc_query_impl/src/lib.rs | 1 - src/librustdoc/json/conversions.rs | 2 -- 10 files changed, 15 deletions(-) (limited to 'compiler/rustc_codegen_llvm/src') diff --git a/compiler/rustc_codegen_gcc/src/lib.rs b/compiler/rustc_codegen_gcc/src/lib.rs index 1a6eec0ed0b..d8fae1ca47d 100644 --- a/compiler/rustc_codegen_gcc/src/lib.rs +++ b/compiler/rustc_codegen_gcc/src/lib.rs @@ -19,7 +19,6 @@ #![doc(rust_logo)] #![feature(rustdoc_internals)] #![feature(rustc_private)] -#![allow(broken_intra_doc_links)] #![recursion_limit = "256"] #![warn(rust_2018_idioms)] #![warn(unused_lifetimes)] diff --git a/compiler/rustc_codegen_llvm/src/llvm/enzyme_ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/enzyme_ffi.rs index 7b00b2da6ba..c696b8d8ff2 100644 --- a/compiler/rustc_codegen_llvm/src/llvm/enzyme_ffi.rs +++ b/compiler/rustc_codegen_llvm/src/llvm/enzyme_ffi.rs @@ -1,4 +1,3 @@ -#![allow(non_camel_case_types)] #![expect(dead_code)] use libc::{c_char, c_uint}; diff --git a/compiler/rustc_errors/src/lib.rs b/compiler/rustc_errors/src/lib.rs index 69ad15c6081..381d780077d 100644 --- a/compiler/rustc_errors/src/lib.rs +++ b/compiler/rustc_errors/src/lib.rs @@ -3,7 +3,6 @@ //! This module contains the code for creating and emitting diagnostics. 
// tidy-alphabetical-start -#![allow(incomplete_features)] #![allow(internal_features)] #![allow(rustc::diagnostic_outside_of_impl)] #![allow(rustc::direct_use_of_rustc_type_ir)] diff --git a/compiler/rustc_infer/src/lib.rs b/compiler/rustc_infer/src/lib.rs index 285e2912937..e562c583313 100644 --- a/compiler/rustc_infer/src/lib.rs +++ b/compiler/rustc_infer/src/lib.rs @@ -14,9 +14,7 @@ // tidy-alphabetical-start #![allow(internal_features)] -#![allow(rustc::diagnostic_outside_of_impl)] #![allow(rustc::direct_use_of_rustc_type_ir)] -#![allow(rustc::untranslatable_diagnostic)] #![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")] #![doc(rust_logo)] #![feature(assert_matches)] diff --git a/compiler/rustc_lint/src/early/diagnostics.rs b/compiler/rustc_lint/src/early/diagnostics.rs index 3b0a36186b6..653559009cc 100644 --- a/compiler/rustc_lint/src/early/diagnostics.rs +++ b/compiler/rustc_lint/src/early/diagnostics.rs @@ -1,6 +1,3 @@ -#![allow(rustc::diagnostic_outside_of_impl)] -#![allow(rustc::untranslatable_diagnostic)] - use std::borrow::Cow; use rustc_ast::util::unicode::TEXT_FLOW_CONTROL_CHARS; diff --git a/compiler/rustc_lint/src/lints.rs b/compiler/rustc_lint/src/lints.rs index 5e05b58146e..21148833eaf 100644 --- a/compiler/rustc_lint/src/lints.rs +++ b/compiler/rustc_lint/src/lints.rs @@ -1,4 +1,3 @@ -#![allow(rustc::diagnostic_outside_of_impl)] #![allow(rustc::untranslatable_diagnostic)] use std::num::NonZero; diff --git a/compiler/rustc_middle/src/arena.rs b/compiler/rustc_middle/src/arena.rs index a0f45974089..f140ab4755b 100644 --- a/compiler/rustc_middle/src/arena.rs +++ b/compiler/rustc_middle/src/arena.rs @@ -1,5 +1,3 @@ -#![allow(rustc::usage_of_ty_tykind)] - /// This higher-order macro declares a list of types which can be allocated by `Arena`. /// /// Specifying the `decode` modifier will add decode impls for `&T` and `&[T]` where `T` is the type diff --git a/compiler/rustc_parse/src/lib.rs b/compiler/rustc_parse/src/lib.rs index 8ea535599c9..2050c5f9608 100644 --- a/compiler/rustc_parse/src/lib.rs +++ b/compiler/rustc_parse/src/lib.rs @@ -1,7 +1,6 @@ //! The main parser interface. // tidy-alphabetical-start -#![allow(internal_features)] #![allow(rustc::diagnostic_outside_of_impl)] #![allow(rustc::untranslatable_diagnostic)] #![feature(assert_matches)] diff --git a/compiler/rustc_query_impl/src/lib.rs b/compiler/rustc_query_impl/src/lib.rs index b7d8af2c995..306d4dbc4b4 100644 --- a/compiler/rustc_query_impl/src/lib.rs +++ b/compiler/rustc_query_impl/src/lib.rs @@ -2,7 +2,6 @@ // tidy-alphabetical-start #![allow(internal_features)] -#![allow(unused_parens)] #![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")] #![doc(rust_logo)] #![feature(min_specialization)] diff --git a/src/librustdoc/json/conversions.rs b/src/librustdoc/json/conversions.rs index f51b35097f6..abf3f3fcedd 100644 --- a/src/librustdoc/json/conversions.rs +++ b/src/librustdoc/json/conversions.rs @@ -2,8 +2,6 @@ //! the `clean` types but with some fields removed or stringified to simplify the output and not //! expose unstable compiler internals. 
-#![allow(rustc::default_hash_types)] - use rustc_abi::ExternAbi; use rustc_ast::ast; use rustc_attr_data_structures::{self as attrs, DeprecatedSince}; -- cgit 1.4.1-3-g733a5 From 49421d1fa382fba84792e5d5dd7721c1c3e0e46e Mon Sep 17 00:00:00 2001 From: mejrs <59372212+mejrs@users.noreply.github.com> Date: Tue, 24 Jun 2025 16:58:46 +0200 Subject: Remove support for dynamic allocas --- compiler/rustc_codegen_gcc/src/builder.rs | 4 --- compiler/rustc_codegen_llvm/src/builder.rs | 10 ------ compiler/rustc_codegen_llvm/src/llvm/ffi.rs | 6 ---- compiler/rustc_codegen_ssa/src/mir/mod.rs | 6 ++-- compiler/rustc_codegen_ssa/src/mir/operand.rs | 40 +----------------------- compiler/rustc_codegen_ssa/src/mir/rvalue.rs | 21 ------------- compiler/rustc_codegen_ssa/src/mir/statement.rs | 7 ++++- compiler/rustc_codegen_ssa/src/traits/builder.rs | 1 - library/core/src/mem/mod.rs | 2 +- 9 files changed, 11 insertions(+), 86 deletions(-) (limited to 'compiler/rustc_codegen_llvm/src') diff --git a/compiler/rustc_codegen_gcc/src/builder.rs b/compiler/rustc_codegen_gcc/src/builder.rs index b1785af444a..28d1ec7d895 100644 --- a/compiler/rustc_codegen_gcc/src/builder.rs +++ b/compiler/rustc_codegen_gcc/src/builder.rs @@ -926,10 +926,6 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { .get_address(self.location) } - fn dynamic_alloca(&mut self, _len: RValue<'gcc>, _align: Align) -> RValue<'gcc> { - unimplemented!(); - } - fn load(&mut self, pointee_ty: Type<'gcc>, ptr: RValue<'gcc>, align: Align) -> RValue<'gcc> { let block = self.llbb(); let function = block.get_function(); diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs index d0aa7320b4b..9c3b866aa3c 100644 --- a/compiler/rustc_codegen_llvm/src/builder.rs +++ b/compiler/rustc_codegen_llvm/src/builder.rs @@ -538,16 +538,6 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { } } - fn dynamic_alloca(&mut self, size: &'ll Value, align: Align) -> &'ll Value { - unsafe { - let alloca = - llvm::LLVMBuildArrayAlloca(self.llbuilder, self.cx().type_i8(), size, UNNAMED); - llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint); - // Cast to default addrspace if necessary - llvm::LLVMBuildPointerCast(self.llbuilder, alloca, self.cx().type_ptr(), UNNAMED) - } - } - fn load(&mut self, ty: &'ll Type, ptr: &'ll Value, align: Align) -> &'ll Value { unsafe { let load = llvm::LLVMBuildLoad2(self.llbuilder, ty, ptr, UNNAMED); diff --git a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs index 91ada856d59..00a6ec7dfba 100644 --- a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs +++ b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs @@ -1492,12 +1492,6 @@ unsafe extern "C" { Ty: &'a Type, Name: *const c_char, ) -> &'a Value; - pub(crate) fn LLVMBuildArrayAlloca<'a>( - B: &Builder<'a>, - Ty: &'a Type, - Val: &'a Value, - Name: *const c_char, - ) -> &'a Value; pub(crate) fn LLVMBuildLoad2<'a>( B: &Builder<'a>, Ty: &'a Type, diff --git a/compiler/rustc_codegen_ssa/src/mir/mod.rs b/compiler/rustc_codegen_ssa/src/mir/mod.rs index 10b44a1faf0..4e35b143173 100644 --- a/compiler/rustc_codegen_ssa/src/mir/mod.rs +++ b/compiler/rustc_codegen_ssa/src/mir/mod.rs @@ -140,8 +140,8 @@ enum LocalRef<'tcx, V> { Place(PlaceRef<'tcx, V>), /// `UnsizedPlace(p)`: `p` itself is a thin pointer (indirect place). /// `*p` is the wide pointer that references the actual unsized place. 
- /// Every time it is initialized, we have to reallocate the place - /// and update the wide pointer. That's the reason why it is indirect. + /// + /// Rust has no alloca and thus no ability to move the unsized place. UnsizedPlace(PlaceRef<'tcx, V>), /// The backend [`OperandValue`] has already been generated. Operand(OperandRef<'tcx, V>), @@ -498,7 +498,7 @@ fn arg_local_refs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( LocalRef::Place(PlaceRef::new_sized(llarg, arg.layout)) } } - // Unsized indirect qrguments + // Unsized indirect arguments PassMode::Indirect { attrs: _, meta_attrs: Some(_), on_stack: _ } => { // As the storage for the indirect argument lives during // the whole function call, we just copy the wide pointer. diff --git a/compiler/rustc_codegen_ssa/src/mir/operand.rs b/compiler/rustc_codegen_ssa/src/mir/operand.rs index 2896dfd5463..f6a8e1fc6a5 100644 --- a/compiler/rustc_codegen_ssa/src/mir/operand.rs +++ b/compiler/rustc_codegen_ssa/src/mir/operand.rs @@ -16,9 +16,9 @@ use tracing::{debug, instrument}; use super::place::{PlaceRef, PlaceValue}; use super::rvalue::transmute_scalar; use super::{FunctionCx, LocalRef}; +use crate::MemFlags; use crate::common::IntPredicate; use crate::traits::*; -use crate::{MemFlags, size_of_val}; /// The representation of a Rust value. The enum variant is in fact /// uniquely determined by the value's type, but is kept as a @@ -800,44 +800,6 @@ impl<'a, 'tcx, V: CodegenObject> OperandValue { } } } - - pub fn store_unsized>( - self, - bx: &mut Bx, - indirect_dest: PlaceRef<'tcx, V>, - ) { - debug!("OperandRef::store_unsized: operand={:?}, indirect_dest={:?}", self, indirect_dest); - // `indirect_dest` must have `*mut T` type. We extract `T` out of it. - let unsized_ty = indirect_dest - .layout - .ty - .builtin_deref(true) - .unwrap_or_else(|| bug!("indirect_dest has non-pointer type: {:?}", indirect_dest)); - - let OperandValue::Ref(PlaceValue { llval: llptr, llextra: Some(llextra), .. }) = self - else { - bug!("store_unsized called with a sized value (or with an extern type)") - }; - - // Allocate an appropriate region on the stack, and copy the value into it. Since alloca - // doesn't support dynamic alignment, we allocate an extra align - 1 bytes, and align the - // pointer manually. - let (size, align) = size_of_val::size_and_align_of_dst(bx, unsized_ty, Some(llextra)); - let one = bx.const_usize(1); - let align_minus_1 = bx.sub(align, one); - let size_extra = bx.add(size, align_minus_1); - let min_align = Align::ONE; - let alloca = bx.dynamic_alloca(size_extra, min_align); - let address = bx.ptrtoint(alloca, bx.type_isize()); - let neg_address = bx.neg(address); - let offset = bx.and(neg_address, align_minus_1); - let dst = bx.inbounds_ptradd(alloca, offset); - bx.memcpy(dst, min_align, llptr, min_align, size, MemFlags::empty()); - - // Store the allocated region and the extra to the indirect place. 
- let indirect_operand = OperandValue::Pair(dst, llextra); - indirect_operand.store(bx, indirect_dest); - } } impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { diff --git a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs index 43726e93252..4572dc19594 100644 --- a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs +++ b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs @@ -328,27 +328,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { Some(imm) } - pub(crate) fn codegen_rvalue_unsized( - &mut self, - bx: &mut Bx, - indirect_dest: PlaceRef<'tcx, Bx::Value>, - rvalue: &mir::Rvalue<'tcx>, - ) { - debug!( - "codegen_rvalue_unsized(indirect_dest.llval={:?}, rvalue={:?})", - indirect_dest.val.llval, rvalue - ); - - match *rvalue { - mir::Rvalue::Use(ref operand) => { - let cg_operand = self.codegen_operand(bx, operand); - cg_operand.val.store_unsized(bx, indirect_dest); - } - - _ => bug!("unsized assignment other than `Rvalue::Use`"), - } - } - pub(crate) fn codegen_rvalue_operand( &mut self, bx: &mut Bx, diff --git a/compiler/rustc_codegen_ssa/src/mir/statement.rs b/compiler/rustc_codegen_ssa/src/mir/statement.rs index cd55a838a75..f164e0f9123 100644 --- a/compiler/rustc_codegen_ssa/src/mir/statement.rs +++ b/compiler/rustc_codegen_ssa/src/mir/statement.rs @@ -15,7 +15,12 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { match self.locals[index] { LocalRef::Place(cg_dest) => self.codegen_rvalue(bx, cg_dest, rvalue), LocalRef::UnsizedPlace(cg_indirect_dest) => { - self.codegen_rvalue_unsized(bx, cg_indirect_dest, rvalue) + let ty = cg_indirect_dest.layout.ty; + span_bug!( + statement.source_info.span, + "cannot reallocate from `UnsizedPlace({ty})` \ + into `{rvalue:?}`; dynamic alloca is not supported", + ); } LocalRef::PendingOperand => { let operand = self.codegen_rvalue_operand(bx, rvalue); diff --git a/compiler/rustc_codegen_ssa/src/traits/builder.rs b/compiler/rustc_codegen_ssa/src/traits/builder.rs index 9d367748c2a..1f266514f34 100644 --- a/compiler/rustc_codegen_ssa/src/traits/builder.rs +++ b/compiler/rustc_codegen_ssa/src/traits/builder.rs @@ -224,7 +224,6 @@ pub trait BuilderMethods<'a, 'tcx>: fn to_immediate_scalar(&mut self, val: Self::Value, scalar: Scalar) -> Self::Value; fn alloca(&mut self, size: Size, align: Align) -> Self::Value; - fn dynamic_alloca(&mut self, size: Self::Value, align: Align) -> Self::Value; fn load(&mut self, ty: Self::Type, ptr: Self::Value, align: Align) -> Self::Value; fn volatile_load(&mut self, ty: Self::Type, ptr: Self::Value) -> Self::Value; diff --git a/library/core/src/mem/mod.rs b/library/core/src/mem/mod.rs index c00585de064..1bd12d818cf 100644 --- a/library/core/src/mem/mod.rs +++ b/library/core/src/mem/mod.rs @@ -151,7 +151,7 @@ pub const fn forget(t: T) { /// /// While Rust does not permit unsized locals since its removal in [#111942] it is /// still possible to call functions with unsized values from a function argument -/// or in-place construction. +/// or place expression. 
/// /// ```rust /// #![feature(unsized_fn_params, forget_unsized)] -- cgit 1.4.1-3-g733a5 From 045557797431bfff807145341efbe8f8fb08817e Mon Sep 17 00:00:00 2001 From: Dillon Amburgey Date: Tue, 8 Jul 2025 06:17:03 -0500 Subject: fix: correct parameter names in LLVMRustBuildMinNum and LLVMRustBuildMaxNum FFI declarations --- compiler/rustc_codegen_llvm/src/llvm/ffi.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'compiler/rustc_codegen_llvm/src') diff --git a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs index 91ada856d59..b9dd10ff014 100644 --- a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs +++ b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs @@ -1980,12 +1980,12 @@ unsafe extern "C" { pub(crate) fn LLVMRustBuildMinNum<'a>( B: &Builder<'a>, LHS: &'a Value, - LHS: &'a Value, + RHS: &'a Value, ) -> &'a Value; pub(crate) fn LLVMRustBuildMaxNum<'a>( B: &Builder<'a>, LHS: &'a Value, - LHS: &'a Value, + RHS: &'a Value, ) -> &'a Value; // Atomic Operations -- cgit 1.4.1-3-g733a5 From b2299e20b2c2a25254fb554ccc59a8b60fdc25e0 Mon Sep 17 00:00:00 2001 From: Dillon Amburgey Date: Tue, 8 Jul 2025 06:18:18 -0500 Subject: fix: correct assertion to check for 'noinline' attribute presence before removal --- compiler/rustc_codegen_llvm/src/back/lto.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'compiler/rustc_codegen_llvm/src') diff --git a/compiler/rustc_codegen_llvm/src/back/lto.rs b/compiler/rustc_codegen_llvm/src/back/lto.rs index 9c62244f3c9..74418adc43c 100644 --- a/compiler/rustc_codegen_llvm/src/back/lto.rs +++ b/compiler/rustc_codegen_llvm/src/back/lto.rs @@ -680,7 +680,7 @@ pub(crate) fn run_pass_manager( if attributes::has_string_attr(function, enzyme_marker) { // Sanity check: Ensure 'noinline' is present before replacing it. assert!( - !attributes::has_attr(function, Function, llvm::AttributeKind::NoInline), + attributes::has_attr(function, Function, llvm::AttributeKind::NoInline), "Expected __enzyme function to have 'noinline' before adding 'alwaysinline'" ); -- cgit 1.4.1-3-g733a5 From 6fc5d4edda88d0dd28c45ccba1a1e217ddad11c4 Mon Sep 17 00:00:00 2001 From: Folkert de Vries Date: Mon, 7 Jul 2025 19:23:06 +0200 Subject: emit `.att_syntax` when global/naked asm use that option --- compiler/rustc_codegen_llvm/src/asm.rs | 22 +++++++--- tests/assembly/emit-intel-att-syntax.rs | 75 +++++++++++++++++++++++++++++++++ 2 files changed, 90 insertions(+), 7 deletions(-) create mode 100644 tests/assembly/emit-intel-att-syntax.rs (limited to 'compiler/rustc_codegen_llvm/src') diff --git a/compiler/rustc_codegen_llvm/src/asm.rs b/compiler/rustc_codegen_llvm/src/asm.rs index 9ddadcf16aa..a643a91141e 100644 --- a/compiler/rustc_codegen_llvm/src/asm.rs +++ b/compiler/rustc_codegen_llvm/src/asm.rs @@ -384,15 +384,19 @@ impl<'tcx> AsmCodegenMethods<'tcx> for CodegenCx<'_, 'tcx> { ) { let asm_arch = self.tcx.sess.asm_arch.unwrap(); - // Default to Intel syntax on x86 - let intel_syntax = matches!(asm_arch, InlineAsmArch::X86 | InlineAsmArch::X86_64) - && !options.contains(InlineAsmOptions::ATT_SYNTAX); - // Build the template string let mut template_str = String::new(); - if intel_syntax { - template_str.push_str(".intel_syntax\n"); + + // On X86 platforms there are two assembly syntaxes. Rust uses intel by default, + // but AT&T can be specified explicitly. 
+ if matches!(asm_arch, InlineAsmArch::X86 | InlineAsmArch::X86_64) { + if options.contains(InlineAsmOptions::ATT_SYNTAX) { + template_str.push_str(".att_syntax\n") + } else { + template_str.push_str(".intel_syntax\n") + } } + for piece in template { match *piece { InlineAsmTemplatePiece::String(ref s) => template_str.push_str(s), @@ -431,7 +435,11 @@ impl<'tcx> AsmCodegenMethods<'tcx> for CodegenCx<'_, 'tcx> { } } } - if intel_syntax { + + // Just to play it safe, if intel was used, reset the assembly syntax to att. + if matches!(asm_arch, InlineAsmArch::X86 | InlineAsmArch::X86_64) + && !options.contains(InlineAsmOptions::ATT_SYNTAX) + { template_str.push_str("\n.att_syntax\n"); } diff --git a/tests/assembly/emit-intel-att-syntax.rs b/tests/assembly/emit-intel-att-syntax.rs new file mode 100644 index 00000000000..7b479a0f79e --- /dev/null +++ b/tests/assembly/emit-intel-att-syntax.rs @@ -0,0 +1,75 @@ +//@ assembly-output: emit-asm +//@ revisions: att intel +//@ [att] compile-flags: -Cllvm-args=-x86-asm-syntax=att +//@ [intel] compile-flags: -Cllvm-args=-x86-asm-syntax=intel +//@ only-x86_64 + +#![crate_type = "lib"] + +// CHECK-LABEL: naked_att: +// intel-CHECK: mov rax, qword ptr [rdi] +// intel-CHECK: ret +// att-CHECK: movq (%rdi), %rax +// att-CHECK: retq + +#[unsafe(naked)] +#[unsafe(no_mangle)] +extern "sysv64" fn naked_att() { + std::arch::naked_asm!( + " + movq (%rdi), %rax + retq + ", + options(att_syntax), + ); +} + +// CHECK-LABEL: naked_intel: +// intel-CHECK: mov rax, rdi +// intel-CHECK: ret +// att-CHECK: movq (%rdi), %rax +// att-CHECK: retq + +#[unsafe(naked)] +#[unsafe(no_mangle)] +extern "sysv64" fn naked_intel() { + std::arch::naked_asm!( + " + mov rax, rdi + ret + ", + options(), + ); +} + +// CHECK-LABEL: global_att: +// intel-CHECK: mov rax, rdi +// intel-CHECK: ret +// att-CHECK: movq (%rdi), %rax +// att-CHECK: retq + +core::arch::global_asm!( + " + .globl global_att + global_att: + movq (%rdi), %rax + retq + ", + options(att_syntax), +); + +// CHECK-LABEL: global_intel: +// intel-CHECK: mov rax, rdi +// intel-CHECK: ret +// att-CHECK: movq (%rdi), %rax +// att-CHECK: retq + +core::arch::global_asm!( + " + .globl global_intel + global_intel: + mov rax, rdi + ret + ", + options(), +); -- cgit 1.4.1-3-g733a5 From 486ffda9dcd0d4ef0a09d81e6ce5f241e77526a1 Mon Sep 17 00:00:00 2001 From: Oli Scherer Date: Wed, 12 Mar 2025 10:26:37 +0000 Subject: Add opaque TypeId handles for CTFE --- compiler/rustc_codegen_cranelift/src/constant.rs | 13 ++++ compiler/rustc_codegen_gcc/src/common.rs | 10 ++- compiler/rustc_codegen_llvm/src/common.rs | 18 +++-- compiler/rustc_codegen_llvm/src/consts.rs | 5 +- compiler/rustc_codegen_ssa/src/mir/operand.rs | 2 +- compiler/rustc_const_eval/messages.ftl | 2 + compiler/rustc_const_eval/src/errors.rs | 6 +- .../rustc_const_eval/src/interpret/intrinsics.rs | 79 +++++++++++++++++++- compiler/rustc_const_eval/src/interpret/memory.rs | 30 +++++++- .../rustc_const_eval/src/interpret/projection.rs | 6 +- .../rustc_const_eval/src/interpret/validity.rs | 64 +++++++++-------- compiler/rustc_hir/src/lang_items.rs | 2 + compiler/rustc_hir_analysis/src/check/intrinsic.rs | 9 ++- compiler/rustc_middle/src/mir/interpret/error.rs | 2 + compiler/rustc_middle/src/mir/interpret/mod.rs | 33 +++++++-- compiler/rustc_middle/src/mir/pretty.rs | 1 + compiler/rustc_middle/src/ty/print/pretty.rs | 1 + compiler/rustc_monomorphize/src/collector.rs | 1 + compiler/rustc_passes/src/reachable.rs | 1 + compiler/rustc_span/src/symbol.rs | 1 + 
compiler/stable_mir/src/mir/alloc.rs | 3 + .../stable_mir/src/unstable/convert/stable/mir.rs | 3 + library/core/Cargo.toml | 2 - library/core/src/any.rs | 84 +++++++++++++++------- library/core/src/intrinsics/mod.rs | 15 +++- library/coretests/tests/any.rs | 8 --- library/std/Cargo.toml | 2 - library/sysroot/Cargo.toml | 1 - tests/codegen/error-provide.rs | 6 +- tests/ui/const-generics/issues/issue-90318.rs | 3 +- tests/ui/const-generics/issues/issue-90318.stderr | 25 +------ tests/ui/consts/const_cmp_type_id.rs | 3 +- tests/ui/consts/const_cmp_type_id.stderr | 20 +++--- tests/ui/consts/const_transmute_type_id.rs | 11 +++ tests/ui/consts/const_transmute_type_id.stderr | 12 ++++ tests/ui/consts/const_transmute_type_id2.rs | 14 ++++ tests/ui/consts/const_transmute_type_id2.stderr | 15 ++++ tests/ui/consts/const_transmute_type_id3.rs | 16 +++++ tests/ui/consts/const_transmute_type_id3.stderr | 15 ++++ tests/ui/consts/const_transmute_type_id4.rs | 16 +++++ tests/ui/consts/const_transmute_type_id4.stderr | 15 ++++ tests/ui/consts/issue-73976-monomorphic.rs | 2 +- tests/ui/consts/issue-73976-monomorphic.stderr | 9 --- 43 files changed, 438 insertions(+), 148 deletions(-) create mode 100644 tests/ui/consts/const_transmute_type_id.rs create mode 100644 tests/ui/consts/const_transmute_type_id.stderr create mode 100644 tests/ui/consts/const_transmute_type_id2.rs create mode 100644 tests/ui/consts/const_transmute_type_id2.stderr create mode 100644 tests/ui/consts/const_transmute_type_id3.rs create mode 100644 tests/ui/consts/const_transmute_type_id3.stderr create mode 100644 tests/ui/consts/const_transmute_type_id4.rs create mode 100644 tests/ui/consts/const_transmute_type_id4.stderr delete mode 100644 tests/ui/consts/issue-73976-monomorphic.stderr (limited to 'compiler/rustc_codegen_llvm/src') diff --git a/compiler/rustc_codegen_cranelift/src/constant.rs b/compiler/rustc_codegen_cranelift/src/constant.rs index ed06423b260..85adf0f3716 100644 --- a/compiler/rustc_codegen_cranelift/src/constant.rs +++ b/compiler/rustc_codegen_cranelift/src/constant.rs @@ -175,6 +175,13 @@ pub(crate) fn codegen_const_value<'tcx>( fx.module.declare_data_in_func(data_id, &mut fx.bcx.func); fx.bcx.ins().global_value(fx.pointer_type, local_data_id) } + GlobalAlloc::TypeId { .. } => { + return CValue::const_val( + fx, + layout, + ScalarInt::try_from_target_usize(offset.bytes(), fx.tcx).unwrap(), + ); + } GlobalAlloc::Static(def_id) => { assert!(fx.tcx.is_static(def_id)); let data_id = data_id_for_static( @@ -360,6 +367,7 @@ fn define_all_allocs(tcx: TyCtxt<'_>, module: &mut dyn Module, cx: &mut Constant GlobalAlloc::Memory(alloc) => alloc, GlobalAlloc::Function { .. } | GlobalAlloc::Static(_) + | GlobalAlloc::TypeId { .. } | GlobalAlloc::VTable(..) => { unreachable!() } @@ -471,6 +479,11 @@ fn define_all_allocs(tcx: TyCtxt<'_>, module: &mut dyn Module, cx: &mut Constant .principal() .map(|principal| tcx.instantiate_bound_regions_with_erased(principal)), ), + GlobalAlloc::TypeId { .. } => { + // Nothing to do, the bytes/offset of this pointer have already been written together with all other bytes, + // so we just need to drop this provenance. 
+ continue; + } GlobalAlloc::Static(def_id) => { if tcx.codegen_fn_attrs(def_id).flags.contains(CodegenFnAttrFlags::THREAD_LOCAL) { diff --git a/compiler/rustc_codegen_gcc/src/common.rs b/compiler/rustc_codegen_gcc/src/common.rs index 32713eb56c6..28848ca6184 100644 --- a/compiler/rustc_codegen_gcc/src/common.rs +++ b/compiler/rustc_codegen_gcc/src/common.rs @@ -1,7 +1,6 @@ use gccjit::{LValue, RValue, ToRValue, Type}; -use rustc_abi as abi; -use rustc_abi::HasDataLayout; use rustc_abi::Primitive::Pointer; +use rustc_abi::{self as abi, HasDataLayout}; use rustc_codegen_ssa::traits::{ BaseTypeCodegenMethods, ConstCodegenMethods, MiscCodegenMethods, StaticCodegenMethods, }; @@ -282,6 +281,13 @@ impl<'gcc, 'tcx> ConstCodegenMethods for CodegenCx<'gcc, 'tcx> { let init = self.const_data_from_alloc(alloc); self.static_addr_of(init, alloc.inner().align, None) } + GlobalAlloc::TypeId { .. } => { + let val = self.const_usize(offset.bytes()); + // This is still a variable of pointer type, even though we only use the provenance + // of that pointer in CTFE and Miri. But to make LLVM's type system happy, + // we need an int-to-ptr cast here (it doesn't matter at all which provenance that picks). + return self.context.new_cast(None, val, ty); + } GlobalAlloc::Static(def_id) => { assert!(self.tcx.is_static(def_id)); self.get_static(def_id).get_address(None) diff --git a/compiler/rustc_codegen_llvm/src/common.rs b/compiler/rustc_codegen_llvm/src/common.rs index 92f38565eef..b9b5c776d86 100644 --- a/compiler/rustc_codegen_llvm/src/common.rs +++ b/compiler/rustc_codegen_llvm/src/common.rs @@ -3,9 +3,8 @@ use std::borrow::Borrow; use libc::{c_char, c_uint}; -use rustc_abi as abi; -use rustc_abi::HasDataLayout; use rustc_abi::Primitive::Pointer; +use rustc_abi::{self as abi, HasDataLayout as _}; use rustc_ast::Mutability; use rustc_codegen_ssa::common::TypeKind; use rustc_codegen_ssa::traits::*; @@ -284,7 +283,8 @@ impl<'ll, 'tcx> ConstCodegenMethods for CodegenCx<'ll, 'tcx> { self.const_bitcast(llval, llty) }; } else { - let init = const_alloc_to_llvm(self, alloc, /*static*/ false); + let init = + const_alloc_to_llvm(self, alloc.inner(), /*static*/ false); let alloc = alloc.inner(); let value = match alloc.mutability { Mutability::Mut => self.static_addr_of_mut(init, alloc.align, None), @@ -316,15 +316,19 @@ impl<'ll, 'tcx> ConstCodegenMethods for CodegenCx<'ll, 'tcx> { }), ))) .unwrap_memory(); - let init = const_alloc_to_llvm(self, alloc, /*static*/ false); - let value = self.static_addr_of_impl(init, alloc.inner().align, None); - value + let init = const_alloc_to_llvm(self, alloc.inner(), /*static*/ false); + self.static_addr_of_impl(init, alloc.inner().align, None) } GlobalAlloc::Static(def_id) => { assert!(self.tcx.is_static(def_id)); assert!(!self.tcx.is_thread_local_static(def_id)); self.get_static(def_id) } + GlobalAlloc::TypeId { .. 
} => { + // Drop the provenance, the offset contains the bytes of the hash + let llval = self.const_usize(offset.bytes()); + return unsafe { llvm::LLVMConstIntToPtr(llval, llty) }; + } }; let base_addr_space = global_alloc.address_space(self); let llval = unsafe { @@ -346,7 +350,7 @@ impl<'ll, 'tcx> ConstCodegenMethods for CodegenCx<'ll, 'tcx> { } fn const_data_from_alloc(&self, alloc: ConstAllocation<'_>) -> Self::Value { - const_alloc_to_llvm(self, alloc, /*static*/ false) + const_alloc_to_llvm(self, alloc.inner(), /*static*/ false) } fn const_ptr_byte_offset(&self, base_addr: Self::Value, offset: abi::Size) -> Self::Value { diff --git a/compiler/rustc_codegen_llvm/src/consts.rs b/compiler/rustc_codegen_llvm/src/consts.rs index 28f5282c6b0..21524fd2eb8 100644 --- a/compiler/rustc_codegen_llvm/src/consts.rs +++ b/compiler/rustc_codegen_llvm/src/consts.rs @@ -27,10 +27,9 @@ use crate::{base, debuginfo}; pub(crate) fn const_alloc_to_llvm<'ll>( cx: &CodegenCx<'ll, '_>, - alloc: ConstAllocation<'_>, + alloc: &Allocation, is_static: bool, ) -> &'ll Value { - let alloc = alloc.inner(); // We expect that callers of const_alloc_to_llvm will instead directly codegen a pointer or // integer for any &ZST where the ZST is a constant (i.e. not a static). We should never be // producing empty LLVM allocations as they're just adding noise to binaries and forcing less @@ -141,7 +140,7 @@ fn codegen_static_initializer<'ll, 'tcx>( def_id: DefId, ) -> Result<(&'ll Value, ConstAllocation<'tcx>), ErrorHandled> { let alloc = cx.tcx.eval_static_initializer(def_id)?; - Ok((const_alloc_to_llvm(cx, alloc, /*static*/ true), alloc)) + Ok((const_alloc_to_llvm(cx, alloc.inner(), /*static*/ true), alloc)) } fn set_global_alignment<'ll>(cx: &CodegenCx<'ll, '_>, gv: &'ll Value, mut align: Align) { diff --git a/compiler/rustc_codegen_ssa/src/mir/operand.rs b/compiler/rustc_codegen_ssa/src/mir/operand.rs index 2896dfd5463..a639803ea60 100644 --- a/compiler/rustc_codegen_ssa/src/mir/operand.rs +++ b/compiler/rustc_codegen_ssa/src/mir/operand.rs @@ -186,7 +186,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> { offset: Size, ) -> Self { let alloc_align = alloc.inner().align; - assert!(alloc_align >= layout.align.abi); + assert!(alloc_align >= layout.align.abi, "{alloc_align:?} < {:?}", layout.align.abi); let read_scalar = |start, size, s: abi::Scalar, ty| { match alloc.0.read_scalar( diff --git a/compiler/rustc_const_eval/messages.ftl b/compiler/rustc_const_eval/messages.ftl index 22a1894ee72..b767ca9a3c2 100644 --- a/compiler/rustc_const_eval/messages.ftl +++ b/compiler/rustc_const_eval/messages.ftl @@ -78,6 +78,8 @@ const_eval_dealloc_kind_mismatch = const_eval_deref_function_pointer = accessing {$allocation} which contains a function +const_eval_deref_typeid_pointer = + accessing {$allocation} which contains a `TypeId` const_eval_deref_vtable_pointer = accessing {$allocation} which contains a vtable const_eval_division_by_zero = diff --git a/compiler/rustc_const_eval/src/errors.rs b/compiler/rustc_const_eval/src/errors.rs index 9133a5fc8ef..49cd7138748 100644 --- a/compiler/rustc_const_eval/src/errors.rs +++ b/compiler/rustc_const_eval/src/errors.rs @@ -475,6 +475,7 @@ impl<'a> ReportErrorExt for UndefinedBehaviorInfo<'a> { WriteToReadOnly(_) => const_eval_write_to_read_only, DerefFunctionPointer(_) => const_eval_deref_function_pointer, DerefVTablePointer(_) => const_eval_deref_vtable_pointer, + DerefTypeIdPointer(_) => const_eval_deref_typeid_pointer, InvalidBool(_) => const_eval_invalid_bool, InvalidChar(_) => 
const_eval_invalid_char, InvalidTag(_) => const_eval_invalid_tag, @@ -588,7 +589,10 @@ impl<'a> ReportErrorExt for UndefinedBehaviorInfo<'a> { diag.arg("has", has.bytes()); diag.arg("msg", format!("{msg:?}")); } - WriteToReadOnly(alloc) | DerefFunctionPointer(alloc) | DerefVTablePointer(alloc) => { + WriteToReadOnly(alloc) + | DerefFunctionPointer(alloc) + | DerefVTablePointer(alloc) + | DerefTypeIdPointer(alloc) => { diag.arg("allocation", alloc); } InvalidBool(b) => { diff --git a/compiler/rustc_const_eval/src/interpret/intrinsics.rs b/compiler/rustc_const_eval/src/interpret/intrinsics.rs index 378ed6d0e10..ee5382af0b2 100644 --- a/compiler/rustc_const_eval/src/interpret/intrinsics.rs +++ b/compiler/rustc_const_eval/src/interpret/intrinsics.rs @@ -4,8 +4,10 @@ use std::assert_matches::assert_matches; -use rustc_abi::Size; +use rustc_abi::{FieldIdx, Size}; use rustc_apfloat::ieee::{Double, Half, Quad, Single}; +use rustc_ast::Mutability; +use rustc_middle::mir::interpret::{AllocId, AllocInit, alloc_range}; use rustc_middle::mir::{self, BinOp, ConstValue, NonDivergingIntrinsic}; use rustc_middle::ty::layout::TyAndLayout; use rustc_middle::ty::{Ty, TyCtxt}; @@ -29,6 +31,37 @@ pub(crate) fn alloc_type_name<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> ConstAll tcx.mk_const_alloc(alloc) } +pub(crate) fn alloc_type_id<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> AllocId { + let size = Size::from_bytes(16); + let align = tcx.data_layout.pointer_align(); + let mut alloc = Allocation::new(size, *align, AllocInit::Uninit, ()); + let ptr_size = tcx.data_layout.pointer_size(); + let type_id_hash = tcx.type_id_hash(ty).as_u128(); + alloc + .write_scalar( + &tcx, + alloc_range(Size::ZERO, Size::from_bytes(16)), + Scalar::from_u128(type_id_hash), + ) + .unwrap(); + + // Give the first pointer-size bytes provenance that knows about the type id + + let alloc_id = tcx.reserve_and_set_type_id_alloc(ty); + let offset = alloc + .read_scalar(&tcx, alloc_range(Size::ZERO, ptr_size), false) + .unwrap() + .to_target_usize(&tcx) + .unwrap(); + let ptr = Pointer::new(alloc_id.into(), Size::from_bytes(offset)); + let val = Scalar::from_pointer(ptr, &tcx); + alloc.write_scalar(&tcx, alloc_range(Size::ZERO, ptr_size), val).unwrap(); + + alloc.mutability = Mutability::Not; + + tcx.reserve_and_set_memory_alloc(tcx.mk_const_alloc(alloc)) +} + impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { /// Returns `true` if emulation happened. /// Here we implement the intrinsics that are common to all Miri instances; individual machines can add their own @@ -63,10 +96,52 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { sym::type_id => { let tp_ty = instance.args.type_at(0); ensure_monomorphic_enough(tcx, tp_ty)?; - let val = ConstValue::from_u128(tcx.type_id_hash(tp_ty).as_u128()); + let alloc_id = alloc_type_id(tcx, tp_ty); + let val = ConstValue::Indirect { alloc_id, offset: Size::ZERO }; let val = self.const_val_to_op(val, dest.layout.ty, Some(dest.layout))?; self.copy_op(&val, dest)?; } + sym::type_id_eq => { + // Both operands are `TypeId`, which is a newtype around an array of pointers. + // Project until we have the array elements. + let a_fields = self.project_field(&args[0], FieldIdx::ZERO)?; + let b_fields = self.project_field(&args[1], FieldIdx::ZERO)?; + + let mut a_fields = self.project_array_fields(&a_fields)?; + let mut b_fields = self.project_array_fields(&b_fields)?; + + let (_idx, a) = a_fields + .next(self)? 
+ .expect("we know the layout of TypeId has at least 2 array elements"); + let a = self.deref_pointer(&a)?; + let (a, offset_a) = self.get_ptr_type_id(a.ptr())?; + + let (_idx, b) = b_fields + .next(self)? + .expect("we know the layout of TypeId has at least 2 array elements"); + let b = self.deref_pointer(&b)?; + let (b, offset_b) = self.get_ptr_type_id(b.ptr())?; + + let provenance_matches = a == b; + + let mut eq_id = offset_a == offset_b; + + while let Some((_, a)) = a_fields.next(self)? { + let (_, b) = b_fields.next(self)?.unwrap(); + + let a = self.read_target_usize(&a)?; + let b = self.read_target_usize(&b)?; + eq_id &= a == b; + } + + if !eq_id && provenance_matches { + throw_ub_format!( + "type_id_eq: one of the TypeId arguments is invalid, the hash does not match the type it represents" + ) + } + + self.write_scalar(Scalar::from_bool(provenance_matches), dest)?; + } sym::variant_count => { let tp_ty = instance.args.type_at(0); let ty = match tp_ty.kind() { diff --git a/compiler/rustc_const_eval/src/interpret/memory.rs b/compiler/rustc_const_eval/src/interpret/memory.rs index 524023b8104..bb301600135 100644 --- a/compiler/rustc_const_eval/src/interpret/memory.rs +++ b/compiler/rustc_const_eval/src/interpret/memory.rs @@ -15,9 +15,9 @@ use std::{fmt, ptr}; use rustc_abi::{Align, HasDataLayout, Size}; use rustc_ast::Mutability; use rustc_data_structures::fx::{FxHashSet, FxIndexMap}; -use rustc_middle::bug; use rustc_middle::mir::display_allocation; use rustc_middle::ty::{self, Instance, Ty, TyCtxt}; +use rustc_middle::{bug, throw_ub_format}; use tracing::{debug, instrument, trace}; use super::{ @@ -346,6 +346,13 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { kind = "vtable", ) } + Some(GlobalAlloc::TypeId { .. }) => { + err_ub_custom!( + fluent::const_eval_invalid_dealloc, + alloc_id = alloc_id, + kind = "typeid", + ) + } Some(GlobalAlloc::Static(..) | GlobalAlloc::Memory(..)) => { err_ub_custom!( fluent::const_eval_invalid_dealloc, @@ -615,6 +622,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { } Some(GlobalAlloc::Function { .. }) => throw_ub!(DerefFunctionPointer(id)), Some(GlobalAlloc::VTable(..)) => throw_ub!(DerefVTablePointer(id)), + Some(GlobalAlloc::TypeId { .. }) => throw_ub!(DerefTypeIdPointer(id)), None => throw_ub!(PointerUseAfterFree(id, CheckInAllocMsg::MemoryAccess)), Some(GlobalAlloc::Static(def_id)) => { assert!(self.tcx.is_static(def_id)); @@ -896,7 +904,9 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { let (size, align) = global_alloc.size_and_align(*self.tcx, self.typing_env); let mutbl = global_alloc.mutability(*self.tcx, self.typing_env); let kind = match global_alloc { - GlobalAlloc::Static { .. } | GlobalAlloc::Memory { .. } => AllocKind::LiveData, + GlobalAlloc::TypeId { .. } + | GlobalAlloc::Static { .. } + | GlobalAlloc::Memory { .. } => AllocKind::LiveData, GlobalAlloc::Function { .. } => bug!("We already checked function pointers above"), GlobalAlloc::VTable { .. } => AllocKind::VTable, }; @@ -936,6 +946,19 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { } } + /// Takes a pointer that is the first chunk of a `TypeId` and return the type that its + /// provenance refers to, as well as the segment of the hash that this pointer covers. 
+ pub fn get_ptr_type_id( + &self, + ptr: Pointer>, + ) -> InterpResult<'tcx, (Ty<'tcx>, Size)> { + let (alloc_id, offset, _meta) = self.ptr_get_alloc_id(ptr, 0)?; + let GlobalAlloc::TypeId { ty } = self.tcx.global_alloc(alloc_id) else { + throw_ub_format!("type_id_eq: `TypeId` provenance is not a type id") + }; + interp_ok((ty, offset)) + } + pub fn get_ptr_fn( &self, ptr: Pointer>, @@ -1197,6 +1220,9 @@ impl<'a, 'tcx, M: Machine<'tcx>> std::fmt::Debug for DumpAllocs<'a, 'tcx, M> { Some(GlobalAlloc::VTable(ty, dyn_ty)) => { write!(fmt, " (vtable: impl {dyn_ty} for {ty})")?; } + Some(GlobalAlloc::TypeId { ty }) => { + write!(fmt, " (typeid for {ty})")?; + } Some(GlobalAlloc::Static(did)) => { write!(fmt, " (static: {})", self.ecx.tcx.def_path_str(did))?; } diff --git a/compiler/rustc_const_eval/src/interpret/projection.rs b/compiler/rustc_const_eval/src/interpret/projection.rs index 306697d4ec9..f72c4418081 100644 --- a/compiler/rustc_const_eval/src/interpret/projection.rs +++ b/compiler/rustc_const_eval/src/interpret/projection.rs @@ -296,7 +296,11 @@ where base: &'a P, ) -> InterpResult<'tcx, ArrayIterator<'a, 'tcx, M::Provenance, P>> { let abi::FieldsShape::Array { stride, .. } = base.layout().fields else { - span_bug!(self.cur_span(), "project_array_fields: expected an array layout"); + span_bug!( + self.cur_span(), + "project_array_fields: expected an array layout, got {:#?}", + base.layout() + ); }; let len = base.len(self)?; let field_layout = base.layout().field(self, 0); diff --git a/compiler/rustc_const_eval/src/interpret/validity.rs b/compiler/rustc_const_eval/src/interpret/validity.rs index fc4d13af8c4..fc44490c96d 100644 --- a/compiler/rustc_const_eval/src/interpret/validity.rs +++ b/compiler/rustc_const_eval/src/interpret/validity.rs @@ -571,40 +571,42 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> { let alloc_actual_mutbl = global_alloc.mutability(*self.ecx.tcx, self.ecx.typing_env); - if let GlobalAlloc::Static(did) = global_alloc { - let DefKind::Static { nested, .. } = self.ecx.tcx.def_kind(did) else { - bug!() - }; - // Special handling for pointers to statics (irrespective of their type). - assert!(!self.ecx.tcx.is_thread_local_static(did)); - assert!(self.ecx.tcx.is_static(did)); - // Mode-specific checks - match ctfe_mode { - CtfeValidationMode::Static { .. } - | CtfeValidationMode::Promoted { .. } => { - // We skip recursively checking other statics. These statics must be sound by - // themselves, and the only way to get broken statics here is by using - // unsafe code. - // The reasons we don't check other statics is twofold. For one, in all - // sound cases, the static was already validated on its own, and second, we - // trigger cycle errors if we try to compute the value of the other static - // and that static refers back to us (potentially through a promoted). - // This could miss some UB, but that's fine. - // We still walk nested allocations, as they are fundamentally part of this validation run. - // This means we will also recurse into nested statics of *other* - // statics, even though we do not recurse into other statics directly. - // That's somewhat inconsistent but harmless. - skip_recursive_check = !nested; - } - CtfeValidationMode::Const { .. } => { - // If this is mutable memory or an `extern static`, there's no point in checking it -- we'd - // just get errors trying to read the value. 
- if alloc_actual_mutbl.is_mut() || self.ecx.tcx.is_foreign_item(did) - { - skip_recursive_check = true; + match global_alloc { + GlobalAlloc::Static(did) => { + let DefKind::Static { nested, .. } = self.ecx.tcx.def_kind(did) else { + bug!() + }; + assert!(!self.ecx.tcx.is_thread_local_static(did)); + assert!(self.ecx.tcx.is_static(did)); + match ctfe_mode { + CtfeValidationMode::Static { .. } + | CtfeValidationMode::Promoted { .. } => { + // We skip recursively checking other statics. These statics must be sound by + // themselves, and the only way to get broken statics here is by using + // unsafe code. + // The reasons we don't check other statics is twofold. For one, in all + // sound cases, the static was already validated on its own, and second, we + // trigger cycle errors if we try to compute the value of the other static + // and that static refers back to us (potentially through a promoted). + // This could miss some UB, but that's fine. + // We still walk nested allocations, as they are fundamentally part of this validation run. + // This means we will also recurse into nested statics of *other* + // statics, even though we do not recurse into other statics directly. + // That's somewhat inconsistent but harmless. + skip_recursive_check = !nested; + } + CtfeValidationMode::Const { .. } => { + // If this is mutable memory or an `extern static`, there's no point in checking it -- we'd + // just get errors trying to read the value. + if alloc_actual_mutbl.is_mut() + || self.ecx.tcx.is_foreign_item(did) + { + skip_recursive_check = true; + } } } } + _ => (), } // If this allocation has size zero, there is no actual mutability here. diff --git a/compiler/rustc_hir/src/lang_items.rs b/compiler/rustc_hir/src/lang_items.rs index c11db63ba11..6347f1bfa71 100644 --- a/compiler/rustc_hir/src/lang_items.rs +++ b/compiler/rustc_hir/src/lang_items.rs @@ -274,6 +274,8 @@ language_item_table! { PartialOrd, sym::partial_ord, partial_ord_trait, Target::Trait, GenericRequirement::Exact(1); CVoid, sym::c_void, c_void, Target::Enum, GenericRequirement::None; + TypeId, sym::type_id, type_id, Target::Struct, GenericRequirement::None; + // A number of panic-related lang items. The `panic` item corresponds to divide-by-zero and // various panic cases with `match`. The `panic_bounds_check` item is for indexing arrays. 
// diff --git a/compiler/rustc_hir_analysis/src/check/intrinsic.rs b/compiler/rustc_hir_analysis/src/check/intrinsic.rs index cebf7d1b532..e3532ade32f 100644 --- a/compiler/rustc_hir_analysis/src/check/intrinsic.rs +++ b/compiler/rustc_hir_analysis/src/check/intrinsic.rs @@ -93,6 +93,7 @@ fn intrinsic_operation_unsafety(tcx: TyCtxt<'_>, intrinsic_id: LocalDefId) -> hi | sym::three_way_compare | sym::discriminant_value | sym::type_id + | sym::type_id_eq | sym::select_unpredictable | sym::cold_path | sym::ptr_guaranteed_cmp @@ -220,7 +221,13 @@ pub(crate) fn check_intrinsic_type( sym::needs_drop => (1, 0, vec![], tcx.types.bool), sym::type_name => (1, 0, vec![], Ty::new_static_str(tcx)), - sym::type_id => (1, 0, vec![], tcx.types.u128), + sym::type_id => { + (1, 0, vec![], tcx.type_of(tcx.lang_items().type_id().unwrap()).instantiate_identity()) + } + sym::type_id_eq => { + let type_id = tcx.type_of(tcx.lang_items().type_id().unwrap()).instantiate_identity(); + (0, 0, vec![type_id, type_id], tcx.types.bool) + } sym::offset => (2, 0, vec![param(0), param(1)], param(0)), sym::arith_offset => ( 1, diff --git a/compiler/rustc_middle/src/mir/interpret/error.rs b/compiler/rustc_middle/src/mir/interpret/error.rs index 8acb8fa9f80..71e0c943fbb 100644 --- a/compiler/rustc_middle/src/mir/interpret/error.rs +++ b/compiler/rustc_middle/src/mir/interpret/error.rs @@ -392,6 +392,8 @@ pub enum UndefinedBehaviorInfo<'tcx> { DerefFunctionPointer(AllocId), /// Trying to access the data behind a vtable pointer. DerefVTablePointer(AllocId), + /// Trying to access the actual type id. + DerefTypeIdPointer(AllocId), /// Using a non-boolean `u8` as bool. InvalidBool(u8), /// Using a non-character `u32` as character. diff --git a/compiler/rustc_middle/src/mir/interpret/mod.rs b/compiler/rustc_middle/src/mir/interpret/mod.rs index 0b2645013ba..bed99a4ff2a 100644 --- a/compiler/rustc_middle/src/mir/interpret/mod.rs +++ b/compiler/rustc_middle/src/mir/interpret/mod.rs @@ -103,6 +103,7 @@ enum AllocDiscriminant { Fn, VTable, Static, + Type, } pub fn specialized_encode_alloc_id<'tcx, E: TyEncoder<'tcx>>( @@ -127,6 +128,11 @@ pub fn specialized_encode_alloc_id<'tcx, E: TyEncoder<'tcx>>( ty.encode(encoder); poly_trait_ref.encode(encoder); } + GlobalAlloc::TypeId { ty } => { + trace!("encoding {alloc_id:?} with {ty:#?}"); + AllocDiscriminant::Type.encode(encoder); + ty.encode(encoder); + } GlobalAlloc::Static(did) => { assert!(!tcx.is_thread_local_static(did)); // References to statics doesn't need to know about their allocations, @@ -228,6 +234,12 @@ impl<'s> AllocDecodingSession<'s> { trace!("decoded vtable alloc instance: {ty:?}, {poly_trait_ref:?}"); decoder.interner().reserve_and_set_vtable_alloc(ty, poly_trait_ref, CTFE_ALLOC_SALT) } + AllocDiscriminant::Type => { + trace!("creating typeid alloc ID"); + let ty = Decodable::decode(decoder); + trace!("decoded typid: {ty:?}"); + decoder.interner().reserve_and_set_type_id_alloc(ty) + } AllocDiscriminant::Static => { trace!("creating extern static alloc ID"); let did = >::decode(decoder); @@ -258,6 +270,9 @@ pub enum GlobalAlloc<'tcx> { Static(DefId), /// The alloc ID points to memory. Memory(ConstAllocation<'tcx>), + /// The first pointer-sized segment of a type id. On 64 bit systems, the 128 bit type id + /// is split into two segments, on 32 bit systems there are 4 segments, and so on. 
+ TypeId { ty: Ty<'tcx> }, } impl<'tcx> GlobalAlloc<'tcx> { @@ -296,9 +311,10 @@ impl<'tcx> GlobalAlloc<'tcx> { pub fn address_space(&self, cx: &impl HasDataLayout) -> AddressSpace { match self { GlobalAlloc::Function { .. } => cx.data_layout().instruction_address_space, - GlobalAlloc::Static(..) | GlobalAlloc::Memory(..) | GlobalAlloc::VTable(..) => { - AddressSpace::ZERO - } + GlobalAlloc::TypeId { .. } + | GlobalAlloc::Static(..) + | GlobalAlloc::Memory(..) + | GlobalAlloc::VTable(..) => AddressSpace::ZERO, } } @@ -334,7 +350,7 @@ impl<'tcx> GlobalAlloc<'tcx> { } } GlobalAlloc::Memory(alloc) => alloc.inner().mutability, - GlobalAlloc::Function { .. } | GlobalAlloc::VTable(..) => { + GlobalAlloc::TypeId { .. } | GlobalAlloc::Function { .. } | GlobalAlloc::VTable(..) => { // These are immutable. Mutability::Not } @@ -380,8 +396,10 @@ impl<'tcx> GlobalAlloc<'tcx> { GlobalAlloc::Function { .. } => (Size::ZERO, Align::ONE), GlobalAlloc::VTable(..) => { // No data to be accessed here. But vtables are pointer-aligned. - return (Size::ZERO, tcx.data_layout.pointer_align().abi); + (Size::ZERO, tcx.data_layout.pointer_align().abi) } + // Fake allocation, there's nothing to access here + GlobalAlloc::TypeId { .. } => (Size::ZERO, Align::ONE), } } } @@ -487,6 +505,11 @@ impl<'tcx> TyCtxt<'tcx> { self.reserve_and_set_dedup(GlobalAlloc::VTable(ty, dyn_ty), salt) } + /// Generates an [AllocId] for a [core::any::TypeId]. Will get deduplicated. + pub fn reserve_and_set_type_id_alloc(self, ty: Ty<'tcx>) -> AllocId { + self.reserve_and_set_dedup(GlobalAlloc::TypeId { ty }, 0) + } + /// Interns the `Allocation` and return a new `AllocId`, even if there's already an identical /// `Allocation` with a different `AllocId`. /// Statics with identical content will still point to the same `Allocation`, i.e., diff --git a/compiler/rustc_middle/src/mir/pretty.rs b/compiler/rustc_middle/src/mir/pretty.rs index e9f3fb6ac8d..8e403dfddae 100644 --- a/compiler/rustc_middle/src/mir/pretty.rs +++ b/compiler/rustc_middle/src/mir/pretty.rs @@ -1621,6 +1621,7 @@ pub fn write_allocations<'tcx>( Some(GlobalAlloc::VTable(ty, dyn_ty)) => { write!(w, " (vtable: impl {dyn_ty} for {ty})")? } + Some(GlobalAlloc::TypeId { ty }) => write!(w, " (typeid for {ty})")?, Some(GlobalAlloc::Static(did)) if !tcx.is_foreign_item(did) => { write!(w, " (static: {}", tcx.def_path_str(did))?; if body.phase <= MirPhase::Runtime(RuntimePhase::PostCleanup) diff --git a/compiler/rustc_middle/src/ty/print/pretty.rs b/compiler/rustc_middle/src/ty/print/pretty.rs index 4e078847815..7c48a4b0885 100644 --- a/compiler/rustc_middle/src/ty/print/pretty.rs +++ b/compiler/rustc_middle/src/ty/print/pretty.rs @@ -1773,6 +1773,7 @@ pub trait PrettyPrinter<'tcx>: Printer<'tcx> + fmt::Write { } Some(GlobalAlloc::Function { .. }) => p!(""), Some(GlobalAlloc::VTable(..)) => p!(""), + Some(GlobalAlloc::TypeId { .. }) => p!(""), None => p!(""), } return Ok(()); diff --git a/compiler/rustc_monomorphize/src/collector.rs b/compiler/rustc_monomorphize/src/collector.rs index 131064f9832..91c8e64ce9a 100644 --- a/compiler/rustc_monomorphize/src/collector.rs +++ b/compiler/rustc_monomorphize/src/collector.rs @@ -1219,6 +1219,7 @@ fn collect_alloc<'tcx>(tcx: TyCtxt<'tcx>, alloc_id: AllocId, output: &mut MonoIt )); collect_alloc(tcx, alloc_id, output) } + GlobalAlloc::TypeId { .. 
} => {} } } diff --git a/compiler/rustc_passes/src/reachable.rs b/compiler/rustc_passes/src/reachable.rs index 7e15267a953..b49e8118fe3 100644 --- a/compiler/rustc_passes/src/reachable.rs +++ b/compiler/rustc_passes/src/reachable.rs @@ -325,6 +325,7 @@ impl<'tcx> ReachableContext<'tcx> { self.visit(args); } } + GlobalAlloc::TypeId { ty, .. } => self.visit(ty), GlobalAlloc::Memory(alloc) => self.propagate_from_alloc(alloc), } } diff --git a/compiler/rustc_span/src/symbol.rs b/compiler/rustc_span/src/symbol.rs index 09f01d8704e..4df91cc3429 100644 --- a/compiler/rustc_span/src/symbol.rs +++ b/compiler/rustc_span/src/symbol.rs @@ -2194,6 +2194,7 @@ symbols! { type_changing_struct_update, type_const, type_id, + type_id_eq, type_ir, type_ir_infer_ctxt_like, type_ir_inherent, diff --git a/compiler/stable_mir/src/mir/alloc.rs b/compiler/stable_mir/src/mir/alloc.rs index 0d45e59885c..9a94551f3ec 100644 --- a/compiler/stable_mir/src/mir/alloc.rs +++ b/compiler/stable_mir/src/mir/alloc.rs @@ -23,6 +23,9 @@ pub enum GlobalAlloc { Static(StaticDef), /// The alloc ID points to memory. Memory(Allocation), + /// The first pointer-sized segment of a type id. On 64 bit systems, the 128 bit type id + /// is split into two segments, on 32 bit systems there are 4 segments, and so on. + TypeId { ty: Ty }, } impl From for GlobalAlloc { diff --git a/compiler/stable_mir/src/unstable/convert/stable/mir.rs b/compiler/stable_mir/src/unstable/convert/stable/mir.rs index f6f4706e40b..ad39fc37600 100644 --- a/compiler/stable_mir/src/unstable/convert/stable/mir.rs +++ b/compiler/stable_mir/src/unstable/convert/stable/mir.rs @@ -864,6 +864,9 @@ impl<'tcx> Stable<'tcx> for mir::interpret::GlobalAlloc<'tcx> { mir::interpret::GlobalAlloc::Memory(alloc) => { GlobalAlloc::Memory(alloc.stable(tables, cx)) } + mir::interpret::GlobalAlloc::TypeId { ty } => { + GlobalAlloc::TypeId { ty: ty.stable(tables, cx) } + } } } } diff --git a/library/core/Cargo.toml b/library/core/Cargo.toml index f88661ee001..3e34e03a61e 100644 --- a/library/core/Cargo.toml +++ b/library/core/Cargo.toml @@ -23,8 +23,6 @@ optimize_for_size = [] # Make `RefCell` store additional debugging information, which is printed out when # a borrow error occurs debug_refcell = [] -# Make `TypeId` store a reference to the name of the type, so that it can print that name. -debug_typeid = [] [lints.rust.unexpected_cfgs] level = "warn" diff --git a/library/core/src/any.rs b/library/core/src/any.rs index 01dce114592..39cdf6efda0 100644 --- a/library/core/src/any.rs +++ b/library/core/src/any.rs @@ -707,19 +707,52 @@ impl dyn Any + Send + Sync { /// ``` #[derive(Clone, Copy, Eq, PartialOrd, Ord)] #[stable(feature = "rust1", since = "1.0.0")] +#[lang = "type_id"] pub struct TypeId { - // We avoid using `u128` because that imposes higher alignment requirements on many platforms. - // See issue #115620 for more information. - t: (u64, u64), - #[cfg(feature = "debug_typeid")] - name: &'static str, + /// This needs to be an array of pointers, since there is provenance + /// in the first array field. This provenance knows exactly which type + /// the TypeId actually is, allowing CTFE and miri to operate based off it. + /// At runtime all the pointers in the array contain bits of the hash, making + /// the entire `TypeId` actually just be a `u128` hash of the type. 
+ pub(crate) data: [*const (); 16 / size_of::<*const ()>()], } +// SAFETY: the raw pointer is always an integer #[stable(feature = "rust1", since = "1.0.0")] -impl PartialEq for TypeId { +unsafe impl Send for TypeId {} +// SAFETY: the raw pointer is always an integer +#[stable(feature = "rust1", since = "1.0.0")] +unsafe impl Sync for TypeId {} + +#[stable(feature = "rust1", since = "1.0.0")] +#[rustc_const_unstable(feature = "const_type_id", issue = "77125")] +impl const PartialEq for TypeId { #[inline] fn eq(&self, other: &Self) -> bool { - self.t == other.t + #[cfg(miri)] + return crate::intrinsics::type_id_eq(*self, *other); + #[cfg(not(miri))] + { + let this = self; + crate::intrinsics::const_eval_select!( + @capture { this: &TypeId, other: &TypeId } -> bool: + if const { + crate::intrinsics::type_id_eq(*this, *other) + } else { + // Ideally we would just invoke `type_id_eq` unconditionally here, + // but since we do not MIR inline intrinsics, because backends + // may want to override them (and miri does!), MIR opts do not + // clean up this call sufficiently for LLVM to turn repeated calls + // of `TypeId` comparisons against one specific `TypeId` into + // a lookup table. + // SAFETY: We know that at runtime none of the bits have provenance and all bits + // are initialized. So we can just convert the whole thing to a `u128` and compare that. + unsafe { + crate::mem::transmute::<_, u128>(*this) == crate::mem::transmute::<_, u128>(*other) + } + } + ) + } } } @@ -742,19 +775,19 @@ impl TypeId { #[stable(feature = "rust1", since = "1.0.0")] #[rustc_const_unstable(feature = "const_type_id", issue = "77125")] pub const fn of() -> TypeId { - let t: u128 = const { intrinsics::type_id::() }; - let t1 = (t >> 64) as u64; - let t2 = t as u64; - - TypeId { - t: (t1, t2), - #[cfg(feature = "debug_typeid")] - name: type_name::(), - } + const { intrinsics::type_id::() } } fn as_u128(self) -> u128 { - u128::from(self.t.0) << 64 | u128::from(self.t.1) + let mut bytes = [0; 16]; + + // This is a provenance-stripping memcpy. + for (i, chunk) in self.data.iter().copied().enumerate() { + let chunk = chunk.expose_provenance().to_ne_bytes(); + let start = i * chunk.len(); + bytes[start..(start + chunk.len())].copy_from_slice(&chunk); + } + u128::from_ne_bytes(bytes) } } @@ -774,22 +807,19 @@ impl hash::Hash for TypeId { // - It is correct to do so -- only hashing a subset of `self` is still // compatible with an `Eq` implementation that considers the entire // value, as ours does. - self.t.1.hash(state); + let data = + // SAFETY: The `offset` stays in-bounds, it just moves the pointer to the 2nd half of the `TypeId`. + // Only the first ptr-sized chunk ever has provenance, so that second half is always + // fine to read at integer type. 
+ unsafe { crate::ptr::read_unaligned(self.data.as_ptr().cast::().offset(1)) }; + data.hash(state); } } #[stable(feature = "rust1", since = "1.0.0")] impl fmt::Debug for TypeId { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { - #[cfg(feature = "debug_typeid")] - { - write!(f, "TypeId({:#034x} = {})", self.as_u128(), self.name)?; - } - #[cfg(not(feature = "debug_typeid"))] - { - write!(f, "TypeId({:#034x})", self.as_u128())?; - } - Ok(()) + write!(f, "TypeId({:#034x})", self.as_u128()) } } diff --git a/library/core/src/intrinsics/mod.rs b/library/core/src/intrinsics/mod.rs index 791d10eda6d..f5dcfeebed8 100644 --- a/library/core/src/intrinsics/mod.rs +++ b/library/core/src/intrinsics/mod.rs @@ -2724,7 +2724,20 @@ pub const fn type_name() -> &'static str; #[rustc_nounwind] #[unstable(feature = "core_intrinsics", issue = "none")] #[rustc_intrinsic] -pub const fn type_id() -> u128; +pub const fn type_id() -> crate::any::TypeId; + +/// Tests (at compile-time) if two [`crate::any::TypeId`] instances identify the +/// same type. This is necessary because at const-eval time the actual discriminating +/// data is opaque and cannot be inspected directly. +/// +/// The stabilized version of this intrinsic is the [PartialEq] impl for [`core::any::TypeId`]. +#[rustc_nounwind] +#[unstable(feature = "core_intrinsics", issue = "none")] +#[rustc_intrinsic] +#[rustc_do_not_const_check] +pub const fn type_id_eq(a: crate::any::TypeId, b: crate::any::TypeId) -> bool { + a.data == b.data +} /// Lowers in MIR to `Rvalue::Aggregate` with `AggregateKind::RawPtr`. /// diff --git a/library/coretests/tests/any.rs b/library/coretests/tests/any.rs index 117ef004238..25002617d0b 100644 --- a/library/coretests/tests/any.rs +++ b/library/coretests/tests/any.rs @@ -118,14 +118,6 @@ fn any_unsized() { is_any::<[i32]>(); } -#[cfg(feature = "debug_typeid")] -#[test] -fn debug_typeid_includes_name() { - let type_id = TypeId::of::<[usize; 2]>(); - let debug_str = format!("{type_id:?}"); - assert!(debug_str.ends_with("= [usize; 2])"), "{debug_str:?} did not match"); -} - #[test] fn distinct_type_names() { // https://github.com/rust-lang/rust/issues/84666 diff --git a/library/std/Cargo.toml b/library/std/Cargo.toml index 3a75d316871..62ece4b6961 100644 --- a/library/std/Cargo.toml +++ b/library/std/Cargo.toml @@ -113,8 +113,6 @@ optimize_for_size = ["core/optimize_for_size", "alloc/optimize_for_size"] # Make `RefCell` store additional debugging information, which is printed out when # a borrow error occurs debug_refcell = ["core/debug_refcell"] -# Make `TypeId` store a reference to the name of the type, so that it can print that name. 
-debug_typeid = ["core/debug_typeid"] # Enable std_detect default features for stdarch/crates/std_detect: diff --git a/library/sysroot/Cargo.toml b/library/sysroot/Cargo.toml index 3adc0224971..c4937c36d4c 100644 --- a/library/sysroot/Cargo.toml +++ b/library/sysroot/Cargo.toml @@ -22,7 +22,6 @@ compiler-builtins-no-asm = ["std/compiler-builtins-no-asm"] compiler-builtins-no-f16-f128 = ["std/compiler-builtins-no-f16-f128"] compiler-builtins-mangled-names = ["std/compiler-builtins-mangled-names"] debug_refcell = ["std/debug_refcell"] -debug_typeid = ["std/debug_typeid"] llvm-libunwind = ["std/llvm-libunwind"] system-llvm-libunwind = ["std/system-llvm-libunwind"] optimize_for_size = ["std/optimize_for_size"] diff --git a/tests/codegen/error-provide.rs b/tests/codegen/error-provide.rs index 25a66078fd4..7f091e34359 100644 --- a/tests/codegen/error-provide.rs +++ b/tests/codegen/error-provide.rs @@ -37,9 +37,9 @@ impl std::error::Error for MyError { // and eliminate redundant ones, rather than compare one-by-one. // CHECK-NEXT: start: - // CHECK-NEXT: %[[SCRUTINEE:[^ ]+]] = load i64, ptr - // CHECK-NEXT: switch i64 %[[SCRUTINEE]], label %{{.*}} [ - // CHECK-COUNT-3: i64 {{.*}}, label %{{.*}} + // CHECK-NEXT: %[[SCRUTINEE:[^ ]+]] = load i128, ptr + // CHECK-NEXT: switch i128 %[[SCRUTINEE]], label %{{.*}} [ + // CHECK-COUNT-3: i128 {{.*}}, label %{{.*}} // CHECK-NEXT: ] request .provide_ref::(&self.backtrace1) diff --git a/tests/ui/const-generics/issues/issue-90318.rs b/tests/ui/const-generics/issues/issue-90318.rs index 317ddad49cd..dfba90a5575 100644 --- a/tests/ui/const-generics/issues/issue-90318.rs +++ b/tests/ui/const-generics/issues/issue-90318.rs @@ -1,5 +1,6 @@ #![feature(const_type_id)] #![feature(generic_const_exprs)] +#![feature(const_trait_impl)] #![feature(core_intrinsics)] #![allow(incomplete_features)] @@ -13,7 +14,6 @@ fn consume(_val: T) where If<{ TypeId::of::() != TypeId::of::<()>() }>: True, //~^ ERROR overly complex generic constant - //~| ERROR: cannot call { } @@ -21,7 +21,6 @@ fn test() where If<{ TypeId::of::() != TypeId::of::<()>() }>: True, //~^ ERROR overly complex generic constant - //~| ERROR: cannot call { } diff --git a/tests/ui/const-generics/issues/issue-90318.stderr b/tests/ui/const-generics/issues/issue-90318.stderr index 9c7cb5ceb58..7031230db91 100644 --- a/tests/ui/const-generics/issues/issue-90318.stderr +++ b/tests/ui/const-generics/issues/issue-90318.stderr @@ -1,5 +1,5 @@ error: overly complex generic constant - --> $DIR/issue-90318.rs:14:8 + --> $DIR/issue-90318.rs:15:8 | LL | If<{ TypeId::of::() != TypeId::of::<()>() }>: True, | ^^-----------------^^^^^^^^^^^^^^^^^^^^^^^^ @@ -20,26 +20,5 @@ LL | If<{ TypeId::of::() != TypeId::of::<()>() }>: True, = help: consider moving this anonymous constant into a `const` function = note: this operation may be supported in the future -error[E0015]: cannot call non-const operator in constants - --> $DIR/issue-90318.rs:14:10 - | -LL | If<{ TypeId::of::() != TypeId::of::<()>() }>: True, - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - | -note: impl defined here, but it is not `const` - --> $SRC_DIR/core/src/any.rs:LL:COL - = note: calls in constants are limited to constant functions, tuple structs and tuple variants - -error[E0015]: cannot call non-const operator in constants - --> $DIR/issue-90318.rs:22:10 - | -LL | If<{ TypeId::of::() != TypeId::of::<()>() }>: True, - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - | -note: impl defined here, but it is not `const` - --> $SRC_DIR/core/src/any.rs:LL:COL - = note: calls in 
constants are limited to constant functions, tuple structs and tuple variants - -error: aborting due to 4 previous errors +error: aborting due to 2 previous errors -For more information about this error, try `rustc --explain E0015`. diff --git a/tests/ui/consts/const_cmp_type_id.rs b/tests/ui/consts/const_cmp_type_id.rs index dca0615083a..def615bd92b 100644 --- a/tests/ui/consts/const_cmp_type_id.rs +++ b/tests/ui/consts/const_cmp_type_id.rs @@ -6,10 +6,9 @@ use std::any::TypeId; fn main() { const { assert!(TypeId::of::() == TypeId::of::()); - //~^ ERROR the trait bound `TypeId: const PartialEq` is not satisfied assert!(TypeId::of::<()>() != TypeId::of::()); - //~^ ERROR the trait bound `TypeId: const PartialEq` is not satisfied let _a = TypeId::of::() < TypeId::of::(); + //~^ ERROR: cannot call non-const operator in constants // can't assert `_a` because it is not deterministic // FIXME(const_trait_impl) make it pass } diff --git a/tests/ui/consts/const_cmp_type_id.stderr b/tests/ui/consts/const_cmp_type_id.stderr index a8242a200ef..540eec5098b 100644 --- a/tests/ui/consts/const_cmp_type_id.stderr +++ b/tests/ui/consts/const_cmp_type_id.stderr @@ -1,15 +1,13 @@ -error[E0277]: the trait bound `TypeId: const PartialEq` is not satisfied - --> $DIR/const_cmp_type_id.rs:8:17 +error[E0015]: cannot call non-const operator in constants + --> $DIR/const_cmp_type_id.rs:10:18 | -LL | assert!(TypeId::of::() == TypeId::of::()); - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -error[E0277]: the trait bound `TypeId: const PartialEq` is not satisfied - --> $DIR/const_cmp_type_id.rs:10:17 +LL | let _a = TypeId::of::() < TypeId::of::(); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | -LL | assert!(TypeId::of::<()>() != TypeId::of::()); - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +note: impl defined here, but it is not `const` + --> $SRC_DIR/core/src/any.rs:LL:COL + = note: calls in constants are limited to constant functions, tuple structs and tuple variants -error: aborting due to 2 previous errors +error: aborting due to 1 previous error -For more information about this error, try `rustc --explain E0277`. +For more information about this error, try `rustc --explain E0015`. diff --git a/tests/ui/consts/const_transmute_type_id.rs b/tests/ui/consts/const_transmute_type_id.rs new file mode 100644 index 00000000000..56ead6a622b --- /dev/null +++ b/tests/ui/consts/const_transmute_type_id.rs @@ -0,0 +1,11 @@ +#![feature(const_type_id, const_trait_impl)] + +use std::any::TypeId; + +const _: () = { + let id = TypeId::of::(); + let id: u8 = unsafe { (&raw const id).cast::().read() }; + //~^ ERROR: unable to turn pointer into integer +}; + +fn main() {} diff --git a/tests/ui/consts/const_transmute_type_id.stderr b/tests/ui/consts/const_transmute_type_id.stderr new file mode 100644 index 00000000000..85bd4ea2736 --- /dev/null +++ b/tests/ui/consts/const_transmute_type_id.stderr @@ -0,0 +1,12 @@ +error[E0080]: unable to turn pointer into integer + --> $DIR/const_transmute_type_id.rs:7:27 + | +LL | let id: u8 = unsafe { (&raw const id).cast::().read() }; + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ evaluation of `_` failed here + | + = help: this code performed an operation that depends on the underlying bytes representing a pointer + = help: the absolute address of a pointer is not known at compile-time, so such operations are not supported + +error: aborting due to 1 previous error + +For more information about this error, try `rustc --explain E0080`. 
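For contrast with the `const_transmute_type_id*` tests in this series, which exercise invalid `TypeId` values, the usage this change is meant to support is plain `TypeId` comparison in const contexts. A minimal sketch, assuming the nightly `const_type_id` and `const_trait_impl` feature gates used by the tests above (the exact gates may still change):

#![feature(const_type_id, const_trait_impl)]

use std::any::TypeId;

fn main() {
    const {
        // With `impl const PartialEq for TypeId`, these comparisons are
        // evaluated at compile time through the `type_id_eq` intrinsic path.
        assert!(TypeId::of::<u8>() == TypeId::of::<u8>());
        assert!(TypeId::of::<()>() != TypeId::of::<u8>());
    }
}
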
diff --git a/tests/ui/consts/const_transmute_type_id2.rs b/tests/ui/consts/const_transmute_type_id2.rs new file mode 100644 index 00000000000..e29cf8171ac --- /dev/null +++ b/tests/ui/consts/const_transmute_type_id2.rs @@ -0,0 +1,14 @@ +//@ normalize-stderr: "0x(ff)+" -> "" + +#![feature(const_type_id, const_trait_impl)] + +use std::any::TypeId; + +const _: () = { + let a: TypeId = unsafe { std::mem::transmute(u128::MAX) }; + let b: TypeId = unsafe { std::mem::transmute(u128::MAX) }; + assert!(a == b); + //~^ ERROR: pointer must point to some allocation +}; + +fn main() {} diff --git a/tests/ui/consts/const_transmute_type_id2.stderr b/tests/ui/consts/const_transmute_type_id2.stderr new file mode 100644 index 00000000000..5646eb1257d --- /dev/null +++ b/tests/ui/consts/const_transmute_type_id2.stderr @@ -0,0 +1,15 @@ +error[E0080]: pointer not dereferenceable: pointer must point to some allocation, but got [noalloc] which is a dangling pointer (it has no provenance) + --> $DIR/const_transmute_type_id2.rs:10:13 + | +LL | assert!(a == b); + | ^^^^^^ evaluation of `_` failed inside this call + | +note: inside `::eq` + --> $SRC_DIR/core/src/any.rs:LL:COL +note: inside `::eq::compiletime` + --> $SRC_DIR/core/src/any.rs:LL:COL + = note: this error originates in the macro `$crate::intrinsics::const_eval_select` which comes from the expansion of the macro `crate::intrinsics::const_eval_select` (in Nightly builds, run with -Z macro-backtrace for more info) + +error: aborting due to 1 previous error + +For more information about this error, try `rustc --explain E0080`. diff --git a/tests/ui/consts/const_transmute_type_id3.rs b/tests/ui/consts/const_transmute_type_id3.rs new file mode 100644 index 00000000000..a870d6e7e80 --- /dev/null +++ b/tests/ui/consts/const_transmute_type_id3.rs @@ -0,0 +1,16 @@ +#![feature(const_type_id, const_trait_impl)] + +use std::any::TypeId; + +const _: () = { + let a = TypeId::of::<()>(); + let mut b = TypeId::of::<()>(); + unsafe { + let ptr = &mut b as *mut TypeId as *mut usize; + std::ptr::write(ptr.offset(1), 999); + } + assert!(a == b); + //~^ ERROR: one of the TypeId arguments is invalid, the hash does not match the type it represents +}; + +fn main() {} diff --git a/tests/ui/consts/const_transmute_type_id3.stderr b/tests/ui/consts/const_transmute_type_id3.stderr new file mode 100644 index 00000000000..8cfdcfebaa4 --- /dev/null +++ b/tests/ui/consts/const_transmute_type_id3.stderr @@ -0,0 +1,15 @@ +error[E0080]: type_id_eq: one of the TypeId arguments is invalid, the hash does not match the type it represents + --> $DIR/const_transmute_type_id3.rs:12:13 + | +LL | assert!(a == b); + | ^^^^^^ evaluation of `_` failed inside this call + | +note: inside `::eq` + --> $SRC_DIR/core/src/any.rs:LL:COL +note: inside `::eq::compiletime` + --> $SRC_DIR/core/src/any.rs:LL:COL + = note: this error originates in the macro `$crate::intrinsics::const_eval_select` which comes from the expansion of the macro `crate::intrinsics::const_eval_select` (in Nightly builds, run with -Z macro-backtrace for more info) + +error: aborting due to 1 previous error + +For more information about this error, try `rustc --explain E0080`. 
diff --git a/tests/ui/consts/const_transmute_type_id4.rs b/tests/ui/consts/const_transmute_type_id4.rs new file mode 100644 index 00000000000..bc71f961a51 --- /dev/null +++ b/tests/ui/consts/const_transmute_type_id4.rs @@ -0,0 +1,16 @@ +#![feature(const_type_id, const_trait_impl)] + +use std::any::TypeId; + +const _: () = { + let a = TypeId::of::<()>(); + let mut b = TypeId::of::<()>(); + unsafe { + let ptr = &mut b as *mut TypeId as *mut *const (); + std::ptr::write(ptr.offset(0), main as fn() as *const ()); + } + assert!(a == b); + //~^ ERROR: type_id_eq: `TypeId` provenance is not a type id +}; + +fn main() {} diff --git a/tests/ui/consts/const_transmute_type_id4.stderr b/tests/ui/consts/const_transmute_type_id4.stderr new file mode 100644 index 00000000000..b418a79d7f0 --- /dev/null +++ b/tests/ui/consts/const_transmute_type_id4.stderr @@ -0,0 +1,15 @@ +error[E0080]: type_id_eq: `TypeId` provenance is not a type id + --> $DIR/const_transmute_type_id4.rs:12:13 + | +LL | assert!(a == b); + | ^^^^^^ evaluation of `_` failed inside this call + | +note: inside `::eq` + --> $SRC_DIR/core/src/any.rs:LL:COL +note: inside `::eq::compiletime` + --> $SRC_DIR/core/src/any.rs:LL:COL + = note: this error originates in the macro `$crate::intrinsics::const_eval_select` which comes from the expansion of the macro `crate::intrinsics::const_eval_select` (in Nightly builds, run with -Z macro-backtrace for more info) + +error: aborting due to 1 previous error + +For more information about this error, try `rustc --explain E0080`. diff --git a/tests/ui/consts/issue-73976-monomorphic.rs b/tests/ui/consts/issue-73976-monomorphic.rs index 561c1976051..3bfdb397afb 100644 --- a/tests/ui/consts/issue-73976-monomorphic.rs +++ b/tests/ui/consts/issue-73976-monomorphic.rs @@ -1,4 +1,4 @@ -//@ known-bug: #110395 +//@ check-pass // // This test is complement to the test in issue-73976-polymorphic.rs. // In that test we ensure that polymorphic use of type_id and type_name in patterns diff --git a/tests/ui/consts/issue-73976-monomorphic.stderr b/tests/ui/consts/issue-73976-monomorphic.stderr deleted file mode 100644 index 367d5be09da..00000000000 --- a/tests/ui/consts/issue-73976-monomorphic.stderr +++ /dev/null @@ -1,9 +0,0 @@ -error[E0277]: the trait bound `TypeId: [const] PartialEq` is not satisfied - --> $DIR/issue-73976-monomorphic.rs:21:5 - | -LL | GetTypeId::::VALUE == GetTypeId::::VALUE - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -error: aborting due to 1 previous error - -For more information about this error, try `rustc --explain E0277`. -- cgit 1.4.1-3-g733a5 From 84eeca2e2fc40bb8d6641846f18af9d8fc6a9681 Mon Sep 17 00:00:00 2001 From: Oli Scherer Date: Thu, 10 Jul 2025 07:27:41 +0000 Subject: Make some "safe" llvm ops actually sound --- compiler/rustc_codegen_llvm/src/back/write.rs | 2 +- compiler/rustc_codegen_llvm/src/builder/autodiff.rs | 2 +- compiler/rustc_codegen_llvm/src/consts.rs | 2 +- compiler/rustc_codegen_llvm/src/llvm/mod.rs | 10 ++++++---- 4 files changed, 9 insertions(+), 7 deletions(-) (limited to 'compiler/rustc_codegen_llvm/src') diff --git a/compiler/rustc_codegen_llvm/src/back/write.rs b/compiler/rustc_codegen_llvm/src/back/write.rs index bde6a9cf4bc..506286fc255 100644 --- a/compiler/rustc_codegen_llvm/src/back/write.rs +++ b/compiler/rustc_codegen_llvm/src/back/write.rs @@ -1182,7 +1182,7 @@ fn create_msvc_imps( .filter_map(|val| { // Exclude some symbols that we know are not Rust symbols. 
let name = llvm::get_value_name(val); - if ignored(name) { None } else { Some((val, name)) } + if ignored(&name) { None } else { Some((val, name)) } }) .map(move |(val, name)| { let mut imp_name = prefix.as_bytes().to_vec(); diff --git a/compiler/rustc_codegen_llvm/src/builder/autodiff.rs b/compiler/rustc_codegen_llvm/src/builder/autodiff.rs index b07d9a5cfca..5afb9a60d42 100644 --- a/compiler/rustc_codegen_llvm/src/builder/autodiff.rs +++ b/compiler/rustc_codegen_llvm/src/builder/autodiff.rs @@ -306,7 +306,7 @@ fn generate_enzyme_call<'ll>( // add outer_fn name to ad_name to make it unique, in case users apply autodiff to multiple // functions. Unwrap will only panic, if LLVM gave us an invalid string. let name = llvm::get_value_name(outer_fn); - let outer_fn_name = std::str::from_utf8(name).unwrap(); + let outer_fn_name = std::str::from_utf8(&name).unwrap(); ad_name.push_str(outer_fn_name); // Let us assume the user wrote the following function square: diff --git a/compiler/rustc_codegen_llvm/src/consts.rs b/compiler/rustc_codegen_llvm/src/consts.rs index 28f5282c6b0..9c5008e0304 100644 --- a/compiler/rustc_codegen_llvm/src/consts.rs +++ b/compiler/rustc_codegen_llvm/src/consts.rs @@ -430,7 +430,7 @@ impl<'ll> CodegenCx<'ll, '_> { // specific rules on what can be cast. So instead of adding a new way to // generate static initializers that match the static's type, we picked // the easier option and retroactively change the type of the static item itself. - let name = llvm::get_value_name(g).to_vec(); + let name = llvm::get_value_name(g); llvm::set_value_name(g, b""); let linkage = llvm::get_linkage(g); diff --git a/compiler/rustc_codegen_llvm/src/llvm/mod.rs b/compiler/rustc_codegen_llvm/src/llvm/mod.rs index 661174a80df..3fc83fca352 100644 --- a/compiler/rustc_codegen_llvm/src/llvm/mod.rs +++ b/compiler/rustc_codegen_llvm/src/llvm/mod.rs @@ -211,7 +211,7 @@ pub(crate) fn SetFunctionCallConv(fn_: &Value, cc: CallConv) { // function. // For more details on COMDAT sections see e.g., https://www.airs.com/blog/archives/52 pub(crate) fn SetUniqueComdat(llmod: &Module, val: &Value) { - let name_buf = get_value_name(val).to_vec(); + let name_buf = get_value_name(val); let name = CString::from_vec_with_nul(name_buf).or_else(|buf| CString::new(buf.into_bytes())).unwrap(); set_comdat(llmod, val, &name); @@ -319,12 +319,14 @@ pub(crate) fn get_param(llfn: &Value, index: c_uint) -> &Value { } } -/// Safe wrapper for `LLVMGetValueName2` into a byte slice -pub(crate) fn get_value_name(value: &Value) -> &[u8] { +/// Safe wrapper for `LLVMGetValueName2` +/// Needs to allocate the value, because `set_value_name` will invalidate +/// the pointer. 
+pub(crate) fn get_value_name(value: &Value) -> Vec<u8> { unsafe { let mut len = 0; let data = LLVMGetValueName2(value, &mut len); - std::slice::from_raw_parts(data.cast(), len) + std::slice::from_raw_parts(data.cast(), len).to_vec() } } -- cgit 1.4.1-3-g733a5 From 86349e31dde1f9aa5a8a519ff0213c5461f7f4d7 Mon Sep 17 00:00:00 2001 From: Jonathan Brouwer Date: Sat, 12 Jul 2025 22:53:51 +0200 Subject: Port `#[omit_gdb_pretty_printer_section]` to the new attribute parsing infrastructure Signed-off-by: Jonathan Brouwer --- compiler/rustc_attr_data_structures/src/attributes.rs | 3 +++ .../rustc_attr_data_structures/src/encode_cross_crate.rs | 1 + .../rustc_attr_parsing/src/attributes/codegen_attrs.rs | 8 ++++++++ compiler/rustc_attr_parsing/src/context.rs | 5 +++-- compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs | 5 ++--- compiler/rustc_parse/src/validate_attr.rs | 1 + compiler/rustc_passes/src/check_attr.rs | 4 ++-- tests/ui/attributes/malformed-attrs.stderr | 15 +++++++++------ 8 files changed, 29 insertions(+), 13 deletions(-) (limited to 'compiler/rustc_codegen_llvm/src') diff --git a/compiler/rustc_attr_data_structures/src/attributes.rs b/compiler/rustc_attr_data_structures/src/attributes.rs index f61efcf2388..2e2d1fd5f30 100644 --- a/compiler/rustc_attr_data_structures/src/attributes.rs +++ b/compiler/rustc_attr_data_structures/src/attributes.rs @@ -328,6 +328,9 @@ pub enum AttributeKind { /// Represents `#[non_exhaustive]` NonExhaustive(Span), + /// Represents `#[omit_gdb_pretty_printer_section]` + OmitGdbPrettyPrinterSection, + /// Represents `#[optimize(size|speed)]` Optimize(OptimizeAttr, Span), diff --git a/compiler/rustc_attr_data_structures/src/encode_cross_crate.rs b/compiler/rustc_attr_data_structures/src/encode_cross_crate.rs index ad587523e03..342e995541f 100644 --- a/compiler/rustc_attr_data_structures/src/encode_cross_crate.rs +++ b/compiler/rustc_attr_data_structures/src/encode_cross_crate.rs @@ -50,6 +50,7 @@ impl AttributeKind { NoImplicitPrelude(..) => No, NoMangle(..) => No, NonExhaustive(..) => Yes, + OmitGdbPrettyPrinterSection => No, Optimize(..) => No, ParenSugar(..) => No, PassByValue(..)
=> Yes, diff --git a/compiler/rustc_attr_parsing/src/attributes/codegen_attrs.rs b/compiler/rustc_attr_parsing/src/attributes/codegen_attrs.rs index fdec09edaa1..34d9b048348 100644 --- a/compiler/rustc_attr_parsing/src/attributes/codegen_attrs.rs +++ b/compiler/rustc_attr_parsing/src/attributes/codegen_attrs.rs @@ -334,3 +334,11 @@ impl CombineAttributeParser for TargetFeatureParser { features } } + +pub(crate) struct OmitGdbPrettyPrinterSectionParser; + +impl NoArgsAttributeParser for OmitGdbPrettyPrinterSectionParser { + const PATH: &[Symbol] = &[sym::omit_gdb_pretty_printer_section]; + const ON_DUPLICATE: OnDuplicate = OnDuplicate::Error; + const CREATE: fn(Span) -> AttributeKind = |_| AttributeKind::OmitGdbPrettyPrinterSection; +} diff --git a/compiler/rustc_attr_parsing/src/context.rs b/compiler/rustc_attr_parsing/src/context.rs index cc10bb3098a..ca0834972f7 100644 --- a/compiler/rustc_attr_parsing/src/context.rs +++ b/compiler/rustc_attr_parsing/src/context.rs @@ -16,8 +16,8 @@ use rustc_span::{DUMMY_SP, ErrorGuaranteed, Span, Symbol, sym}; use crate::attributes::allow_unstable::{AllowConstFnUnstableParser, AllowInternalUnstableParser}; use crate::attributes::codegen_attrs::{ - ColdParser, ExportNameParser, NakedParser, NoMangleParser, OptimizeParser, TargetFeatureParser, - TrackCallerParser, UsedParser, + ColdParser, ExportNameParser, NakedParser, NoMangleParser, OmitGdbPrettyPrinterSectionParser, + OptimizeParser, TargetFeatureParser, TrackCallerParser, UsedParser, }; use crate::attributes::confusables::ConfusablesParser; use crate::attributes::deprecation::DeprecationParser; @@ -171,6 +171,7 @@ attribute_parsers!( Single>, Single>, Single>, + Single>, Single>, Single>, Single>, diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs b/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs index 8f0948b8183..49ee96a41d6 100644 --- a/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs +++ b/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs @@ -1,13 +1,12 @@ // .debug_gdb_scripts binary section. 
-use rustc_ast::attr; +use rustc_attr_data_structures::{AttributeKind, find_attr}; use rustc_codegen_ssa::base::collect_debugger_visualizers_transitive; use rustc_codegen_ssa::traits::*; use rustc_hir::def_id::LOCAL_CRATE; use rustc_middle::bug; use rustc_middle::middle::debugger_visualizer::DebuggerVisualizerType; use rustc_session::config::{CrateType, DebugInfo}; -use rustc_span::sym; use crate::builder::Builder; use crate::common::CodegenCx; @@ -87,7 +86,7 @@ pub(crate) fn get_or_insert_gdb_debug_scripts_section_global<'ll>( pub(crate) fn needs_gdb_debug_scripts_section(cx: &CodegenCx<'_, '_>) -> bool { let omit_gdb_pretty_printer_section = - attr::contains_name(cx.tcx.hir_krate_attrs(), sym::omit_gdb_pretty_printer_section); + find_attr!(cx.tcx.hir_krate_attrs(), AttributeKind::OmitGdbPrettyPrinterSection); // To ensure the section `__rustc_debug_gdb_scripts_section__` will not create // ODR violations at link time, this section will not be emitted for rlibs since diff --git a/compiler/rustc_parse/src/validate_attr.rs b/compiler/rustc_parse/src/validate_attr.rs index bb5c1e0e653..422c0982234 100644 --- a/compiler/rustc_parse/src/validate_attr.rs +++ b/compiler/rustc_parse/src/validate_attr.rs @@ -305,6 +305,7 @@ pub fn check_builtin_meta_item( | sym::naked | sym::no_mangle | sym::non_exhaustive + | sym::omit_gdb_pretty_printer_section | sym::path | sym::ignore | sym::must_use diff --git a/compiler/rustc_passes/src/check_attr.rs b/compiler/rustc_passes/src/check_attr.rs index 2c95fead9e8..912513e88a7 100644 --- a/compiler/rustc_passes/src/check_attr.rs +++ b/compiler/rustc_passes/src/check_attr.rs @@ -240,7 +240,8 @@ impl<'tcx> CheckAttrVisitor<'tcx> { AttributeKind::BodyStability { .. } | AttributeKind::ConstStabilityIndirect | AttributeKind::MacroTransparency(_) - | AttributeKind::Dummy, + | AttributeKind::Dummy + | AttributeKind::OmitGdbPrettyPrinterSection, ) => { /* do nothing */ } Attribute::Parsed(AttributeKind::AsPtr(attr_span)) => { self.check_applied_to_fn_or_method(hir_id, *attr_span, span, target) @@ -369,7 +370,6 @@ impl<'tcx> CheckAttrVisitor<'tcx> { // need to be fixed | sym::cfi_encoding // FIXME(cfi_encoding) | sym::pointee // FIXME(derive_coerce_pointee) - | sym::omit_gdb_pretty_printer_section // FIXME(omit_gdb_pretty_printer_section) | sym::instruction_set // broken on stable!!! | sym::windows_subsystem // broken on stable!!! 
| sym::patchable_function_entry // FIXME(patchable_function_entry) diff --git a/tests/ui/attributes/malformed-attrs.stderr b/tests/ui/attributes/malformed-attrs.stderr index be529df3a49..b8a71be18fc 100644 --- a/tests/ui/attributes/malformed-attrs.stderr +++ b/tests/ui/attributes/malformed-attrs.stderr @@ -22,12 +22,6 @@ error[E0463]: can't find crate for `wloop` LL | extern crate wloop; | ^^^^^^^^^^^^^^^^^^^ can't find crate -error: malformed `omit_gdb_pretty_printer_section` attribute input - --> $DIR/malformed-attrs.rs:26:1 - | -LL | #![omit_gdb_pretty_printer_section = 1] - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ help: must be of the form: `#![omit_gdb_pretty_printer_section]` - error: malformed `windows_subsystem` attribute input --> $DIR/malformed-attrs.rs:29:1 | @@ -283,6 +277,15 @@ LL | #[debugger_visualizer] = note: OR = note: expected: `gdb_script_file = "..."` +error[E0565]: malformed `omit_gdb_pretty_printer_section` attribute input + --> $DIR/malformed-attrs.rs:26:1 + | +LL | #![omit_gdb_pretty_printer_section = 1] + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^---^ + | | | + | | didn't expect any arguments here + | help: must be of the form: `#[omit_gdb_pretty_printer_section]` + error[E0539]: malformed `export_name` attribute input --> $DIR/malformed-attrs.rs:32:1 | -- cgit 1.4.1-3-g733a5 From d3d51b4fdbd6854da015f501e6566ca17cb023e5 Mon Sep 17 00:00:00 2001 From: Oli Scherer Date: Wed, 9 Jul 2025 09:12:42 +0000 Subject: Avoid a bunch of unnecessary `unsafe` blocks in cg_llvm --- compiler/rustc_codegen_llvm/src/back/write.rs | 77 ++++++++++------------ compiler/rustc_codegen_llvm/src/common.rs | 8 +-- compiler/rustc_codegen_llvm/src/consts.rs | 12 ++-- compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs | 2 +- .../rustc_codegen_llvm/src/debuginfo/metadata.rs | 2 +- compiler/rustc_codegen_llvm/src/declare.rs | 2 +- compiler/rustc_codegen_llvm/src/llvm/ffi.rs | 6 +- compiler/rustc_codegen_llvm/src/llvm/mod.rs | 10 +-- 8 files changed, 54 insertions(+), 65 deletions(-) (limited to 'compiler/rustc_codegen_llvm/src') diff --git a/compiler/rustc_codegen_llvm/src/back/write.rs b/compiler/rustc_codegen_llvm/src/back/write.rs index 506286fc255..313bf6d20a6 100644 --- a/compiler/rustc_codegen_llvm/src/back/write.rs +++ b/compiler/rustc_codegen_llvm/src/back/write.rs @@ -879,9 +879,7 @@ pub(crate) fn codegen( .generic_activity_with_arg("LLVM_module_codegen_embed_bitcode", &*module.name); let thin_bc = module.thin_lto_buffer.as_deref().expect("cannot find embedded bitcode"); - unsafe { - embed_bitcode(cgcx, llcx, llmod, &config.bc_cmdline, &thin_bc); - } + embed_bitcode(cgcx, llcx, llmod, &config.bc_cmdline, &thin_bc); } } @@ -945,7 +943,7 @@ pub(crate) fn codegen( // binaries. So we must clone the module to produce the asm output // if we are also producing object code. let llmod = if let EmitObj::ObjectCode(_) = config.emit_obj { - unsafe { llvm::LLVMCloneModule(llmod) } + llvm::LLVMCloneModule(llmod) } else { llmod }; @@ -1073,7 +1071,7 @@ pub(crate) fn bitcode_section_name(cgcx: &CodegenContext) -> } /// Embed the bitcode of an LLVM module for LTO in the LLVM module itself. -unsafe fn embed_bitcode( +fn embed_bitcode( cgcx: &CodegenContext, llcx: &llvm::Context, llmod: &llvm::Module, @@ -1115,43 +1113,40 @@ unsafe fn embed_bitcode( // Unfortunately, LLVM provides no way to set custom section flags. For ELF // and COFF we emit the sections using module level inline assembly for that // reason (see issue #90326 for historical background). 
- unsafe { - if cgcx.target_is_like_darwin - || cgcx.target_is_like_aix - || cgcx.target_arch == "wasm32" - || cgcx.target_arch == "wasm64" - { - // We don't need custom section flags, create LLVM globals. - let llconst = common::bytes_in_context(llcx, bitcode); - let llglobal = - llvm::add_global(llmod, common::val_ty(llconst), c"rustc.embedded.module"); - llvm::set_initializer(llglobal, llconst); - - llvm::set_section(llglobal, bitcode_section_name(cgcx)); - llvm::set_linkage(llglobal, llvm::Linkage::PrivateLinkage); - llvm::LLVMSetGlobalConstant(llglobal, llvm::True); - - let llconst = common::bytes_in_context(llcx, cmdline.as_bytes()); - let llglobal = - llvm::add_global(llmod, common::val_ty(llconst), c"rustc.embedded.cmdline"); - llvm::set_initializer(llglobal, llconst); - let section = if cgcx.target_is_like_darwin { - c"__LLVM,__cmdline" - } else if cgcx.target_is_like_aix { - c".info" - } else { - c".llvmcmd" - }; - llvm::set_section(llglobal, section); - llvm::set_linkage(llglobal, llvm::Linkage::PrivateLinkage); + + if cgcx.target_is_like_darwin + || cgcx.target_is_like_aix + || cgcx.target_arch == "wasm32" + || cgcx.target_arch == "wasm64" + { + // We don't need custom section flags, create LLVM globals. + let llconst = common::bytes_in_context(llcx, bitcode); + let llglobal = llvm::add_global(llmod, common::val_ty(llconst), c"rustc.embedded.module"); + llvm::set_initializer(llglobal, llconst); + + llvm::set_section(llglobal, bitcode_section_name(cgcx)); + llvm::set_linkage(llglobal, llvm::Linkage::PrivateLinkage); + llvm::LLVMSetGlobalConstant(llglobal, llvm::True); + + let llconst = common::bytes_in_context(llcx, cmdline.as_bytes()); + let llglobal = llvm::add_global(llmod, common::val_ty(llconst), c"rustc.embedded.cmdline"); + llvm::set_initializer(llglobal, llconst); + let section = if cgcx.target_is_like_darwin { + c"__LLVM,__cmdline" + } else if cgcx.target_is_like_aix { + c".info" } else { - // We need custom section flags, so emit module-level inline assembly. - let section_flags = if cgcx.is_pe_coff { "n" } else { "e" }; - let asm = create_section_with_flags_asm(".llvmbc", section_flags, bitcode); - llvm::append_module_inline_asm(llmod, &asm); - let asm = create_section_with_flags_asm(".llvmcmd", section_flags, cmdline.as_bytes()); - llvm::append_module_inline_asm(llmod, &asm); - } + c".llvmcmd" + }; + llvm::set_section(llglobal, section); + llvm::set_linkage(llglobal, llvm::Linkage::PrivateLinkage); + } else { + // We need custom section flags, so emit module-level inline assembly. 
+ let section_flags = if cgcx.is_pe_coff { "n" } else { "e" }; + let asm = create_section_with_flags_asm(".llvmbc", section_flags, bitcode); + llvm::append_module_inline_asm(llmod, &asm); + let asm = create_section_with_flags_asm(".llvmcmd", section_flags, cmdline.as_bytes()); + llvm::append_module_inline_asm(llmod, &asm); } } diff --git a/compiler/rustc_codegen_llvm/src/common.rs b/compiler/rustc_codegen_llvm/src/common.rs index b9b5c776d86..f9ab96b5789 100644 --- a/compiler/rustc_codegen_llvm/src/common.rs +++ b/compiler/rustc_codegen_llvm/src/common.rs @@ -215,10 +215,10 @@ impl<'ll, 'tcx> ConstCodegenMethods for CodegenCx<'ll, 'tcx> { bug!("symbol `{}` is already defined", sym); }); llvm::set_initializer(g, sc); - unsafe { - llvm::LLVMSetGlobalConstant(g, True); - llvm::LLVMSetUnnamedAddress(g, llvm::UnnamedAddr::Global); - } + + llvm::set_global_constant(g, true); + llvm::set_unnamed_address(g, llvm::UnnamedAddr::Global); + llvm::set_linkage(g, llvm::Linkage::InternalLinkage); // Cast to default address space if globals are in a different addrspace let g = self.const_pointercast(g, self.type_ptr()); diff --git a/compiler/rustc_codegen_llvm/src/consts.rs b/compiler/rustc_codegen_llvm/src/consts.rs index 5deddb3ed98..070a9b64486 100644 --- a/compiler/rustc_codegen_llvm/src/consts.rs +++ b/compiler/rustc_codegen_llvm/src/consts.rs @@ -19,11 +19,10 @@ use tracing::{debug, instrument, trace}; use crate::common::{AsCCharPtr, CodegenCx}; use crate::errors::SymbolAlreadyDefined; -use crate::llvm::{self, True}; use crate::type_::Type; use crate::type_of::LayoutLlvmExt; use crate::value::Value; -use crate::{base, debuginfo}; +use crate::{base, debuginfo, llvm}; pub(crate) fn const_alloc_to_llvm<'ll>( cx: &CodegenCx<'ll, '_>, @@ -247,7 +246,7 @@ impl<'ll> CodegenCx<'ll, '_> { }; llvm::set_initializer(gv, cv); set_global_alignment(self, gv, align); - llvm::SetUnnamedAddress(gv, llvm::UnnamedAddr::Global); + llvm::set_unnamed_address(gv, llvm::UnnamedAddr::Global); gv } @@ -272,9 +271,8 @@ impl<'ll> CodegenCx<'ll, '_> { return gv; } let gv = self.static_addr_of_mut(cv, align, kind); - unsafe { - llvm::LLVMSetGlobalConstant(gv, True); - } + llvm::set_global_constant(gv, true); + self.const_globals.borrow_mut().insert(cv, gv); gv } @@ -465,7 +463,7 @@ impl<'ll> CodegenCx<'ll, '_> { // Forward the allocation's mutability (picked by the const interner) to LLVM. if alloc.mutability.is_not() { - llvm::LLVMSetGlobalConstant(g, llvm::True); + llvm::set_global_constant(g, true); } debuginfo::build_global_var_di_node(self, def_id, g); diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs b/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs index 8f0948b8183..3e04dac6cd6 100644 --- a/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs +++ b/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs @@ -75,7 +75,7 @@ pub(crate) fn get_or_insert_gdb_debug_scripts_section_global<'ll>( llvm::set_section(section_var, c".debug_gdb_scripts"); llvm::set_initializer(section_var, cx.const_bytes(section_contents)); llvm::LLVMSetGlobalConstant(section_var, llvm::True); - llvm::LLVMSetUnnamedAddress(section_var, llvm::UnnamedAddr::Global); + llvm::set_unnamed_address(section_var, llvm::UnnamedAddr::Global); llvm::set_linkage(section_var, llvm::Linkage::LinkOnceODRLinkage); // This should make sure that the whole section is not larger than // the string it contains. Otherwise we get a warning from GDB. 
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs index 9b4736e50e6..1a50fb5823b 100644 --- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs +++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs @@ -1630,7 +1630,7 @@ pub(crate) fn create_vtable_di_node<'ll, 'tcx>( // When full debuginfo is enabled, we want to try and prevent vtables from being // merged. Otherwise debuggers will have a hard time mapping from dyn pointer // to concrete type. - llvm::SetUnnamedAddress(vtable, llvm::UnnamedAddr::No); + llvm::set_unnamed_address(vtable, llvm::UnnamedAddr::No); let vtable_name = compute_debuginfo_vtable_name(cx.tcx, ty, poly_trait_ref, VTableNameKind::GlobalVariable); diff --git a/compiler/rustc_codegen_llvm/src/declare.rs b/compiler/rustc_codegen_llvm/src/declare.rs index 2419ec1f888..710032c774b 100644 --- a/compiler/rustc_codegen_llvm/src/declare.rs +++ b/compiler/rustc_codegen_llvm/src/declare.rs @@ -49,7 +49,7 @@ pub(crate) fn declare_simple_fn<'ll>( }; llvm::SetFunctionCallConv(llfn, callconv); - llvm::SetUnnamedAddress(llfn, unnamed); + llvm::set_unnamed_address(llfn, unnamed); llvm::set_visibility(llfn, visibility); llfn diff --git a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs index 5c34ab2e304..2af57e22cd5 100644 --- a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs +++ b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs @@ -1009,7 +1009,7 @@ unsafe extern "C" { ModuleID: *const c_char, C: &Context, ) -> &Module; - pub(crate) fn LLVMCloneModule(M: &Module) -> &Module; + pub(crate) safe fn LLVMCloneModule(M: &Module) -> &Module; /// Data layout. See Module::getDataLayout. pub(crate) fn LLVMGetDataLayoutStr(M: &Module) -> *const c_char; @@ -1179,7 +1179,7 @@ unsafe extern "C" { pub(crate) fn LLVMIsThreadLocal(GlobalVar: &Value) -> Bool; pub(crate) fn LLVMSetThreadLocalMode(GlobalVar: &Value, Mode: ThreadLocalMode); pub(crate) fn LLVMIsGlobalConstant(GlobalVar: &Value) -> Bool; - pub(crate) fn LLVMSetGlobalConstant(GlobalVar: &Value, IsConstant: Bool); + pub(crate) safe fn LLVMSetGlobalConstant(GlobalVar: &Value, IsConstant: Bool); pub(crate) safe fn LLVMSetTailCall(CallInst: &Value, IsTailCall: Bool); // Operations on attributes @@ -1718,7 +1718,7 @@ unsafe extern "C" { pub(crate) safe fn LLVMMetadataAsValue<'a>(C: &'a Context, MD: &'a Metadata) -> &'a Value; - pub(crate) fn LLVMSetUnnamedAddress(Global: &Value, UnnamedAddr: UnnamedAddr); + pub(crate) safe fn LLVMSetUnnamedAddress(Global: &Value, UnnamedAddr: UnnamedAddr); pub(crate) fn LLVMIsAConstantInt(value_ref: &Value) -> Option<&ConstantInt>; diff --git a/compiler/rustc_codegen_llvm/src/llvm/mod.rs b/compiler/rustc_codegen_llvm/src/llvm/mod.rs index 3fc83fca352..154ba4fd690 100644 --- a/compiler/rustc_codegen_llvm/src/llvm/mod.rs +++ b/compiler/rustc_codegen_llvm/src/llvm/mod.rs @@ -217,10 +217,8 @@ pub(crate) fn SetUniqueComdat(llmod: &Module, val: &Value) { set_comdat(llmod, val, &name); } -pub(crate) fn SetUnnamedAddress(global: &Value, unnamed: UnnamedAddr) { - unsafe { - LLVMSetUnnamedAddress(global, unnamed); - } +pub(crate) fn set_unnamed_address(global: &Value, unnamed: UnnamedAddr) { + LLVMSetUnnamedAddress(global, unnamed); } pub(crate) fn set_thread_local_mode(global: &Value, mode: ThreadLocalMode) { @@ -260,9 +258,7 @@ pub(crate) fn set_initializer(llglobal: &Value, constant_val: &Value) { } pub(crate) fn set_global_constant(llglobal: &Value, is_constant: bool) { - unsafe { - 
LLVMSetGlobalConstant(llglobal, if is_constant { ffi::True } else { ffi::False }); - } + LLVMSetGlobalConstant(llglobal, if is_constant { ffi::True } else { ffi::False }); } pub(crate) fn get_linkage(llglobal: &Value) -> Linkage { -- cgit 1.4.1-3-g733a5 From e574fef728605089da8144960348469896751c3c Mon Sep 17 00:00:00 2001 From: Oli Scherer Date: Wed, 9 Jul 2025 09:23:05 +0000 Subject: Shrink some `unsafe` blocks in cg_llvm --- compiler/rustc_codegen_llvm/src/builder.rs | 13 +- compiler/rustc_codegen_llvm/src/consts.rs | 251 +++++++++++++-------------- compiler/rustc_codegen_llvm/src/llvm/ffi.rs | 8 +- compiler/rustc_codegen_llvm/src/mono_item.rs | 4 +- 4 files changed, 137 insertions(+), 139 deletions(-) (limited to 'compiler/rustc_codegen_llvm/src') diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs index 9c3b866aa3c..7304460c493 100644 --- a/compiler/rustc_codegen_llvm/src/builder.rs +++ b/compiler/rustc_codegen_llvm/src/builder.rs @@ -637,17 +637,16 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { } else if place.layout.is_llvm_immediate() { let mut const_llval = None; let llty = place.layout.llvm_type(self); - unsafe { - if let Some(global) = llvm::LLVMIsAGlobalVariable(place.val.llval) { - if llvm::LLVMIsGlobalConstant(global) == llvm::True { - if let Some(init) = llvm::LLVMGetInitializer(global) { - if self.val_ty(init) == llty { - const_llval = Some(init); - } + if let Some(global) = llvm::LLVMIsAGlobalVariable(place.val.llval) { + if llvm::LLVMIsGlobalConstant(global) == llvm::True { + if let Some(init) = llvm::LLVMGetInitializer(global) { + if self.val_ty(init) == llty { + const_llval = Some(init); } } } } + let llval = const_llval.unwrap_or_else(|| { let load = self.load(llty, place.val.llval, place.val.align); if let abi::BackendRepr::Scalar(scalar) = place.layout.backend_repr { diff --git a/compiler/rustc_codegen_llvm/src/consts.rs b/compiler/rustc_codegen_llvm/src/consts.rs index 070a9b64486..27f6a119727 100644 --- a/compiler/rustc_codegen_llvm/src/consts.rs +++ b/compiler/rustc_codegen_llvm/src/consts.rs @@ -396,149 +396,148 @@ impl<'ll> CodegenCx<'ll, '_> { } fn codegen_static_item(&mut self, def_id: DefId) { - unsafe { - assert!( - llvm::LLVMGetInitializer( - self.instances.borrow().get(&Instance::mono(self.tcx, def_id)).unwrap() - ) - .is_none() - ); - let attrs = self.tcx.codegen_fn_attrs(def_id); + assert!( + llvm::LLVMGetInitializer( + self.instances.borrow().get(&Instance::mono(self.tcx, def_id)).unwrap() + ) + .is_none() + ); + let attrs = self.tcx.codegen_fn_attrs(def_id); - let Ok((v, alloc)) = codegen_static_initializer(self, def_id) else { - // Error has already been reported - return; - }; - let alloc = alloc.inner(); + let Ok((v, alloc)) = codegen_static_initializer(self, def_id) else { + // Error has already been reported + return; + }; + let alloc = alloc.inner(); - let val_llty = self.val_ty(v); + let val_llty = self.val_ty(v); - let g = self.get_static_inner(def_id, val_llty); - let llty = self.get_type_of_global(g); + let g = self.get_static_inner(def_id, val_llty); + let llty = self.get_type_of_global(g); - let g = if val_llty == llty { - g - } else { - // codegen_static_initializer creates the global value just from the - // `Allocation` data by generating one big struct value that is just - // all the bytes and pointers after each other. This will almost never - // match the type that the static was declared with. 
Unfortunately - // we can't just LLVMConstBitCast our way out of it because that has very - // specific rules on what can be cast. So instead of adding a new way to - // generate static initializers that match the static's type, we picked - // the easier option and retroactively change the type of the static item itself. - let name = llvm::get_value_name(g); - llvm::set_value_name(g, b""); - - let linkage = llvm::get_linkage(g); - let visibility = llvm::get_visibility(g); - - let new_g = llvm::LLVMRustGetOrInsertGlobal( - self.llmod, - name.as_c_char_ptr(), - name.len(), - val_llty, - ); - - llvm::set_linkage(new_g, linkage); - llvm::set_visibility(new_g, visibility); - - // The old global has had its name removed but is returned by - // get_static since it is in the instance cache. Provide an - // alternative lookup that points to the new global so that - // global_asm! can compute the correct mangled symbol name - // for the global. - self.renamed_statics.borrow_mut().insert(def_id, new_g); - - // To avoid breaking any invariants, we leave around the old - // global for the moment; we'll replace all references to it - // with the new global later. (See base::codegen_backend.) - self.statics_to_rauw.borrow_mut().push((g, new_g)); - new_g - }; - set_global_alignment(self, g, alloc.align); - llvm::set_initializer(g, v); - - self.assume_dso_local(g, true); - - // Forward the allocation's mutability (picked by the const interner) to LLVM. - if alloc.mutability.is_not() { - llvm::set_global_constant(g, true); - } + let g = if val_llty == llty { + g + } else { + // codegen_static_initializer creates the global value just from the + // `Allocation` data by generating one big struct value that is just + // all the bytes and pointers after each other. This will almost never + // match the type that the static was declared with. Unfortunately + // we can't just LLVMConstBitCast our way out of it because that has very + // specific rules on what can be cast. So instead of adding a new way to + // generate static initializers that match the static's type, we picked + // the easier option and retroactively change the type of the static item itself. + let name = String::from_utf8(llvm::get_value_name(g)) + .expect("we declare our statics with a utf8-valid name"); + llvm::set_value_name(g, b""); + + let linkage = llvm::get_linkage(g); + let visibility = llvm::get_visibility(g); + + let new_g = self.declare_global(&name, val_llty); + + llvm::set_linkage(new_g, linkage); + llvm::set_visibility(new_g, visibility); + + // The old global has had its name removed but is returned by + // get_static since it is in the instance cache. Provide an + // alternative lookup that points to the new global so that + // global_asm! can compute the correct mangled symbol name + // for the global. + self.renamed_statics.borrow_mut().insert(def_id, new_g); + + // To avoid breaking any invariants, we leave around the old + // global for the moment; we'll replace all references to it + // with the new global later. (See base::codegen_backend.) + self.statics_to_rauw.borrow_mut().push((g, new_g)); + new_g + }; + set_global_alignment(self, g, alloc.align); + llvm::set_initializer(g, v); - debuginfo::build_global_var_di_node(self, def_id, g); + self.assume_dso_local(g, true); - if attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL) { - llvm::set_thread_local_mode(g, self.tls_model); - } + // Forward the allocation's mutability (picked by the const interner) to LLVM. 
+ if alloc.mutability.is_not() { + llvm::set_global_constant(g, true); + } - // Wasm statics with custom link sections get special treatment as they - // go into custom sections of the wasm executable. The exception to this - // is the `.init_array` section which are treated specially by the wasm linker. - if self.tcx.sess.target.is_like_wasm - && attrs - .link_section - .map(|link_section| !link_section.as_str().starts_with(".init_array")) - .unwrap_or(true) - { - if let Some(section) = attrs.link_section { - let section = llvm::LLVMMDStringInContext2( + debuginfo::build_global_var_di_node(self, def_id, g); + + if attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL) { + llvm::set_thread_local_mode(g, self.tls_model); + } + + // Wasm statics with custom link sections get special treatment as they + // go into custom sections of the wasm executable. The exception to this + // is the `.init_array` section which are treated specially by the wasm linker. + if self.tcx.sess.target.is_like_wasm + && attrs + .link_section + .map(|link_section| !link_section.as_str().starts_with(".init_array")) + .unwrap_or(true) + { + if let Some(section) = attrs.link_section { + let section = unsafe { + llvm::LLVMMDStringInContext2( self.llcx, section.as_str().as_c_char_ptr(), section.as_str().len(), - ); - assert!(alloc.provenance().ptrs().is_empty()); - - // The `inspect` method is okay here because we checked for provenance, and - // because we are doing this access to inspect the final interpreter state (not - // as part of the interpreter execution). - let bytes = - alloc.inspect_with_uninit_and_ptr_outside_interpreter(0..alloc.len()); - let alloc = - llvm::LLVMMDStringInContext2(self.llcx, bytes.as_c_char_ptr(), bytes.len()); - let data = [section, alloc]; - let meta = llvm::LLVMMDNodeInContext2(self.llcx, data.as_ptr(), data.len()); - let val = self.get_metadata_value(meta); + ) + }; + assert!(alloc.provenance().ptrs().is_empty()); + + // The `inspect` method is okay here because we checked for provenance, and + // because we are doing this access to inspect the final interpreter state (not + // as part of the interpreter execution). + let bytes = alloc.inspect_with_uninit_and_ptr_outside_interpreter(0..alloc.len()); + let alloc = unsafe { + llvm::LLVMMDStringInContext2(self.llcx, bytes.as_c_char_ptr(), bytes.len()) + }; + let data = [section, alloc]; + let meta = + unsafe { llvm::LLVMMDNodeInContext2(self.llcx, data.as_ptr(), data.len()) }; + let val = self.get_metadata_value(meta); + unsafe { llvm::LLVMAddNamedMetadataOperand( self.llmod, c"wasm.custom_sections".as_ptr(), val, - ); - } - } else { - base::set_link_section(g, attrs); + ) + }; } + } else { + base::set_link_section(g, attrs); + } - base::set_variable_sanitizer_attrs(g, attrs); - - if attrs.flags.contains(CodegenFnAttrFlags::USED_COMPILER) { - // `USED` and `USED_LINKER` can't be used together. - assert!(!attrs.flags.contains(CodegenFnAttrFlags::USED_LINKER)); - - // The semantics of #[used] in Rust only require the symbol to make it into the - // object file. It is explicitly allowed for the linker to strip the symbol if it - // is dead, which means we are allowed to use `llvm.compiler.used` instead of - // `llvm.used` here. 
- // - // Additionally, https://reviews.llvm.org/D97448 in LLVM 13 started emitting unique - // sections with SHF_GNU_RETAIN flag for llvm.used symbols, which may trigger bugs - // in the handling of `.init_array` (the static constructor list) in versions of - // the gold linker (prior to the one released with binutils 2.36). - // - // That said, we only ever emit these when `#[used(compiler)]` is explicitly - // requested. This is to avoid similar breakage on other targets, in particular - // MachO targets have *their* static constructor lists broken if `llvm.compiler.used` - // is emitted rather than `llvm.used`. However, that check happens when assigning - // the `CodegenFnAttrFlags` in the `codegen_fn_attrs` query, so we don't need to - // take care of it here. - self.add_compiler_used_global(g); - } - if attrs.flags.contains(CodegenFnAttrFlags::USED_LINKER) { - // `USED` and `USED_LINKER` can't be used together. - assert!(!attrs.flags.contains(CodegenFnAttrFlags::USED_COMPILER)); + base::set_variable_sanitizer_attrs(g, attrs); + + if attrs.flags.contains(CodegenFnAttrFlags::USED_COMPILER) { + // `USED` and `USED_LINKER` can't be used together. + assert!(!attrs.flags.contains(CodegenFnAttrFlags::USED_LINKER)); + + // The semantics of #[used] in Rust only require the symbol to make it into the + // object file. It is explicitly allowed for the linker to strip the symbol if it + // is dead, which means we are allowed to use `llvm.compiler.used` instead of + // `llvm.used` here. + // + // Additionally, https://reviews.llvm.org/D97448 in LLVM 13 started emitting unique + // sections with SHF_GNU_RETAIN flag for llvm.used symbols, which may trigger bugs + // in the handling of `.init_array` (the static constructor list) in versions of + // the gold linker (prior to the one released with binutils 2.36). + // + // That said, we only ever emit these when `#[used(compiler)]` is explicitly + // requested. This is to avoid similar breakage on other targets, in particular + // MachO targets have *their* static constructor lists broken if `llvm.compiler.used` + // is emitted rather than `llvm.used`. However, that check happens when assigning + // the `CodegenFnAttrFlags` in the `codegen_fn_attrs` query, so we don't need to + // take care of it here. + self.add_compiler_used_global(g); + } + if attrs.flags.contains(CodegenFnAttrFlags::USED_LINKER) { + // `USED` and `USED_LINKER` can't be used together. 
+ assert!(!attrs.flags.contains(CodegenFnAttrFlags::USED_COMPILER)); - self.add_used_global(g); - } + self.add_used_global(g); } } diff --git a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs index 2af57e22cd5..0b1e632cbc4 100644 --- a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs +++ b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs @@ -1168,17 +1168,17 @@ unsafe extern "C" { pub(crate) fn LLVMGlobalGetValueType(Global: &Value) -> &Type; // Operations on global variables - pub(crate) fn LLVMIsAGlobalVariable(GlobalVar: &Value) -> Option<&Value>; + pub(crate) safe fn LLVMIsAGlobalVariable(GlobalVar: &Value) -> Option<&Value>; pub(crate) fn LLVMAddGlobal<'a>(M: &'a Module, Ty: &'a Type, Name: *const c_char) -> &'a Value; pub(crate) fn LLVMGetNamedGlobal(M: &Module, Name: *const c_char) -> Option<&Value>; pub(crate) fn LLVMGetFirstGlobal(M: &Module) -> Option<&Value>; pub(crate) fn LLVMGetNextGlobal(GlobalVar: &Value) -> Option<&Value>; pub(crate) fn LLVMDeleteGlobal(GlobalVar: &Value); - pub(crate) fn LLVMGetInitializer(GlobalVar: &Value) -> Option<&Value>; + pub(crate) safe fn LLVMGetInitializer(GlobalVar: &Value) -> Option<&Value>; pub(crate) fn LLVMSetInitializer<'a>(GlobalVar: &'a Value, ConstantVal: &'a Value); - pub(crate) fn LLVMIsThreadLocal(GlobalVar: &Value) -> Bool; + pub(crate) safe fn LLVMIsThreadLocal(GlobalVar: &Value) -> Bool; pub(crate) fn LLVMSetThreadLocalMode(GlobalVar: &Value, Mode: ThreadLocalMode); - pub(crate) fn LLVMIsGlobalConstant(GlobalVar: &Value) -> Bool; + pub(crate) safe fn LLVMIsGlobalConstant(GlobalVar: &Value) -> Bool; pub(crate) safe fn LLVMSetGlobalConstant(GlobalVar: &Value, IsConstant: Bool); pub(crate) safe fn LLVMSetTailCall(CallInst: &Value, IsTailCall: Bool); diff --git a/compiler/rustc_codegen_llvm/src/mono_item.rs b/compiler/rustc_codegen_llvm/src/mono_item.rs index 3f38e1e191b..8f70270f203 100644 --- a/compiler/rustc_codegen_llvm/src/mono_item.rs +++ b/compiler/rustc_codegen_llvm/src/mono_item.rs @@ -131,8 +131,8 @@ impl CodegenCx<'_, '_> { } // Thread-local variables generally don't support copy relocations. 
- let is_thread_local_var = unsafe { llvm::LLVMIsAGlobalVariable(llval) } - .is_some_and(|v| unsafe { llvm::LLVMIsThreadLocal(v) } == llvm::True); + let is_thread_local_var = llvm::LLVMIsAGlobalVariable(llval) + .is_some_and(|v| llvm::LLVMIsThreadLocal(v) == llvm::True); if is_thread_local_var { return false; } -- cgit 1.4.1-3-g733a5 From b9baf63f99bbdf69571e26fca907af032c416591 Mon Sep 17 00:00:00 2001 From: Oli Scherer Date: Wed, 9 Jul 2025 09:31:58 +0000 Subject: Merge `typeid_metadata` and `create_metadata` --- compiler/rustc_codegen_llvm/src/builder.rs | 2 +- compiler/rustc_codegen_llvm/src/builder/autodiff.rs | 16 ++++++++-------- compiler/rustc_codegen_llvm/src/context.rs | 7 ++++--- compiler/rustc_codegen_llvm/src/type_.rs | 10 ++++------ 4 files changed, 17 insertions(+), 18 deletions(-) (limited to 'compiler/rustc_codegen_llvm/src') diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs index 7304460c493..4c5bc8430cc 100644 --- a/compiler/rustc_codegen_llvm/src/builder.rs +++ b/compiler/rustc_codegen_llvm/src/builder.rs @@ -1720,7 +1720,7 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> { } else { cfi::typeid_for_fnabi(self.tcx, fn_abi, options) }; - let typeid_metadata = self.cx.typeid_metadata(typeid).unwrap(); + let typeid_metadata = self.cx.create_metadata(typeid); let dbg_loc = self.get_dbg_loc(); // Test whether the function pointer is associated with the type identifier using the diff --git a/compiler/rustc_codegen_llvm/src/builder/autodiff.rs b/compiler/rustc_codegen_llvm/src/builder/autodiff.rs index 5afb9a60d42..9383903fd02 100644 --- a/compiler/rustc_codegen_llvm/src/builder/autodiff.rs +++ b/compiler/rustc_codegen_llvm/src/builder/autodiff.rs @@ -76,12 +76,12 @@ fn match_args_from_caller_to_enzyme<'ll>( outer_pos = 1; } - let enzyme_const = cx.create_metadata("enzyme_const".to_string()).unwrap(); - let enzyme_out = cx.create_metadata("enzyme_out".to_string()).unwrap(); - let enzyme_dup = cx.create_metadata("enzyme_dup".to_string()).unwrap(); - let enzyme_dupv = cx.create_metadata("enzyme_dupv".to_string()).unwrap(); - let enzyme_dupnoneed = cx.create_metadata("enzyme_dupnoneed".to_string()).unwrap(); - let enzyme_dupnoneedv = cx.create_metadata("enzyme_dupnoneedv".to_string()).unwrap(); + let enzyme_const = cx.create_metadata("enzyme_const".to_string()); + let enzyme_out = cx.create_metadata("enzyme_out".to_string()); + let enzyme_dup = cx.create_metadata("enzyme_dup".to_string()); + let enzyme_dupv = cx.create_metadata("enzyme_dupv".to_string()); + let enzyme_dupnoneed = cx.create_metadata("enzyme_dupnoneed".to_string()); + let enzyme_dupnoneedv = cx.create_metadata("enzyme_dupnoneedv".to_string()); while activity_pos < inputs.len() { let diff_activity = inputs[activity_pos as usize]; @@ -378,12 +378,12 @@ fn generate_enzyme_call<'ll>( let mut args = Vec::with_capacity(num_args as usize + 1); args.push(fn_to_diff); - let enzyme_primal_ret = cx.create_metadata("enzyme_primal_return".to_string()).unwrap(); + let enzyme_primal_ret = cx.create_metadata("enzyme_primal_return".to_string()); if matches!(attrs.ret_activity, DiffActivity::Dual | DiffActivity::Active) { args.push(cx.get_metadata_value(enzyme_primal_ret)); } if attrs.width > 1 { - let enzyme_width = cx.create_metadata("enzyme_width".to_string()).unwrap(); + let enzyme_width = cx.create_metadata("enzyme_width".to_string()); args.push(cx.get_metadata_value(enzyme_width)); args.push(cx.get_const_int(cx.type_i64(), attrs.width as u64)); } diff --git 
a/compiler/rustc_codegen_llvm/src/context.rs b/compiler/rustc_codegen_llvm/src/context.rs index 90582e23b04..5616c4d4e45 100644 --- a/compiler/rustc_codegen_llvm/src/context.rs +++ b/compiler/rustc_codegen_llvm/src/context.rs @@ -473,6 +473,7 @@ pub(crate) unsafe fn create_module<'ll>( #[allow(clippy::option_env_unwrap)] let rustc_producer = format!("rustc version {}", option_env!("CFG_VERSION").expect("CFG_VERSION")); + let name_metadata = unsafe { llvm::LLVMMDStringInContext2( llcx, @@ -698,10 +699,10 @@ impl<'ll, CX: Borrow>> GenericCx<'ll, CX> { } } - pub(crate) fn create_metadata(&self, name: String) -> Option<&'ll Metadata> { - Some(unsafe { + pub(crate) fn create_metadata(&self, name: String) -> &'ll Metadata { + unsafe { llvm::LLVMMDStringInContext2(self.llcx(), name.as_ptr() as *const c_char, name.len()) - }) + } } pub(crate) fn get_functions(&self) -> Vec<&'ll Value> { diff --git a/compiler/rustc_codegen_llvm/src/type_.rs b/compiler/rustc_codegen_llvm/src/type_.rs index ee472e75ed4..5b19ca91e4d 100644 --- a/compiler/rustc_codegen_llvm/src/type_.rs +++ b/compiler/rustc_codegen_llvm/src/type_.rs @@ -2,7 +2,7 @@ use std::borrow::Borrow; use std::hash::{Hash, Hasher}; use std::{fmt, ptr}; -use libc::{c_char, c_uint}; +use libc::c_uint; use rustc_abi::{AddressSpace, Align, Integer, Reg, Size}; use rustc_codegen_ssa::common::TypeKind; use rustc_codegen_ssa::traits::*; @@ -299,7 +299,7 @@ impl<'ll, 'tcx> LayoutTypeCodegenMethods<'tcx> for CodegenCx<'ll, 'tcx> { impl<'ll, 'tcx> TypeMembershipCodegenMethods<'tcx> for CodegenCx<'ll, 'tcx> { fn add_type_metadata(&self, function: &'ll Value, typeid: String) { - let typeid_metadata = self.typeid_metadata(typeid).unwrap(); + let typeid_metadata = self.create_metadata(typeid); unsafe { let v = [llvm::LLVMValueAsMetadata(self.const_usize(0)), typeid_metadata]; llvm::LLVMRustGlobalAddMetadata( @@ -311,7 +311,7 @@ impl<'ll, 'tcx> TypeMembershipCodegenMethods<'tcx> for CodegenCx<'ll, 'tcx> { } fn set_type_metadata(&self, function: &'ll Value, typeid: String) { - let typeid_metadata = self.typeid_metadata(typeid).unwrap(); + let typeid_metadata = self.create_metadata(typeid); unsafe { let v = [llvm::LLVMValueAsMetadata(self.const_usize(0)), typeid_metadata]; llvm::LLVMGlobalSetMetadata( @@ -323,9 +323,7 @@ impl<'ll, 'tcx> TypeMembershipCodegenMethods<'tcx> for CodegenCx<'ll, 'tcx> { } fn typeid_metadata(&self, typeid: String) -> Option<&'ll Metadata> { - Some(unsafe { - llvm::LLVMMDStringInContext2(self.llcx, typeid.as_ptr() as *const c_char, typeid.len()) - }) + Some(self.create_metadata(typeid)) } fn add_kcfi_type_metadata(&self, function: &'ll Value, kcfi_typeid: u32) { -- cgit 1.4.1-3-g733a5 From 56d22cd29ff6d3ea1fa8972462ad94792960b1ef Mon Sep 17 00:00:00 2001 From: Oli Scherer Date: Wed, 9 Jul 2025 09:36:19 +0000 Subject: Use context methods instead of directly calling FFI --- compiler/rustc_codegen_llvm/src/builder.rs | 4 +--- compiler/rustc_codegen_llvm/src/consts.rs | 8 +------- compiler/rustc_codegen_llvm/src/context.rs | 14 +++++--------- compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs | 8 ++------ 4 files changed, 9 insertions(+), 25 deletions(-) (limited to 'compiler/rustc_codegen_llvm/src') diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs index 4c5bc8430cc..3c64cb79d5e 100644 --- a/compiler/rustc_codegen_llvm/src/builder.rs +++ b/compiler/rustc_codegen_llvm/src/builder.rs @@ -303,9 +303,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { } let id_str = 
"branch_weights"; - let id = unsafe { - llvm::LLVMMDStringInContext2(self.cx.llcx, id_str.as_ptr().cast(), id_str.len()) - }; + let id = self.cx.create_metadata(id_str.into()); // For switch instructions with 2 targets, the `llvm.expect` intrinsic is used. // This function handles switch instructions with more than 2 targets and it needs to diff --git a/compiler/rustc_codegen_llvm/src/consts.rs b/compiler/rustc_codegen_llvm/src/consts.rs index 27f6a119727..40e686b253c 100644 --- a/compiler/rustc_codegen_llvm/src/consts.rs +++ b/compiler/rustc_codegen_llvm/src/consts.rs @@ -477,13 +477,7 @@ impl<'ll> CodegenCx<'ll, '_> { .unwrap_or(true) { if let Some(section) = attrs.link_section { - let section = unsafe { - llvm::LLVMMDStringInContext2( - self.llcx, - section.as_str().as_c_char_ptr(), - section.as_str().len(), - ) - }; + let section = self.create_metadata(section.as_str().into()); assert!(alloc.provenance().ptrs().is_empty()); // The `inspect` method is okay here because we checked for provenance, and diff --git a/compiler/rustc_codegen_llvm/src/context.rs b/compiler/rustc_codegen_llvm/src/context.rs index 5616c4d4e45..51ba3fb8089 100644 --- a/compiler/rustc_codegen_llvm/src/context.rs +++ b/compiler/rustc_codegen_llvm/src/context.rs @@ -34,7 +34,6 @@ use smallvec::SmallVec; use crate::back::write::to_llvm_code_model; use crate::callee::get_fn; -use crate::common::AsCCharPtr; use crate::debuginfo::metadata::apply_vcall_visibility_metadata; use crate::llvm::Metadata; use crate::type_::Type; @@ -169,6 +168,8 @@ pub(crate) unsafe fn create_module<'ll>( let mod_name = SmallCStr::new(mod_name); let llmod = unsafe { llvm::LLVMModuleCreateWithNameInContext(mod_name.as_ptr(), llcx) }; + let cx = SimpleCx::new(llmod, llcx, tcx.data_layout.pointer_size()); + let mut target_data_layout = sess.target.data_layout.to_string(); let llvm_version = llvm_util::get_version(); @@ -474,18 +475,13 @@ pub(crate) unsafe fn create_module<'ll>( let rustc_producer = format!("rustc version {}", option_env!("CFG_VERSION").expect("CFG_VERSION")); - let name_metadata = unsafe { - llvm::LLVMMDStringInContext2( - llcx, - rustc_producer.as_c_char_ptr(), - rustc_producer.as_bytes().len(), - ) - }; + let name_metadata = cx.create_metadata(rustc_producer); + unsafe { llvm::LLVMAddNamedMetadataOperand( llmod, c"llvm.ident".as_ptr(), - &llvm::LLVMMetadataAsValue(llcx, llvm::LLVMMDNodeInContext2(llcx, &name_metadata, 1)), + &cx.get_metadata_value(llvm::LLVMMDNodeInContext2(llcx, &name_metadata, 1)), ); } diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs index 1a50fb5823b..3fdf4e19d58 100644 --- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs +++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs @@ -5,7 +5,7 @@ use std::path::{Path, PathBuf}; use std::sync::Arc; use std::{iter, ptr}; -use libc::{c_char, c_longlong, c_uint}; +use libc::{c_longlong, c_uint}; use rustc_abi::{Align, Size}; use rustc_codegen_ssa::debuginfo::type_names::{VTableNameKind, cpp_like_debuginfo}; use rustc_codegen_ssa::traits::*; @@ -1582,13 +1582,9 @@ pub(crate) fn apply_vcall_visibility_metadata<'ll, 'tcx>( }; let trait_ref_typeid = typeid_for_trait_ref(cx.tcx, trait_ref); + let typeid = cx.create_metadata(trait_ref_typeid); unsafe { - let typeid = llvm::LLVMMDStringInContext2( - cx.llcx, - trait_ref_typeid.as_ptr() as *const c_char, - trait_ref_typeid.as_bytes().len(), - ); let v = [llvm::LLVMValueAsMetadata(cx.const_usize(0)), typeid]; llvm::LLVMRustGlobalAddMetadata( 
vtable, -- cgit 1.4.1-3-g733a5 From 7f95f042677f86df55da58cdebe9ce31a1e928a8 Mon Sep 17 00:00:00 2001 From: Oli Scherer Date: Wed, 9 Jul 2025 09:48:46 +0000 Subject: Eliminate all direct uses of LLVMMDStringInContext2 --- compiler/rustc_codegen_llvm/src/builder.rs | 5 ++--- compiler/rustc_codegen_llvm/src/builder/autodiff.rs | 16 ++++++++-------- compiler/rustc_codegen_llvm/src/consts.rs | 8 +++----- compiler/rustc_codegen_llvm/src/context.rs | 4 ++-- compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs | 2 +- compiler/rustc_codegen_llvm/src/declare.rs | 4 ++-- compiler/rustc_codegen_llvm/src/type_.rs | 6 +++--- compiler/rustc_codegen_ssa/src/meth.rs | 3 ++- compiler/rustc_codegen_ssa/src/traits/type_.rs | 6 +++--- 9 files changed, 26 insertions(+), 28 deletions(-) (limited to 'compiler/rustc_codegen_llvm/src') diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs index 3c64cb79d5e..514923ad6f3 100644 --- a/compiler/rustc_codegen_llvm/src/builder.rs +++ b/compiler/rustc_codegen_llvm/src/builder.rs @@ -302,8 +302,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { return; } - let id_str = "branch_weights"; - let id = self.cx.create_metadata(id_str.into()); + let id = self.cx.create_metadata(b"branch_weights"); // For switch instructions with 2 targets, the `llvm.expect` intrinsic is used. // This function handles switch instructions with more than 2 targets and it needs to @@ -1718,7 +1717,7 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> { } else { cfi::typeid_for_fnabi(self.tcx, fn_abi, options) }; - let typeid_metadata = self.cx.create_metadata(typeid); + let typeid_metadata = self.cx.create_metadata(typeid.as_bytes()); let dbg_loc = self.get_dbg_loc(); // Test whether the function pointer is associated with the type identifier using the diff --git a/compiler/rustc_codegen_llvm/src/builder/autodiff.rs b/compiler/rustc_codegen_llvm/src/builder/autodiff.rs index 9383903fd02..dff68472847 100644 --- a/compiler/rustc_codegen_llvm/src/builder/autodiff.rs +++ b/compiler/rustc_codegen_llvm/src/builder/autodiff.rs @@ -76,12 +76,12 @@ fn match_args_from_caller_to_enzyme<'ll>( outer_pos = 1; } - let enzyme_const = cx.create_metadata("enzyme_const".to_string()); - let enzyme_out = cx.create_metadata("enzyme_out".to_string()); - let enzyme_dup = cx.create_metadata("enzyme_dup".to_string()); - let enzyme_dupv = cx.create_metadata("enzyme_dupv".to_string()); - let enzyme_dupnoneed = cx.create_metadata("enzyme_dupnoneed".to_string()); - let enzyme_dupnoneedv = cx.create_metadata("enzyme_dupnoneedv".to_string()); + let enzyme_const = cx.create_metadata(b"enzyme_const"); + let enzyme_out = cx.create_metadata(b"enzyme_out"); + let enzyme_dup = cx.create_metadata(b"enzyme_dup"); + let enzyme_dupv = cx.create_metadata(b"enzyme_dupv"); + let enzyme_dupnoneed = cx.create_metadata(b"enzyme_dupnoneed"); + let enzyme_dupnoneedv = cx.create_metadata(b"enzyme_dupnoneedv"); while activity_pos < inputs.len() { let diff_activity = inputs[activity_pos as usize]; @@ -378,12 +378,12 @@ fn generate_enzyme_call<'ll>( let mut args = Vec::with_capacity(num_args as usize + 1); args.push(fn_to_diff); - let enzyme_primal_ret = cx.create_metadata("enzyme_primal_return".to_string()); + let enzyme_primal_ret = cx.create_metadata(b"enzyme_primal_return"); if matches!(attrs.ret_activity, DiffActivity::Dual | DiffActivity::Active) { args.push(cx.get_metadata_value(enzyme_primal_ret)); } if attrs.width > 1 { - let enzyme_width = 
cx.create_metadata("enzyme_width".to_string()); + let enzyme_width = cx.create_metadata(b"enzyme_width"); args.push(cx.get_metadata_value(enzyme_width)); args.push(cx.get_const_int(cx.type_i64(), attrs.width as u64)); } diff --git a/compiler/rustc_codegen_llvm/src/consts.rs b/compiler/rustc_codegen_llvm/src/consts.rs index 40e686b253c..0b96b63bc85 100644 --- a/compiler/rustc_codegen_llvm/src/consts.rs +++ b/compiler/rustc_codegen_llvm/src/consts.rs @@ -17,7 +17,7 @@ use rustc_middle::ty::{self, Instance}; use rustc_middle::{bug, span_bug}; use tracing::{debug, instrument, trace}; -use crate::common::{AsCCharPtr, CodegenCx}; +use crate::common::CodegenCx; use crate::errors::SymbolAlreadyDefined; use crate::type_::Type; use crate::type_of::LayoutLlvmExt; @@ -477,16 +477,14 @@ impl<'ll> CodegenCx<'ll, '_> { .unwrap_or(true) { if let Some(section) = attrs.link_section { - let section = self.create_metadata(section.as_str().into()); + let section = self.create_metadata(section.as_str().as_bytes()); assert!(alloc.provenance().ptrs().is_empty()); // The `inspect` method is okay here because we checked for provenance, and // because we are doing this access to inspect the final interpreter state (not // as part of the interpreter execution). let bytes = alloc.inspect_with_uninit_and_ptr_outside_interpreter(0..alloc.len()); - let alloc = unsafe { - llvm::LLVMMDStringInContext2(self.llcx, bytes.as_c_char_ptr(), bytes.len()) - }; + let alloc = self.create_metadata(bytes); let data = [section, alloc]; let meta = unsafe { llvm::LLVMMDNodeInContext2(self.llcx, data.as_ptr(), data.len()) }; diff --git a/compiler/rustc_codegen_llvm/src/context.rs b/compiler/rustc_codegen_llvm/src/context.rs index 51ba3fb8089..6a23becaa96 100644 --- a/compiler/rustc_codegen_llvm/src/context.rs +++ b/compiler/rustc_codegen_llvm/src/context.rs @@ -475,7 +475,7 @@ pub(crate) unsafe fn create_module<'ll>( let rustc_producer = format!("rustc version {}", option_env!("CFG_VERSION").expect("CFG_VERSION")); - let name_metadata = cx.create_metadata(rustc_producer); + let name_metadata = cx.create_metadata(rustc_producer.as_bytes()); unsafe { llvm::LLVMAddNamedMetadataOperand( @@ -695,7 +695,7 @@ impl<'ll, CX: Borrow>> GenericCx<'ll, CX> { } } - pub(crate) fn create_metadata(&self, name: String) -> &'ll Metadata { + pub(crate) fn create_metadata(&self, name: &[u8]) -> &'ll Metadata { unsafe { llvm::LLVMMDStringInContext2(self.llcx(), name.as_ptr() as *const c_char, name.len()) } diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs index 3fdf4e19d58..0e9dbfba658 100644 --- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs +++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs @@ -1582,7 +1582,7 @@ pub(crate) fn apply_vcall_visibility_metadata<'ll, 'tcx>( }; let trait_ref_typeid = typeid_for_trait_ref(cx.tcx, trait_ref); - let typeid = cx.create_metadata(trait_ref_typeid); + let typeid = cx.create_metadata(trait_ref_typeid.as_bytes()); unsafe { let v = [llvm::LLVMValueAsMetadata(cx.const_usize(0)), typeid]; diff --git a/compiler/rustc_codegen_llvm/src/declare.rs b/compiler/rustc_codegen_llvm/src/declare.rs index 710032c774b..eb75716d768 100644 --- a/compiler/rustc_codegen_llvm/src/declare.rs +++ b/compiler/rustc_codegen_llvm/src/declare.rs @@ -176,7 +176,7 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> { { let typeid = cfi::typeid_for_instance(self.tcx, instance, options); if typeids.insert(typeid.clone()) { - self.add_type_metadata(llfn, typeid); + 
self.add_type_metadata(llfn, typeid.as_bytes()); } } } else { @@ -189,7 +189,7 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> { .map(cfi::TypeIdOptions::from_iter) { let typeid = cfi::typeid_for_fnabi(self.tcx, fn_abi, options); - self.add_type_metadata(llfn, typeid); + self.add_type_metadata(llfn, typeid.as_bytes()); } } } diff --git a/compiler/rustc_codegen_llvm/src/type_.rs b/compiler/rustc_codegen_llvm/src/type_.rs index 5b19ca91e4d..89365503138 100644 --- a/compiler/rustc_codegen_llvm/src/type_.rs +++ b/compiler/rustc_codegen_llvm/src/type_.rs @@ -298,7 +298,7 @@ impl<'ll, 'tcx> LayoutTypeCodegenMethods<'tcx> for CodegenCx<'ll, 'tcx> { } impl<'ll, 'tcx> TypeMembershipCodegenMethods<'tcx> for CodegenCx<'ll, 'tcx> { - fn add_type_metadata(&self, function: &'ll Value, typeid: String) { + fn add_type_metadata(&self, function: &'ll Value, typeid: &[u8]) { let typeid_metadata = self.create_metadata(typeid); unsafe { let v = [llvm::LLVMValueAsMetadata(self.const_usize(0)), typeid_metadata]; @@ -310,7 +310,7 @@ impl<'ll, 'tcx> TypeMembershipCodegenMethods<'tcx> for CodegenCx<'ll, 'tcx> { } } - fn set_type_metadata(&self, function: &'ll Value, typeid: String) { + fn set_type_metadata(&self, function: &'ll Value, typeid: &[u8]) { let typeid_metadata = self.create_metadata(typeid); unsafe { let v = [llvm::LLVMValueAsMetadata(self.const_usize(0)), typeid_metadata]; @@ -322,7 +322,7 @@ impl<'ll, 'tcx> TypeMembershipCodegenMethods<'tcx> for CodegenCx<'ll, 'tcx> { } } - fn typeid_metadata(&self, typeid: String) -> Option<&'ll Metadata> { + fn typeid_metadata(&self, typeid: &[u8]) -> Option<&'ll Metadata> { Some(self.create_metadata(typeid)) } diff --git a/compiler/rustc_codegen_ssa/src/meth.rs b/compiler/rustc_codegen_ssa/src/meth.rs index 2aa5c3c27ea..34ad35a729b 100644 --- a/compiler/rustc_codegen_ssa/src/meth.rs +++ b/compiler/rustc_codegen_ssa/src/meth.rs @@ -139,7 +139,8 @@ pub(crate) fn load_vtable<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( && bx.cx().sess().lto() == Lto::Fat { if let Some(trait_ref) = dyn_trait_in_self(bx.tcx(), ty) { - let typeid = bx.typeid_metadata(typeid_for_trait_ref(bx.tcx(), trait_ref)).unwrap(); + let typeid = + bx.typeid_metadata(typeid_for_trait_ref(bx.tcx(), trait_ref).as_bytes()).unwrap(); let func = bx.type_checked_load(llvtable, vtable_byte_offset, typeid); return func; } else if nonnull { diff --git a/compiler/rustc_codegen_ssa/src/traits/type_.rs b/compiler/rustc_codegen_ssa/src/traits/type_.rs index dcd9e25b2c9..32c24965e1b 100644 --- a/compiler/rustc_codegen_ssa/src/traits/type_.rs +++ b/compiler/rustc_codegen_ssa/src/traits/type_.rs @@ -154,9 +154,9 @@ pub trait LayoutTypeCodegenMethods<'tcx>: BackendTypes { // For backends that support CFI using type membership (i.e., testing whether a given pointer is // associated with a type identifier). pub trait TypeMembershipCodegenMethods<'tcx>: BackendTypes { - fn add_type_metadata(&self, _function: Self::Function, _typeid: String) {} - fn set_type_metadata(&self, _function: Self::Function, _typeid: String) {} - fn typeid_metadata(&self, _typeid: String) -> Option { + fn add_type_metadata(&self, _function: Self::Function, _typeid: &[u8]) {} + fn set_type_metadata(&self, _function: Self::Function, _typeid: &[u8]) {} + fn typeid_metadata(&self, _typeid: &[u8]) -> Option { None } fn add_kcfi_type_metadata(&self, _function: Self::Function, _typeid: u32) {} -- cgit 1.4.1-3-g733a5
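A recurring pattern in the later commits of this series is to make an LLVM wrapper safe either by copying data out of LLVM-owned memory (`get_value_name` now returns `Vec<u8>` because a later `set_value_name` would invalidate the borrowed pointer) or by funnelling every metadata string through the single `create_metadata(&[u8])` helper so call sites no longer need `unsafe` blocks. A rough, self-contained sketch of the copy-out idea, using a mock value type rather than the real cg_llvm or LLVM bindings:

// Mock stand-in for an LLVM value handle; not the real bindings.
struct MockValue {
    name: Vec<u8>,
}

// Mirrors the shape of the patched wrapper: hand back an owned copy, since the
// underlying buffer could be invalidated by a later rename.
fn get_value_name(v: &MockValue) -> Vec<u8> {
    v.name.clone()
}

fn set_value_name(v: &mut MockValue, name: &[u8]) {
    v.name = name.to_vec();
}

fn main() {
    let mut v = MockValue { name: b"old_name".to_vec() };
    let old = get_value_name(&v); // owned bytes, detached from `v`
    set_value_name(&mut v, b""); // renaming cannot invalidate `old`
    assert_eq!(old, b"old_name".to_vec());
}

The same reasoning motivates `create_metadata` taking a plain byte slice: callers pass `&[u8]` (for example `typeid.as_bytes()`) and the helper owns the one remaining `unsafe` FFI call.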