Diffstat (limited to 'compiler')
-rw-r--r--  compiler/rustc_abi/src/lib.rs | 295
-rw-r--r--  compiler/rustc_ast_lowering/src/format.rs | 4
-rw-r--r--  compiler/rustc_attr_data_structures/src/attributes.rs | 15
-rw-r--r--  compiler/rustc_attr_data_structures/src/encode_cross_crate.rs | 5
-rw-r--r--  compiler/rustc_attr_parsing/messages.ftl | 4
-rw-r--r--  compiler/rustc_attr_parsing/src/attributes/codegen_attrs.rs | 4
-rw-r--r--  compiler/rustc_attr_parsing/src/attributes/deprecation.rs | 2
-rw-r--r--  compiler/rustc_attr_parsing/src/attributes/dummy.rs | 19
-rw-r--r--  compiler/rustc_attr_parsing/src/attributes/inline.rs | 4
-rw-r--r--  compiler/rustc_attr_parsing/src/attributes/link_attrs.rs | 38
-rw-r--r--  compiler/rustc_attr_parsing/src/attributes/mod.rs | 46
-rw-r--r--  compiler/rustc_attr_parsing/src/attributes/must_use.rs | 2
-rw-r--r--  compiler/rustc_attr_parsing/src/attributes/path.rs | 2
-rw-r--r--  compiler/rustc_attr_parsing/src/attributes/rustc_internal.rs | 6
-rw-r--r--  compiler/rustc_attr_parsing/src/attributes/test_attrs.rs | 2
-rw-r--r--  compiler/rustc_attr_parsing/src/attributes/traits.rs | 2
-rw-r--r--  compiler/rustc_attr_parsing/src/attributes/transparency.rs | 2
-rw-r--r--  compiler/rustc_attr_parsing/src/context.rs | 11
-rw-r--r--  compiler/rustc_borrowck/src/consumers.rs | 81
-rw-r--r--  compiler/rustc_borrowck/src/lib.rs | 43
-rw-r--r--  compiler/rustc_borrowck/src/nll.rs | 6
-rw-r--r--  compiler/rustc_borrowck/src/root_cx.rs | 13
-rw-r--r--  compiler/rustc_codegen_cranelift/src/abi/mod.rs | 2
-rw-r--r--  compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs | 2
-rw-r--r--  compiler/rustc_codegen_cranelift/src/common.rs | 2
-rw-r--r--  compiler/rustc_codegen_cranelift/src/constant.rs | 15
-rw-r--r--  compiler/rustc_codegen_gcc/src/common.rs | 12
-rw-r--r--  compiler/rustc_codegen_gcc/src/consts.rs | 4
-rw-r--r--  compiler/rustc_codegen_gcc/src/intrinsic/mod.rs | 2
-rw-r--r--  compiler/rustc_codegen_gcc/src/intrinsic/simd.rs | 2
-rw-r--r--  compiler/rustc_codegen_gcc/src/lib.rs | 1
-rw-r--r--  compiler/rustc_codegen_llvm/src/asm.rs | 22
-rw-r--r--  compiler/rustc_codegen_llvm/src/back/write.rs | 2
-rw-r--r--  compiler/rustc_codegen_llvm/src/builder/autodiff.rs | 2
-rw-r--r--  compiler/rustc_codegen_llvm/src/common.rs | 20
-rw-r--r--  compiler/rustc_codegen_llvm/src/consts.rs | 18
-rw-r--r--  compiler/rustc_codegen_llvm/src/context.rs | 2
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs | 14
-rw-r--r--  compiler/rustc_codegen_llvm/src/intrinsic.rs | 20
-rw-r--r--  compiler/rustc_codegen_llvm/src/lib.rs | 2
-rw-r--r--  compiler/rustc_codegen_llvm/src/llvm/enzyme_ffi.rs | 1
-rw-r--r--  compiler/rustc_codegen_llvm/src/llvm/ffi.rs | 4
-rw-r--r--  compiler/rustc_codegen_llvm/src/llvm/mod.rs | 10
-rw-r--r--  compiler/rustc_codegen_llvm/src/type_.rs | 4
-rw-r--r--  compiler/rustc_codegen_llvm/src/va_arg.rs | 54
-rw-r--r--  compiler/rustc_codegen_ssa/src/back/link.rs | 31
-rw-r--r--  compiler/rustc_codegen_ssa/src/back/linker.rs | 41
-rw-r--r--  compiler/rustc_codegen_ssa/src/back/write.rs | 2
-rw-r--r--  compiler/rustc_codegen_ssa/src/base.rs | 6
-rw-r--r--  compiler/rustc_codegen_ssa/src/codegen_attrs.rs | 12
-rw-r--r--  compiler/rustc_codegen_ssa/src/meth.rs | 8
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/analyze.rs | 3
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/naked_asm.rs | 2
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/operand.rs | 177
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/rvalue.rs | 27
-rw-r--r--  compiler/rustc_const_eval/messages.ftl | 2
-rw-r--r--  compiler/rustc_const_eval/src/const_eval/machine.rs | 6
-rw-r--r--  compiler/rustc_const_eval/src/errors.rs | 6
-rw-r--r--  compiler/rustc_const_eval/src/interpret/eval_context.rs | 4
-rw-r--r--  compiler/rustc_const_eval/src/interpret/intrinsics.rs | 76
-rw-r--r--  compiler/rustc_const_eval/src/interpret/machine.rs | 25
-rw-r--r--  compiler/rustc_const_eval/src/interpret/memory.rs | 34
-rw-r--r--  compiler/rustc_const_eval/src/interpret/mod.rs | 1
-rw-r--r--  compiler/rustc_const_eval/src/interpret/projection.rs | 6
-rw-r--r--  compiler/rustc_const_eval/src/interpret/util.rs | 23
-rw-r--r--  compiler/rustc_const_eval/src/interpret/validity.rs | 64
-rw-r--r--  compiler/rustc_data_structures/src/profiling.rs | 2
-rw-r--r--  compiler/rustc_errors/messages.ftl | 3
-rw-r--r--  compiler/rustc_errors/src/diagnostic.rs | 2
-rw-r--r--  compiler/rustc_errors/src/diagnostic_impls.rs | 4
-rw-r--r--  compiler/rustc_errors/src/emitter.rs | 2
-rw-r--r--  compiler/rustc_errors/src/lib.rs | 1
-rw-r--r--  compiler/rustc_expand/messages.ftl | 26
-rw-r--r--  compiler/rustc_expand/src/errors.rs | 44
-rw-r--r--  compiler/rustc_expand/src/mbe/metavar_expr.rs | 98
-rw-r--r--  compiler/rustc_hir/src/lang_items.rs | 2
-rw-r--r--  compiler/rustc_hir_analysis/src/check/check.rs | 11
-rw-r--r--  compiler/rustc_hir_analysis/src/check/intrinsic.rs | 9
-rw-r--r--  compiler/rustc_hir_analysis/src/check/wfcheck.rs | 16
-rw-r--r--  compiler/rustc_hir_typeck/messages.ftl | 7
-rw-r--r--  compiler/rustc_hir_typeck/src/callee.rs | 23
-rw-r--r--  compiler/rustc_hir_typeck/src/errors.rs | 18
-rw-r--r--  compiler/rustc_hir_typeck/src/fn_ctxt/adjust_fulfillment_errors.rs | 11
-rw-r--r--  compiler/rustc_hir_typeck/src/upvar.rs | 6
-rw-r--r--  compiler/rustc_infer/src/lib.rs | 2
-rw-r--r--  compiler/rustc_lint/src/early/diagnostics.rs | 3
-rw-r--r--  compiler/rustc_lint/src/lints.rs | 1
-rw-r--r--  compiler/rustc_lint/src/types.rs | 9
-rw-r--r--  compiler/rustc_lint/src/unused.rs | 10
-rw-r--r--  compiler/rustc_lint_defs/src/builtin.rs | 3
-rw-r--r--  compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp | 2
-rw-r--r--  compiler/rustc_middle/src/arena.rs | 2
-rw-r--r--  compiler/rustc_middle/src/hir/mod.rs | 12
-rw-r--r--  compiler/rustc_middle/src/mir/consts.rs | 2
-rw-r--r--  compiler/rustc_middle/src/mir/interpret/allocation.rs | 6
-rw-r--r--  compiler/rustc_middle/src/mir/interpret/allocation/provenance_map.rs | 15
-rw-r--r--  compiler/rustc_middle/src/mir/interpret/error.rs | 2
-rw-r--r--  compiler/rustc_middle/src/mir/interpret/mod.rs | 33
-rw-r--r--  compiler/rustc_middle/src/mir/interpret/pointer.rs | 2
-rw-r--r--  compiler/rustc_middle/src/mir/interpret/value.rs | 8
-rw-r--r--  compiler/rustc_middle/src/mir/pretty.rs | 3
-rw-r--r--  compiler/rustc_middle/src/query/mod.rs | 7
-rw-r--r--  compiler/rustc_middle/src/traits/select.rs | 9
-rw-r--r--  compiler/rustc_middle/src/ty/consts/int.rs | 6
-rw-r--r--  compiler/rustc_middle/src/ty/layout.rs | 2
-rw-r--r--  compiler/rustc_middle/src/ty/print/pretty.rs | 3
-rw-r--r--  compiler/rustc_middle/src/ty/util.rs | 6
-rw-r--r--  compiler/rustc_middle/src/ty/vtable.rs | 4
-rw-r--r--  compiler/rustc_mir_build/src/builder/scope.rs | 5
-rw-r--r--  compiler/rustc_mir_build/src/thir/constant.rs | 2
-rw-r--r--  compiler/rustc_mir_build/src/thir/pattern/check_match.rs | 1
-rw-r--r--  compiler/rustc_mir_dataflow/src/drop_flag_effects.rs | 28
-rw-r--r--  compiler/rustc_mir_dataflow/src/framework/direction.rs | 11
-rw-r--r--  compiler/rustc_mir_dataflow/src/framework/mod.rs | 1
-rw-r--r--  compiler/rustc_mir_dataflow/src/impls/initialized.rs | 88
-rw-r--r--  compiler/rustc_mir_transform/src/elaborate_drops.rs | 2
-rw-r--r--  compiler/rustc_mir_transform/src/remove_uninit_drops.rs | 1
-rw-r--r--  compiler/rustc_monomorphize/src/collector.rs | 1
-rw-r--r--  compiler/rustc_parse/src/lib.rs | 1
-rw-r--r--  compiler/rustc_parse/src/parser/item.rs | 2
-rw-r--r--  compiler/rustc_parse/src/parser/mod.rs | 6
-rw-r--r--  compiler/rustc_parse/src/validate_attr.rs | 4
-rw-r--r--  compiler/rustc_passes/src/check_attr.rs | 29
-rw-r--r--  compiler/rustc_passes/src/check_export.rs | 7
-rw-r--r--  compiler/rustc_passes/src/reachable.rs | 1
-rw-r--r--  compiler/rustc_pattern_analysis/src/checks.rs | 50
-rw-r--r--  compiler/rustc_pattern_analysis/src/lib.rs | 15
-rw-r--r--  compiler/rustc_pattern_analysis/src/rustc.rs | 83
-rw-r--r--  compiler/rustc_pattern_analysis/src/usefulness.rs | 7
-rw-r--r--  compiler/rustc_pattern_analysis/tests/common/mod.rs | 9
-rw-r--r--  compiler/rustc_query_impl/src/lib.rs | 1
-rw-r--r--  compiler/rustc_resolve/src/build_reduced_graph.rs | 36
-rw-r--r--  compiler/rustc_resolve/src/def_collector.rs | 2
-rw-r--r--  compiler/rustc_resolve/src/diagnostics.rs | 9
-rw-r--r--  compiler/rustc_resolve/src/ident.rs | 261
-rw-r--r--  compiler/rustc_resolve/src/late/diagnostics.rs | 19
-rw-r--r--  compiler/rustc_resolve/src/lib.rs | 26
-rw-r--r--  compiler/rustc_resolve/src/macros.rs | 11
-rw-r--r--  compiler/rustc_session/src/config.rs | 99
-rw-r--r--  compiler/rustc_session/src/config/cfg.rs | 2
-rw-r--r--  compiler/rustc_session/src/options.rs | 4
-rw-r--r--  compiler/rustc_smir/src/alloc.rs | 9
-rw-r--r--  compiler/rustc_smir/src/context/impls.rs | 2
-rw-r--r--  compiler/rustc_span/src/symbol.rs | 1
-rw-r--r--  compiler/rustc_target/src/callconv/loongarch.rs | 4
-rw-r--r--  compiler/rustc_target/src/callconv/mips.rs | 2
-rw-r--r--  compiler/rustc_target/src/callconv/mod.rs | 4
-rw-r--r--  compiler/rustc_target/src/callconv/riscv.rs | 4
-rw-r--r--  compiler/rustc_target/src/callconv/sparc.rs | 2
-rw-r--r--  compiler/rustc_target/src/callconv/x86.rs | 2
-rw-r--r--  compiler/rustc_target/src/spec/mod.rs | 31
-rw-r--r--  compiler/rustc_trait_selection/src/error_reporting/traits/ambiguity.rs | 33
-rw-r--r--  compiler/rustc_trait_selection/src/error_reporting/traits/fulfillment_errors.rs | 2
-rw-r--r--  compiler/rustc_trait_selection/src/error_reporting/traits/suggestions.rs | 325
-rw-r--r--  compiler/rustc_trait_selection/src/traits/effects.rs | 51
-rw-r--r--  compiler/rustc_trait_selection/src/traits/select/candidate_assembly.rs | 198
-rw-r--r--  compiler/rustc_trait_selection/src/traits/select/confirmation.rs | 86
-rw-r--r--  compiler/rustc_trait_selection/src/traits/select/mod.rs | 256
-rw-r--r--  compiler/rustc_transmute/src/layout/tree.rs | 2
-rw-r--r--  compiler/rustc_ty_utils/src/assoc.rs | 34
-rw-r--r--  compiler/rustc_ty_utils/src/layout.rs | 4
-rw-r--r--  compiler/stable_mir/src/mir/alloc.rs | 3
-rw-r--r--  compiler/stable_mir/src/unstable/convert/stable/mir.rs | 3
163 files changed, 2402 insertions, 1296 deletions
diff --git a/compiler/rustc_abi/src/lib.rs b/compiler/rustc_abi/src/lib.rs
index a438545c76f..de4b5a46c81 100644
--- a/compiler/rustc_abi/src/lib.rs
+++ b/compiler/rustc_abi/src/lib.rs
@@ -221,6 +221,20 @@ impl ReprOptions {
 /// * Cranelift stores the base-2 log of the lane count in a 4 bit integer.
 pub const MAX_SIMD_LANES: u64 = 1 << 0xF;
 
+/// How pointers are represented in a given address space
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub struct PointerSpec {
+    /// The size of the bitwise representation of the pointer.
+    pointer_size: Size,
+    /// The alignment of pointers for this address space
+    pointer_align: AbiAlign,
+    /// The size of the value a pointer can be offset by in this address space.
+    pointer_offset: Size,
+    /// Pointers into this address space contain extra metadata
+    /// FIXME(workingjubilee): Consider adequately reflecting this in the compiler?
+    _is_fat: bool,
+}
+
 /// Parsed [Data layout](https://llvm.org/docs/LangRef.html#data-layout)
 /// for a target, which contains everything needed to compute layouts.
 #[derive(Debug, PartialEq, Eq)]
@@ -236,13 +250,22 @@ pub struct TargetDataLayout {
     pub f32_align: AbiAlign,
     pub f64_align: AbiAlign,
     pub f128_align: AbiAlign,
-    pub pointer_size: Size,
-    pub pointer_align: AbiAlign,
     pub aggregate_align: AbiAlign,
 
     /// Alignments for vector types.
     pub vector_align: Vec<(Size, AbiAlign)>,
 
+    pub default_address_space: AddressSpace,
+    pub default_address_space_pointer_spec: PointerSpec,
+
+    /// Address space information of all known address spaces.
+    ///
+    /// # Note
+    ///
+    /// This vector does not contain the [`PointerSpec`] relative to the default address space,
+    /// which instead lives in [`Self::default_address_space_pointer_spec`].
+    address_space_info: Vec<(AddressSpace, PointerSpec)>,
+
     pub instruction_address_space: AddressSpace,
 
     /// Minimum size of #[repr(C)] enums (default c_int::BITS, usually 32)
@@ -267,14 +290,20 @@ impl Default for TargetDataLayout {
             f32_align: AbiAlign::new(align(32)),
             f64_align: AbiAlign::new(align(64)),
             f128_align: AbiAlign::new(align(128)),
-            pointer_size: Size::from_bits(64),
-            pointer_align: AbiAlign::new(align(64)),
             aggregate_align: AbiAlign { abi: align(8) },
             vector_align: vec![
                 (Size::from_bits(64), AbiAlign::new(align(64))),
                 (Size::from_bits(128), AbiAlign::new(align(128))),
             ],
-            instruction_address_space: AddressSpace::DATA,
+            default_address_space: AddressSpace::ZERO,
+            default_address_space_pointer_spec: PointerSpec {
+                pointer_size: Size::from_bits(64),
+                pointer_align: AbiAlign::new(align(64)),
+                pointer_offset: Size::from_bits(64),
+                _is_fat: false,
+            },
+            address_space_info: vec![],
+            instruction_address_space: AddressSpace::ZERO,
             c_enum_min_size: Integer::I32,
         }
     }
@@ -288,6 +317,7 @@ pub enum TargetDataLayoutErrors<'a> {
     InconsistentTargetArchitecture { dl: &'a str, target: &'a str },
     InconsistentTargetPointerWidth { pointer_size: u64, target: u32 },
     InvalidBitsSize { err: String },
+    UnknownPointerSpecification { err: String },
 }
 
 impl TargetDataLayout {
@@ -298,6 +328,7 @@ impl TargetDataLayout {
     /// determined from llvm string.
     pub fn parse_from_llvm_datalayout_string<'a>(
         input: &'a str,
+        default_address_space: AddressSpace,
     ) -> Result<TargetDataLayout, TargetDataLayoutErrors<'a>> {
         // Parse an address space index from a string.
         let parse_address_space = |s: &'a str, cause: &'a str| {
@@ -321,19 +352,27 @@ impl TargetDataLayout {
             |s: &'a str, cause: &'a str| parse_bits(s, "size", cause).map(Size::from_bits);
 
         // Parse an alignment string.
-        let parse_align = |s: &[&'a str], cause: &'a str| {
-            if s.is_empty() {
-                return Err(TargetDataLayoutErrors::MissingAlignment { cause });
-            }
+        let parse_align_str = |s: &'a str, cause: &'a str| {
             let align_from_bits = |bits| {
                 Align::from_bits(bits)
                     .map_err(|err| TargetDataLayoutErrors::InvalidAlignment { cause, err })
             };
-            let abi = parse_bits(s[0], "alignment", cause)?;
+            let abi = parse_bits(s, "alignment", cause)?;
             Ok(AbiAlign::new(align_from_bits(abi)?))
         };
 
+        // Parse an alignment sequence, possibly in the form `<align>[:<preferred_alignment>]`,
+        // ignoring the secondary alignment specifications.
+        let parse_align_seq = |s: &[&'a str], cause: &'a str| {
+            if s.is_empty() {
+                return Err(TargetDataLayoutErrors::MissingAlignment { cause });
+            }
+            parse_align_str(s[0], cause)
+        };
+
         let mut dl = TargetDataLayout::default();
+        dl.default_address_space = default_address_space;
+
         let mut i128_align_src = 64;
         for spec in input.split('-') {
             let spec_parts = spec.split(':').collect::<Vec<_>>();
@@ -344,24 +383,107 @@ impl TargetDataLayout {
                 [p] if p.starts_with('P') => {
                     dl.instruction_address_space = parse_address_space(&p[1..], "P")?
                 }
-                ["a", a @ ..] => dl.aggregate_align = parse_align(a, "a")?,
-                ["f16", a @ ..] => dl.f16_align = parse_align(a, "f16")?,
-                ["f32", a @ ..] => dl.f32_align = parse_align(a, "f32")?,
-                ["f64", a @ ..] => dl.f64_align = parse_align(a, "f64")?,
-                ["f128", a @ ..] => dl.f128_align = parse_align(a, "f128")?,
-                // FIXME(erikdesjardins): we should be parsing nonzero address spaces
-                // this will require replacing TargetDataLayout::{pointer_size,pointer_align}
-                // with e.g. `fn pointer_size_in(AddressSpace)`
-                [p @ "p", s, a @ ..] | [p @ "p0", s, a @ ..] => {
-                    dl.pointer_size = parse_size(s, p)?;
-                    dl.pointer_align = parse_align(a, p)?;
+                ["a", a @ ..] => dl.aggregate_align = parse_align_seq(a, "a")?,
+                ["f16", a @ ..] => dl.f16_align = parse_align_seq(a, "f16")?,
+                ["f32", a @ ..] => dl.f32_align = parse_align_seq(a, "f32")?,
+                ["f64", a @ ..] => dl.f64_align = parse_align_seq(a, "f64")?,
+                ["f128", a @ ..] => dl.f128_align = parse_align_seq(a, "f128")?,
+                [p, s, a @ ..] if p.starts_with("p") => {
+                    let mut p = p.strip_prefix('p').unwrap();
+                    let mut _is_fat = false;
+
+                    // Some targets, such as CHERI, use the 'f' suffix in the p- spec to signal that
+                    // they use 'fat' pointers. The resulting prefix may look like `pf<addr_space>`.
+
+                    if p.starts_with('f') {
+                        p = p.strip_prefix('f').unwrap();
+                        _is_fat = true;
+                    }
+
+                    // However, we currently don't take into account further specifications:
+                    // an error is emitted instead.
+                    if p.starts_with(char::is_alphabetic) {
+                        return Err(TargetDataLayoutErrors::UnknownPointerSpecification {
+                            err: p.to_string(),
+                        });
+                    }
+
+                    let addr_space = if !p.is_empty() {
+                        parse_address_space(p, "p-")?
+                    } else {
+                        AddressSpace::ZERO
+                    };
+
+                    let pointer_size = parse_size(s, "p-")?;
+                    let pointer_align = parse_align_seq(a, "p-")?;
+                    let info = PointerSpec {
+                        pointer_offset: pointer_size,
+                        pointer_size,
+                        pointer_align,
+                        _is_fat,
+                    };
+                    if addr_space == default_address_space {
+                        dl.default_address_space_pointer_spec = info;
+                    } else {
+                        match dl.address_space_info.iter_mut().find(|(a, _)| *a == addr_space) {
+                            Some(e) => e.1 = info,
+                            None => {
+                                dl.address_space_info.push((addr_space, info));
+                            }
+                        }
+                    }
+                }
+                [p, s, a, _pr, i] if p.starts_with("p") => {
+                    let mut p = p.strip_prefix('p').unwrap();
+                    let mut _is_fat = false;
+
+                    // Some targets, such as CHERI, use the 'f' suffix in the p- spec to signal that
+                    // they use 'fat' pointers. The resulting prefix may look like `pf<addr_space>`.
+
+                    if p.starts_with('f') {
+                        p = p.strip_prefix('f').unwrap();
+                        _is_fat = true;
+                    }
+
+                    // However, we currently don't take into account further specifications:
+                    // an error is emitted instead.
+                    if p.starts_with(char::is_alphabetic) {
+                        return Err(TargetDataLayoutErrors::UnknownPointerSpecification {
+                            err: p.to_string(),
+                        });
+                    }
+
+                    let addr_space = if !p.is_empty() {
+                        parse_address_space(p, "p")?
+                    } else {
+                        AddressSpace::ZERO
+                    };
+
+                    let info = PointerSpec {
+                        pointer_size: parse_size(s, "p-")?,
+                        pointer_align: parse_align_str(a, "p-")?,
+                        pointer_offset: parse_size(i, "p-")?,
+                        _is_fat,
+                    };
+
+                    if addr_space == default_address_space {
+                        dl.default_address_space_pointer_spec = info;
+                    } else {
+                        match dl.address_space_info.iter_mut().find(|(a, _)| *a == addr_space) {
+                            Some(e) => e.1 = info,
+                            None => {
+                                dl.address_space_info.push((addr_space, info));
+                            }
+                        }
+                    }
                 }
+
                 [s, a @ ..] if s.starts_with('i') => {
                     let Ok(bits) = s[1..].parse::<u64>() else {
                         parse_size(&s[1..], "i")?; // For the user error.
                         continue;
                     };
-                    let a = parse_align(a, s)?;
+                    let a = parse_align_seq(a, s)?;
                     match bits {
                         1 => dl.i1_align = a,
                         8 => dl.i8_align = a,
@@ -379,7 +501,7 @@ impl TargetDataLayout {
                 }
                 [s, a @ ..] if s.starts_with('v') => {
                     let v_size = parse_size(&s[1..], "v")?;
-                    let a = parse_align(a, s)?;
+                    let a = parse_align_seq(a, s)?;
                     if let Some(v) = dl.vector_align.iter_mut().find(|v| v.0 == v_size) {
                         v.1 = a;
                         continue;
@@ -390,10 +512,27 @@ impl TargetDataLayout {
                 _ => {} // Ignore everything else.
             }
         }
+
+        // Inherit, if not given, address space information for specific LLVM elements from the
+        // default data address space.
+        if (dl.instruction_address_space != dl.default_address_space)
+            && dl
+                .address_space_info
+                .iter()
+                .find(|(a, _)| *a == dl.instruction_address_space)
+                .is_none()
+        {
+            dl.address_space_info.push((
+                dl.instruction_address_space,
+                dl.default_address_space_pointer_spec.clone(),
+            ));
+        }
+
         Ok(dl)
     }
 
-    /// Returns **exclusive** upper bound on object size in bytes.
+    /// Returns **exclusive** upper bound on object size in bytes, in the default data address
+    /// space.
     ///
     /// The theoretical maximum object size is defined as the maximum positive `isize` value.
     /// This ensures that the `offset` semantics remain well-defined by allowing it to correctly
@@ -404,7 +543,26 @@ impl TargetDataLayout {
     /// so we adopt such a more-constrained size bound due to its technical limitations.
     #[inline]
     pub fn obj_size_bound(&self) -> u64 {
-        match self.pointer_size.bits() {
+        match self.pointer_size().bits() {
+            16 => 1 << 15,
+            32 => 1 << 31,
+            64 => 1 << 61,
+            bits => panic!("obj_size_bound: unknown pointer bit size {bits}"),
+        }
+    }
+
+    /// Returns **exclusive** upper bound on object size in bytes, in the given address space.
+    ///
+    /// The theoretical maximum object size is defined as the maximum positive `isize` value.
+    /// This ensures that the `offset` semantics remain well-defined by allowing it to correctly
+    /// index every address within an object along with one byte past the end, along with allowing
+    /// `isize` to store the difference between any two pointers into an object.
+    ///
+    /// LLVM uses a 64-bit integer to represent object size in *bits*, but we care only for bytes,
+    /// so we adopt such a more-constrained size bound due to its technical limitations.
+    #[inline]
+    pub fn obj_size_bound_in(&self, address_space: AddressSpace) -> u64 {
+        match self.pointer_size_in(address_space).bits() {
             16 => 1 << 15,
             32 => 1 << 31,
             64 => 1 << 61,
@@ -415,7 +573,18 @@ impl TargetDataLayout {
     #[inline]
     pub fn ptr_sized_integer(&self) -> Integer {
         use Integer::*;
-        match self.pointer_size.bits() {
+        match self.pointer_offset().bits() {
+            16 => I16,
+            32 => I32,
+            64 => I64,
+            bits => panic!("ptr_sized_integer: unknown pointer bit size {bits}"),
+        }
+    }
+
+    #[inline]
+    pub fn ptr_sized_integer_in(&self, address_space: AddressSpace) -> Integer {
+        use Integer::*;
+        match self.pointer_offset_in(address_space).bits() {
             16 => I16,
             32 => I32,
             64 => I64,
@@ -439,6 +608,66 @@ impl TargetDataLayout {
             Align::from_bytes(vec_size.bytes().next_power_of_two()).unwrap(),
         ))
     }
+
+    /// Get the pointer size in the default data address space.
+    #[inline]
+    pub fn pointer_size(&self) -> Size {
+        self.default_address_space_pointer_spec.pointer_size
+    }
+
+    /// Get the pointer size in a specific address space.
+    #[inline]
+    pub fn pointer_size_in(&self, c: AddressSpace) -> Size {
+        if c == self.default_address_space {
+            return self.default_address_space_pointer_spec.pointer_size;
+        }
+
+        if let Some(e) = self.address_space_info.iter().find(|(a, _)| a == &c) {
+            e.1.pointer_size
+        } else {
+            panic!("Use of unknown address space {c:?}");
+        }
+    }
+
+    /// Get the pointer index in the default data address space.
+    #[inline]
+    pub fn pointer_offset(&self) -> Size {
+        self.default_address_space_pointer_spec.pointer_offset
+    }
+
+    /// Get the pointer index in a specific address space.
+    #[inline]
+    pub fn pointer_offset_in(&self, c: AddressSpace) -> Size {
+        if c == self.default_address_space {
+            return self.default_address_space_pointer_spec.pointer_offset;
+        }
+
+        if let Some(e) = self.address_space_info.iter().find(|(a, _)| a == &c) {
+            e.1.pointer_offset
+        } else {
+            panic!("Use of unknown address space {c:?}");
+        }
+    }
+
+    /// Get the pointer alignment in the default data address space.
+    #[inline]
+    pub fn pointer_align(&self) -> AbiAlign {
+        self.default_address_space_pointer_spec.pointer_align
+    }
+
+    /// Get the pointer alignment in a specific address space.
+    #[inline]
+    pub fn pointer_align_in(&self, c: AddressSpace) -> AbiAlign {
+        if c == self.default_address_space {
+            return self.default_address_space_pointer_spec.pointer_align;
+        }
+
+        if let Some(e) = self.address_space_info.iter().find(|(a, _)| a == &c) {
+            e.1.pointer_align
+        } else {
+            panic!("Use of unknown address space {c:?}");
+        }
+    }
 }
 
 pub trait HasDataLayout {
@@ -1100,10 +1329,7 @@ impl Primitive {
         match self {
             Int(i, _) => i.size(),
             Float(f) => f.size(),
-            // FIXME(erikdesjardins): ignoring address space is technically wrong, pointers in
-            // different address spaces can have different sizes
-            // (but TargetDataLayout doesn't currently parse that part of the DL string)
-            Pointer(_) => dl.pointer_size,
+            Pointer(a) => dl.pointer_size_in(a),
         }
     }
 
@@ -1114,10 +1340,7 @@ impl Primitive {
         match self {
             Int(i, _) => i.align(dl),
             Float(f) => f.align(dl),
-            // FIXME(erikdesjardins): ignoring address space is technically wrong, pointers in
-            // different address spaces can have different alignments
-            // (but TargetDataLayout doesn't currently parse that part of the DL string)
-            Pointer(_) => dl.pointer_align,
+            Pointer(a) => dl.pointer_align_in(a),
         }
     }
 }
@@ -1421,8 +1644,8 @@ impl<FieldIdx: Idx> FieldsShape<FieldIdx> {
 pub struct AddressSpace(pub u32);
 
 impl AddressSpace {
-    /// The default address space, corresponding to data space.
-    pub const DATA: Self = AddressSpace(0);
+    /// LLVM's `0` address space.
+    pub const ZERO: Self = AddressSpace(0);
 }
 
 /// The way we represent values to the backend
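The rustc_abi changes above replace the single `pointer_size`/`pointer_align` fields with per-address-space `PointerSpec`s. Below is a minimal sketch of how the new accessors behave, assuming access to the in-tree `rustc_abi` crate (e.g. via `rustc_private`); the data layout string is purely illustrative, not taken from any real target.

```rust
use rustc_abi::{AddressSpace, TargetDataLayout};

fn main() {
    // 64-bit pointers in the default address space, 32-bit pointers in address space 1.
    let dl = TargetDataLayout::parse_from_llvm_datalayout_string(
        "e-p:64:64-p1:32:32-i64:64",
        AddressSpace::ZERO,
    )
    .expect("valid data layout string");

    // Default-address-space queries keep their old behavior, now as methods.
    assert_eq!(dl.pointer_size().bits(), 64);
    assert_eq!(dl.pointer_align().abi.bits(), 64);

    // Per-address-space queries consult the parsed `address_space_info`.
    assert_eq!(dl.pointer_size_in(AddressSpace(1)).bits(), 32);
    assert_eq!(dl.pointer_align_in(AddressSpace(1)).abi.bits(), 32);
}
```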
diff --git a/compiler/rustc_ast_lowering/src/format.rs b/compiler/rustc_ast_lowering/src/format.rs
index 943cde90dd2..5b1dcab87b9 100644
--- a/compiler/rustc_ast_lowering/src/format.rs
+++ b/compiler/rustc_ast_lowering/src/format.rs
@@ -55,7 +55,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
     /// Get the maximum value of int_ty. It is platform-dependent due to the byte size of isize
     fn int_ty_max(&self, int_ty: IntTy) -> u128 {
         match int_ty {
-            IntTy::Isize => self.tcx.data_layout.pointer_size.signed_int_max() as u128,
+            IntTy::Isize => self.tcx.data_layout.pointer_size().signed_int_max() as u128,
             IntTy::I8 => i8::MAX as u128,
             IntTy::I16 => i16::MAX as u128,
             IntTy::I32 => i32::MAX as u128,
@@ -67,7 +67,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
     /// Get the maximum value of uint_ty. It is platform-dependent due to the byte size of usize
     fn uint_ty_max(&self, uint_ty: UintTy) -> u128 {
         match uint_ty {
-            UintTy::Usize => self.tcx.data_layout.pointer_size.unsigned_int_max(),
+            UintTy::Usize => self.tcx.data_layout.pointer_size().unsigned_int_max(),
             UintTy::U8 => u8::MAX as u128,
             UintTy::U16 => u16::MAX as u128,
             UintTy::U32 => u32::MAX as u128,
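The `int_ty_max`/`uint_ty_max` callers above only switch from the removed `pointer_size` field to the new `pointer_size()` method; the derived bounds are unchanged. A small self-contained sketch of that relationship, assuming a 64-bit default address space:

```rust
use rustc_abi::Size;

fn main() {
    // On a target with 64-bit pointers in the default address space,
    // the `isize`/`usize` bounds follow directly from the pointer size.
    let ptr = Size::from_bits(64);
    assert_eq!(ptr.signed_int_max(), i64::MAX as i128);
    assert_eq!(ptr.unsigned_int_max(), u64::MAX as u128);
}
```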
diff --git a/compiler/rustc_attr_data_structures/src/attributes.rs b/compiler/rustc_attr_data_structures/src/attributes.rs
index 2cbb7270785..b656db32528 100644
--- a/compiler/rustc_attr_data_structures/src/attributes.rs
+++ b/compiler/rustc_attr_data_structures/src/attributes.rs
@@ -240,6 +240,9 @@ pub enum AttributeKind {
     /// Represents [`#[doc]`](https://doc.rust-lang.org/stable/rustdoc/write-documentation/the-doc-attribute.html).
     DocComment { style: AttrStyle, kind: CommentKind, span: Span, comment: Symbol },
 
+    /// Represents `#[rustc_dummy]`.
+    Dummy,
+
     /// Represents [`#[export_name]`](https://doc.rust-lang.org/reference/abi.html#the-export_name-attribute).
     ExportName {
         /// The name to export this item with.
@@ -248,6 +251,15 @@ pub enum AttributeKind {
         span: Span,
     },
 
+    /// Represents `#[export_stable]`.
+    ExportStable,
+
+    /// Represents `#[ffi_const]`.
+    FfiConst(Span),
+
+    /// Represents `#[ffi_pure]`.
+    FfiPure(Span),
+
     /// Represents `#[ignore]`
     Ignore {
         span: Span,
@@ -326,6 +338,9 @@ pub enum AttributeKind {
         span: Span,
     },
 
+    /// Represents `#[rustc_std_internal_symbol]`.
+    StdInternalSymbol(Span),
+
     /// Represents `#[target_feature(enable = "...")]`
     TargetFeature(ThinVec<(Symbol, Span)>, Span),
 
diff --git a/compiler/rustc_attr_data_structures/src/encode_cross_crate.rs b/compiler/rustc_attr_data_structures/src/encode_cross_crate.rs
index a6ae49d2808..5414c7b103b 100644
--- a/compiler/rustc_attr_data_structures/src/encode_cross_crate.rs
+++ b/compiler/rustc_attr_data_structures/src/encode_cross_crate.rs
@@ -25,7 +25,11 @@ impl AttributeKind {
             ConstStabilityIndirect => No,
             Deprecation { .. } => Yes,
             DocComment { .. } => Yes,
+            Dummy => No,
             ExportName { .. } => Yes,
+            ExportStable => No,
+            FfiConst(..) => No,
+            FfiPure(..) => No,
             Ignore { .. } => No,
             Inline(..) => No,
             LinkName { .. } => Yes,
@@ -48,6 +52,7 @@ impl AttributeKind {
             RustcObjectLifetimeDefault => No,
             SkipDuringMethodDispatch { .. } => No,
             Stability { .. } => Yes,
+            StdInternalSymbol(..) => No,
             TargetFeature(..) => No,
             TrackCaller(..) => Yes,
             Used { .. } => No,
diff --git a/compiler/rustc_attr_parsing/messages.ftl b/compiler/rustc_attr_parsing/messages.ftl
index 8a709ea5d20..bec3a1e8a59 100644
--- a/compiler/rustc_attr_parsing/messages.ftl
+++ b/compiler/rustc_attr_parsing/messages.ftl
@@ -146,12 +146,12 @@ attr_parsing_unused_duplicate =
     unused attribute
     .suggestion = remove this attribute
     .note = attribute also specified here
-    .warn = {-passes_previously_accepted}
+    .warn = {-attr_parsing_previously_accepted}
 
 attr_parsing_unused_multiple =
     multiple `{$name}` attributes
     .suggestion = remove this attribute
     .note = attribute also specified here
 
--attr_parsing_perviously_accepted =
+-attr_parsing_previously_accepted =
     this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release!
diff --git a/compiler/rustc_attr_parsing/src/attributes/codegen_attrs.rs b/compiler/rustc_attr_parsing/src/attributes/codegen_attrs.rs
index cb3956d46a0..fdec09edaa1 100644
--- a/compiler/rustc_attr_parsing/src/attributes/codegen_attrs.rs
+++ b/compiler/rustc_attr_parsing/src/attributes/codegen_attrs.rs
@@ -15,7 +15,7 @@ pub(crate) struct OptimizeParser;
 
 impl<S: Stage> SingleAttributeParser<S> for OptimizeParser {
     const PATH: &[Symbol] = &[sym::optimize];
-    const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepLast;
+    const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepOutermost;
     const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::WarnButFutureError;
     const TEMPLATE: AttributeTemplate = template!(List: "size|speed|none");
 
@@ -56,7 +56,7 @@ pub(crate) struct ExportNameParser;
 
 impl<S: Stage> SingleAttributeParser<S> for ExportNameParser {
     const PATH: &[rustc_span::Symbol] = &[sym::export_name];
-    const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepFirst;
+    const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepInnermost;
     const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::WarnButFutureError;
     const TEMPLATE: AttributeTemplate = template!(NameValueStr: "name");
 
diff --git a/compiler/rustc_attr_parsing/src/attributes/deprecation.rs b/compiler/rustc_attr_parsing/src/attributes/deprecation.rs
index 702ad66f578..08cf1ab5d19 100644
--- a/compiler/rustc_attr_parsing/src/attributes/deprecation.rs
+++ b/compiler/rustc_attr_parsing/src/attributes/deprecation.rs
@@ -36,7 +36,7 @@ fn get<S: Stage>(
 
 impl<S: Stage> SingleAttributeParser<S> for DeprecationParser {
     const PATH: &[Symbol] = &[sym::deprecated];
-    const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepFirst;
+    const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepInnermost;
     const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::Error;
     const TEMPLATE: AttributeTemplate = template!(
         Word,
diff --git a/compiler/rustc_attr_parsing/src/attributes/dummy.rs b/compiler/rustc_attr_parsing/src/attributes/dummy.rs
new file mode 100644
index 00000000000..e5e1c3bb6b6
--- /dev/null
+++ b/compiler/rustc_attr_parsing/src/attributes/dummy.rs
@@ -0,0 +1,19 @@
+use rustc_attr_data_structures::AttributeKind;
+use rustc_feature::{AttributeTemplate, template};
+use rustc_span::{Symbol, sym};
+
+use crate::attributes::{AttributeOrder, OnDuplicate, SingleAttributeParser};
+use crate::context::{AcceptContext, Stage};
+use crate::parser::ArgParser;
+
+pub(crate) struct DummyParser;
+impl<S: Stage> SingleAttributeParser<S> for DummyParser {
+    const PATH: &[Symbol] = &[sym::rustc_dummy];
+    const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepInnermost;
+    const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::Ignore;
+    const TEMPLATE: AttributeTemplate = template!(Word); // Anything, really
+
+    fn convert(_: &mut AcceptContext<'_, '_, S>, _: &ArgParser<'_>) -> Option<AttributeKind> {
+        Some(AttributeKind::Dummy)
+    }
+}
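`DummyParser` wires `#[rustc_dummy]` into the new attribute-parsing framework; since `ON_DUPLICATE` is `Ignore`, repeated uses produce no diagnostic. A hedged source-level illustration (the attribute is a compiler-internal test attribute gated behind `rustc_attrs`):

```rust
#![feature(rustc_attrs)]

// Both attributes parse to `AttributeKind::Dummy`; the duplicate is silently ignored.
#[rustc_dummy]
#[rustc_dummy]
fn dummy_target() {}

fn main() {}
```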
diff --git a/compiler/rustc_attr_parsing/src/attributes/inline.rs b/compiler/rustc_attr_parsing/src/attributes/inline.rs
index 11844f4cd95..fe812175218 100644
--- a/compiler/rustc_attr_parsing/src/attributes/inline.rs
+++ b/compiler/rustc_attr_parsing/src/attributes/inline.rs
@@ -16,7 +16,7 @@ pub(crate) struct InlineParser;
 
 impl<S: Stage> SingleAttributeParser<S> for InlineParser {
     const PATH: &'static [Symbol] = &[sym::inline];
-    const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepLast;
+    const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepOutermost;
     const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::WarnButFutureError;
     const TEMPLATE: AttributeTemplate = template!(Word, List: "always|never");
 
@@ -57,7 +57,7 @@ pub(crate) struct RustcForceInlineParser;
 
 impl<S: Stage> SingleAttributeParser<S> for RustcForceInlineParser {
     const PATH: &'static [Symbol] = &[sym::rustc_force_inline];
-    const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepLast;
+    const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepOutermost;
     const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::WarnButFutureError;
     const TEMPLATE: AttributeTemplate = template!(Word, List: "reason", NameValueStr: "reason");
 
diff --git a/compiler/rustc_attr_parsing/src/attributes/link_attrs.rs b/compiler/rustc_attr_parsing/src/attributes/link_attrs.rs
index e298053ab76..23a8e96482d 100644
--- a/compiler/rustc_attr_parsing/src/attributes/link_attrs.rs
+++ b/compiler/rustc_attr_parsing/src/attributes/link_attrs.rs
@@ -1,9 +1,11 @@
 use rustc_attr_data_structures::AttributeKind;
 use rustc_attr_data_structures::AttributeKind::{LinkName, LinkSection};
 use rustc_feature::{AttributeTemplate, template};
-use rustc_span::{Symbol, sym};
+use rustc_span::{Span, Symbol, sym};
 
-use crate::attributes::{AttributeOrder, OnDuplicate, SingleAttributeParser};
+use crate::attributes::{
+    AttributeOrder, NoArgsAttributeParser, OnDuplicate, SingleAttributeParser,
+};
 use crate::context::{AcceptContext, Stage};
 use crate::parser::ArgParser;
 use crate::session_diagnostics::NullOnLinkSection;
@@ -12,7 +14,7 @@ pub(crate) struct LinkNameParser;
 
 impl<S: Stage> SingleAttributeParser<S> for LinkNameParser {
     const PATH: &[Symbol] = &[sym::link_name];
-    const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepFirst;
+    const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepInnermost;
     const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::WarnButFutureError;
     const TEMPLATE: AttributeTemplate = template!(NameValueStr: "name");
 
@@ -34,7 +36,7 @@ pub(crate) struct LinkSectionParser;
 
 impl<S: Stage> SingleAttributeParser<S> for LinkSectionParser {
     const PATH: &[Symbol] = &[sym::link_section];
-    const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepFirst;
+    const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepInnermost;
     const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::WarnButFutureError;
     const TEMPLATE: AttributeTemplate = template!(NameValueStr: "name");
 
@@ -57,3 +59,31 @@ impl<S: Stage> SingleAttributeParser<S> for LinkSectionParser {
         Some(LinkSection { name, span: cx.attr_span })
     }
 }
+
+pub(crate) struct ExportStableParser;
+impl<S: Stage> NoArgsAttributeParser<S> for ExportStableParser {
+    const PATH: &[Symbol] = &[sym::export_stable];
+    const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::Warn;
+    const CREATE: fn(Span) -> AttributeKind = |_| AttributeKind::ExportStable;
+}
+
+pub(crate) struct FfiConstParser;
+impl<S: Stage> NoArgsAttributeParser<S> for FfiConstParser {
+    const PATH: &[Symbol] = &[sym::ffi_const];
+    const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::Warn;
+    const CREATE: fn(Span) -> AttributeKind = AttributeKind::FfiConst;
+}
+
+pub(crate) struct FfiPureParser;
+impl<S: Stage> NoArgsAttributeParser<S> for FfiPureParser {
+    const PATH: &[Symbol] = &[sym::ffi_pure];
+    const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::Warn;
+    const CREATE: fn(Span) -> AttributeKind = AttributeKind::FfiPure;
+}
+
+pub(crate) struct StdInternalSymbolParser;
+impl<S: Stage> NoArgsAttributeParser<S> for StdInternalSymbolParser {
+    const PATH: &[Symbol] = &[sym::rustc_std_internal_symbol];
+    const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::Error;
+    const CREATE: fn(Span) -> AttributeKind = AttributeKind::StdInternalSymbol;
+}
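The four new `NoArgsAttributeParser` impls only move existing attributes onto the new parsing path; their surface syntax is unchanged. For reference, a hedged example of the kind of code `FfiConstParser`/`FfiPureParser` now handle (both features are unstable; the declarations below are illustrative):

```rust
#![feature(ffi_const, ffi_pure)]

unsafe extern "C" {
    // Same argument always yields the same result, with no side effects.
    #[ffi_const]
    pub fn square(x: i32) -> i32;

    // No side effects, but may read global memory.
    #[ffi_pure]
    pub fn strlen(s: *const core::ffi::c_char) -> usize;
}

fn main() {}
```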
diff --git a/compiler/rustc_attr_parsing/src/attributes/mod.rs b/compiler/rustc_attr_parsing/src/attributes/mod.rs
index ba7572434df..68c716d1a99 100644
--- a/compiler/rustc_attr_parsing/src/attributes/mod.rs
+++ b/compiler/rustc_attr_parsing/src/attributes/mod.rs
@@ -30,6 +30,7 @@ pub(crate) mod cfg;
 pub(crate) mod codegen_attrs;
 pub(crate) mod confusables;
 pub(crate) mod deprecation;
+pub(crate) mod dummy;
 pub(crate) mod inline;
 pub(crate) mod link_attrs;
 pub(crate) mod lint_helpers;
@@ -139,7 +140,7 @@ impl<T: SingleAttributeParser<S>, S: Stage> AttributeParser<S> for Single<T, S>
             if let Some(pa) = T::convert(cx, args) {
                 match T::ATTRIBUTE_ORDER {
                     // keep the first and report immediately. ignore this attribute
-                    AttributeOrder::KeepFirst => {
+                    AttributeOrder::KeepInnermost => {
                         if let Some((_, unused)) = group.1 {
                             T::ON_DUPLICATE.exec::<T>(cx, cx.attr_span, unused);
                             return;
@@ -147,7 +148,7 @@ impl<T: SingleAttributeParser<S>, S: Stage> AttributeParser<S> for Single<T, S>
                     }
                     // keep the new one and warn about the previous,
                     // then replace
-                    AttributeOrder::KeepLast => {
+                    AttributeOrder::KeepOutermost => {
                         if let Some((_, used)) = group.1 {
                             T::ON_DUPLICATE.exec::<T>(cx, used, cx.attr_span);
                         }
@@ -164,9 +165,6 @@ impl<T: SingleAttributeParser<S>, S: Stage> AttributeParser<S> for Single<T, S>
     }
 }
 
-// FIXME(jdonszelmann): logic is implemented but the attribute parsers needing
-// them will be merged in another PR
-#[allow(unused)]
 pub(crate) enum OnDuplicate<S: Stage> {
     /// Give a default warning
     Warn,
@@ -212,25 +210,29 @@ impl<S: Stage> OnDuplicate<S> {
         }
     }
 }
-//
-// FIXME(jdonszelmann): logic is implemented but the attribute parsers needing
-// them will be merged in another PR
-#[allow(unused)]
+
 pub(crate) enum AttributeOrder {
-    /// Duplicates after the first attribute will be an error.
+    /// Duplicates after the innermost instance of the attribute will be an error/warning.
+    /// Only keep the lowest attribute.
     ///
-    /// This should be used where duplicates would be ignored, but carry extra
-    /// meaning that could cause confusion. For example, `#[stable(since="1.0")]
-    /// #[stable(since="2.0")]`, which version should be used for `stable`?
-    KeepFirst,
-
-    /// Duplicates preceding the last instance of the attribute will be a
-    /// warning, with a note that this will be an error in the future.
+    /// Attributes are processed from bottom to top, so this raises a warning/error on all the attributes
+    /// further above the lowest one:
+    /// ```
+    /// #[stable(since="1.0")] //~ WARNING duplicated attribute
+    /// #[stable(since="2.0")]
+    /// ```
+    KeepInnermost,
+
+    /// Duplicates before the outermost instance of the attribute will be an error/warning.
+    /// Only keep the highest attribute.
     ///
-    /// This is the same as `FutureWarnFollowing`, except the last attribute is
-    /// the one that is "used". Ideally these can eventually migrate to
-    /// `ErrorPreceding`.
-    KeepLast,
+    /// Attributes are processed from bottom to top, so this raises a warning/error on all the attributes
+    /// below the highest one:
+    /// ```
+    /// #[path="foo.rs"]
+    /// #[path="bar.rs"] //~ WARNING duplicated attribute
+    /// ```
+    KeepOutermost,
 }
 
 /// An even simpler version of [`SingleAttributeParser`]:
@@ -256,7 +258,7 @@ impl<T: NoArgsAttributeParser<S>, S: Stage> Default for WithoutArgs<T, S> {
 
 impl<T: NoArgsAttributeParser<S>, S: Stage> SingleAttributeParser<S> for WithoutArgs<T, S> {
     const PATH: &[Symbol] = T::PATH;
-    const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepLast;
+    const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepOutermost;
     const ON_DUPLICATE: OnDuplicate<S> = T::ON_DUPLICATE;
     const TEMPLATE: AttributeTemplate = template!(Word);
 
diff --git a/compiler/rustc_attr_parsing/src/attributes/must_use.rs b/compiler/rustc_attr_parsing/src/attributes/must_use.rs
index b5eb85f68b4..e0a3e675509 100644
--- a/compiler/rustc_attr_parsing/src/attributes/must_use.rs
+++ b/compiler/rustc_attr_parsing/src/attributes/must_use.rs
@@ -12,7 +12,7 @@ pub(crate) struct MustUseParser;
 
 impl<S: Stage> SingleAttributeParser<S> for MustUseParser {
     const PATH: &[Symbol] = &[sym::must_use];
-    const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepLast;
+    const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepOutermost;
     const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::WarnButFutureError;
     const TEMPLATE: AttributeTemplate = template!(Word, NameValueStr: "reason");
 
diff --git a/compiler/rustc_attr_parsing/src/attributes/path.rs b/compiler/rustc_attr_parsing/src/attributes/path.rs
index 0dfbc9a9aa8..febb1b45a18 100644
--- a/compiler/rustc_attr_parsing/src/attributes/path.rs
+++ b/compiler/rustc_attr_parsing/src/attributes/path.rs
@@ -10,7 +10,7 @@ pub(crate) struct PathParser;
 
 impl<S: Stage> SingleAttributeParser<S> for PathParser {
     const PATH: &[Symbol] = &[sym::path];
-    const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepLast;
+    const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepOutermost;
     const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::WarnButFutureError;
     const TEMPLATE: AttributeTemplate = template!(NameValueStr: "file");
 
diff --git a/compiler/rustc_attr_parsing/src/attributes/rustc_internal.rs b/compiler/rustc_attr_parsing/src/attributes/rustc_internal.rs
index e6b6a6fe3c9..ec821cb11ce 100644
--- a/compiler/rustc_attr_parsing/src/attributes/rustc_internal.rs
+++ b/compiler/rustc_attr_parsing/src/attributes/rustc_internal.rs
@@ -11,7 +11,7 @@ pub(crate) struct RustcLayoutScalarValidRangeStart;
 
 impl<S: Stage> SingleAttributeParser<S> for RustcLayoutScalarValidRangeStart {
     const PATH: &'static [Symbol] = &[sym::rustc_layout_scalar_valid_range_start];
-    const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepFirst;
+    const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepInnermost;
     const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::Error;
     const TEMPLATE: AttributeTemplate = template!(List: "start");
 
@@ -25,7 +25,7 @@ pub(crate) struct RustcLayoutScalarValidRangeEnd;
 
 impl<S: Stage> SingleAttributeParser<S> for RustcLayoutScalarValidRangeEnd {
     const PATH: &'static [Symbol] = &[sym::rustc_layout_scalar_valid_range_end];
-    const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepFirst;
+    const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepInnermost;
     const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::Error;
     const TEMPLATE: AttributeTemplate = template!(List: "end");
 
@@ -62,7 +62,7 @@ pub(crate) struct RustcObjectLifetimeDefaultParser;
 
 impl<S: Stage> SingleAttributeParser<S> for RustcObjectLifetimeDefaultParser {
     const PATH: &[rustc_span::Symbol] = &[sym::rustc_object_lifetime_default];
-    const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepFirst;
+    const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepInnermost;
     const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::Error;
     const TEMPLATE: AttributeTemplate = template!(Word);
 
diff --git a/compiler/rustc_attr_parsing/src/attributes/test_attrs.rs b/compiler/rustc_attr_parsing/src/attributes/test_attrs.rs
index cea3ee52ff4..ee81f64860f 100644
--- a/compiler/rustc_attr_parsing/src/attributes/test_attrs.rs
+++ b/compiler/rustc_attr_parsing/src/attributes/test_attrs.rs
@@ -11,7 +11,7 @@ pub(crate) struct IgnoreParser;
 
 impl<S: Stage> SingleAttributeParser<S> for IgnoreParser {
     const PATH: &[Symbol] = &[sym::ignore];
-    const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepLast;
+    const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepOutermost;
     const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::Warn;
     const TEMPLATE: AttributeTemplate = template!(Word, NameValueStr: "reason");
 
diff --git a/compiler/rustc_attr_parsing/src/attributes/traits.rs b/compiler/rustc_attr_parsing/src/attributes/traits.rs
index 83a98c53c7f..c29dbf4d1e9 100644
--- a/compiler/rustc_attr_parsing/src/attributes/traits.rs
+++ b/compiler/rustc_attr_parsing/src/attributes/traits.rs
@@ -12,7 +12,7 @@ pub(crate) struct SkipDuringMethodDispatchParser;
 
 impl<S: Stage> SingleAttributeParser<S> for SkipDuringMethodDispatchParser {
     const PATH: &[Symbol] = &[sym::rustc_skip_during_method_dispatch];
-    const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepFirst;
+    const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepInnermost;
     const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::Error;
 
     const TEMPLATE: AttributeTemplate = template!(List: "array, boxed_slice");
diff --git a/compiler/rustc_attr_parsing/src/attributes/transparency.rs b/compiler/rustc_attr_parsing/src/attributes/transparency.rs
index ce5ceb9139a..c9fdc57cc06 100644
--- a/compiler/rustc_attr_parsing/src/attributes/transparency.rs
+++ b/compiler/rustc_attr_parsing/src/attributes/transparency.rs
@@ -14,7 +14,7 @@ pub(crate) struct TransparencyParser;
 #[allow(rustc::diagnostic_outside_of_impl)]
 impl<S: Stage> SingleAttributeParser<S> for TransparencyParser {
     const PATH: &[Symbol] = &[sym::rustc_macro_transparency];
-    const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepFirst;
+    const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepInnermost;
     const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::Custom(|cx, used, unused| {
         cx.dcx().span_err(vec![used, unused], "multiple macro transparency attributes");
     });
diff --git a/compiler/rustc_attr_parsing/src/context.rs b/compiler/rustc_attr_parsing/src/context.rs
index 939f4a6fde7..dbe1a5b2ad0 100644
--- a/compiler/rustc_attr_parsing/src/context.rs
+++ b/compiler/rustc_attr_parsing/src/context.rs
@@ -21,8 +21,12 @@ use crate::attributes::codegen_attrs::{
 };
 use crate::attributes::confusables::ConfusablesParser;
 use crate::attributes::deprecation::DeprecationParser;
+use crate::attributes::dummy::DummyParser;
 use crate::attributes::inline::{InlineParser, RustcForceInlineParser};
-use crate::attributes::link_attrs::{LinkNameParser, LinkSectionParser};
+use crate::attributes::link_attrs::{
+    ExportStableParser, FfiConstParser, FfiPureParser, LinkNameParser, LinkSectionParser,
+    StdInternalSymbolParser,
+};
 use crate::attributes::lint_helpers::{AsPtrParser, PassByValueParser, PubTransparentParser};
 use crate::attributes::loop_match::{ConstContinueParser, LoopMatchParser};
 use crate::attributes::must_use::MustUseParser;
@@ -127,6 +131,7 @@ attribute_parsers!(
 
         // tidy-alphabetical-start
         Single<DeprecationParser>,
+        Single<DummyParser>,
         Single<ExportNameParser>,
         Single<IgnoreParser>,
         Single<InlineParser>,
@@ -145,6 +150,9 @@ attribute_parsers!(
         Single<WithoutArgs<ColdParser>>,
         Single<WithoutArgs<ConstContinueParser>>,
         Single<WithoutArgs<ConstStabilityIndirectParser>>,
+        Single<WithoutArgs<ExportStableParser>>,
+        Single<WithoutArgs<FfiConstParser>>,
+        Single<WithoutArgs<FfiPureParser>>,
         Single<WithoutArgs<LoopMatchParser>>,
         Single<WithoutArgs<MayDangleParser>>,
         Single<WithoutArgs<NoImplicitPreludeParser>>,
@@ -152,6 +160,7 @@ attribute_parsers!(
         Single<WithoutArgs<NonExhaustiveParser>>,
         Single<WithoutArgs<PassByValueParser>>,
         Single<WithoutArgs<PubTransparentParser>>,
+        Single<WithoutArgs<StdInternalSymbolParser>>,
         Single<WithoutArgs<TrackCallerParser>>,
         // tidy-alphabetical-end
     ];
diff --git a/compiler/rustc_borrowck/src/consumers.rs b/compiler/rustc_borrowck/src/consumers.rs
index 1f087b09234..1c4ff5a6779 100644
--- a/compiler/rustc_borrowck/src/consumers.rs
+++ b/compiler/rustc_borrowck/src/consumers.rs
@@ -1,7 +1,9 @@
 //! This file provides API for compiler consumers.
 
+use rustc_data_structures::fx::FxHashMap;
 use rustc_hir::def_id::LocalDefId;
 use rustc_index::IndexVec;
+use rustc_middle::bug;
 use rustc_middle::mir::{Body, Promoted};
 use rustc_middle::ty::TyCtxt;
 
@@ -17,7 +19,39 @@ pub use super::polonius::legacy::{
 pub use super::region_infer::RegionInferenceContext;
 use crate::{BorrowCheckRootCtxt, do_mir_borrowck};
 
-/// Options determining the output behavior of [`get_body_with_borrowck_facts`].
+/// Struct used during mir borrowck to collect bodies with facts for a typeck root and all
+/// its nested bodies.
+pub(crate) struct BorrowckConsumer<'tcx> {
+    options: ConsumerOptions,
+    bodies: FxHashMap<LocalDefId, BodyWithBorrowckFacts<'tcx>>,
+}
+
+impl<'tcx> BorrowckConsumer<'tcx> {
+    pub(crate) fn new(options: ConsumerOptions) -> Self {
+        Self { options, bodies: Default::default() }
+    }
+
+    pub(crate) fn insert_body(&mut self, def_id: LocalDefId, body: BodyWithBorrowckFacts<'tcx>) {
+        if self.bodies.insert(def_id, body).is_some() {
+            bug!("unexpected previous body for {def_id:?}");
+        }
+    }
+
+    /// Should the Polonius input facts be computed?
+    pub(crate) fn polonius_input(&self) -> bool {
+        matches!(
+            self.options,
+            ConsumerOptions::PoloniusInputFacts | ConsumerOptions::PoloniusOutputFacts
+        )
+    }
+
+    /// Should we run Polonius and collect the output facts?
+    pub(crate) fn polonius_output(&self) -> bool {
+        matches!(self.options, ConsumerOptions::PoloniusOutputFacts)
+    }
+}
+
+/// Options determining the output behavior of [`get_bodies_with_borrowck_facts`].
 ///
 /// If executing under `-Z polonius` the choice here has no effect, and everything as if
 /// [`PoloniusOutputFacts`](ConsumerOptions::PoloniusOutputFacts) had been selected
@@ -43,17 +77,6 @@ pub enum ConsumerOptions {
     PoloniusOutputFacts,
 }
 
-impl ConsumerOptions {
-    /// Should the Polonius input facts be computed?
-    pub(crate) fn polonius_input(&self) -> bool {
-        matches!(self, Self::PoloniusInputFacts | Self::PoloniusOutputFacts)
-    }
-    /// Should we run Polonius and collect the output facts?
-    pub(crate) fn polonius_output(&self) -> bool {
-        matches!(self, Self::PoloniusOutputFacts)
-    }
-}
-
 /// A `Body` with information computed by the borrow checker. This struct is
 /// intended to be consumed by compiler consumers.
 ///
@@ -82,25 +105,35 @@ pub struct BodyWithBorrowckFacts<'tcx> {
     pub output_facts: Option<Box<PoloniusOutput>>,
 }
 
-/// This function computes borrowck facts for the given body. The [`ConsumerOptions`]
-/// determine which facts are returned. This function makes a copy of the body because
-/// it needs to regenerate the region identifiers. It should never be invoked during a
-/// typical compilation session due to the unnecessary overhead of returning
-/// [`BodyWithBorrowckFacts`].
+/// This function computes borrowck facts for the given def id and all its nested bodies.
+/// It must be called with a typeck root which will then borrowck all nested bodies as well.
+/// The [`ConsumerOptions`] determine which facts are returned. This function makes a copy
+/// of the bodies because it needs to regenerate the region identifiers. It should never be
+/// invoked during a typical compilation session due to the unnecessary overhead of
+/// returning [`BodyWithBorrowckFacts`].
 ///
 /// Note:
-/// *   This function will panic if the required body was already stolen. This
+/// *   This function will panic if the required bodies were already stolen. This
 ///     can, for example, happen when requesting a body of a `const` function
 ///     because they are evaluated during typechecking. The panic can be avoided
 ///     by overriding the `mir_borrowck` query. You can find a complete example
-///     that shows how to do this at `tests/run-make/obtain-borrowck/`.
+///     that shows how to do this at `tests/ui-fulldeps/obtain-borrowck.rs`.
 ///
 /// *   Polonius is highly unstable, so expect regular changes in its signature or other details.
-pub fn get_body_with_borrowck_facts(
+pub fn get_bodies_with_borrowck_facts(
     tcx: TyCtxt<'_>,
-    def_id: LocalDefId,
+    root_def_id: LocalDefId,
     options: ConsumerOptions,
-) -> BodyWithBorrowckFacts<'_> {
-    let mut root_cx = BorrowCheckRootCtxt::new(tcx, def_id);
-    *do_mir_borrowck(&mut root_cx, def_id, Some(options)).1.unwrap()
+) -> FxHashMap<LocalDefId, BodyWithBorrowckFacts<'_>> {
+    let mut root_cx =
+        BorrowCheckRootCtxt::new(tcx, root_def_id, Some(BorrowckConsumer::new(options)));
+
+    // See comment in `rustc_borrowck::mir_borrowck`
+    let nested_bodies = tcx.nested_bodies_within(root_def_id);
+    for def_id in nested_bodies {
+        root_cx.get_or_insert_nested(def_id);
+    }
+
+    do_mir_borrowck(&mut root_cx, root_def_id);
+    root_cx.consumer.unwrap().bodies
 }
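
For downstream consumers, a minimal usage sketch of the new entry point (assuming a custom driver that overrides the `mir_borrowck` query, as in the test referenced above, so the MIR bodies are not stolen; the helper name is illustrative):

    use rustc_borrowck::consumers::{self, ConsumerOptions};
    use rustc_hir::def_id::LocalDefId;
    use rustc_middle::ty::TyCtxt;

    // Sketch only: `root_def_id` must be a typeck root; closures and other nested
    // bodies are borrow-checked together with it and returned in the same map.
    fn borrowck_root(tcx: TyCtxt<'_>, root_def_id: LocalDefId) {
        let bodies = consumers::get_bodies_with_borrowck_facts(
            tcx,
            root_def_id,
            ConsumerOptions::PoloniusOutputFacts,
        );
        for (def_id, body_with_facts) in &bodies {
            // Input/output facts are `Some` because `PoloniusOutputFacts` was requested.
            let _ = (def_id, &body_with_facts.body, &body_with_facts.output_facts);
        }
    }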
diff --git a/compiler/rustc_borrowck/src/lib.rs b/compiler/rustc_borrowck/src/lib.rs
index 82b300dcb17..321b18c9b78 100644
--- a/compiler/rustc_borrowck/src/lib.rs
+++ b/compiler/rustc_borrowck/src/lib.rs
@@ -51,7 +51,7 @@ use smallvec::SmallVec;
 use tracing::{debug, instrument};
 
 use crate::borrow_set::{BorrowData, BorrowSet};
-use crate::consumers::{BodyWithBorrowckFacts, ConsumerOptions};
+use crate::consumers::BodyWithBorrowckFacts;
 use crate::dataflow::{BorrowIndex, Borrowck, BorrowckDomain, Borrows};
 use crate::diagnostics::{
     AccessKind, BorrowckDiagnosticsBuffer, IllegalMoveOriginKind, MoveError, RegionName,
@@ -124,7 +124,7 @@ fn mir_borrowck(
         let opaque_types = ConcreteOpaqueTypes(Default::default());
         Ok(tcx.arena.alloc(opaque_types))
     } else {
-        let mut root_cx = BorrowCheckRootCtxt::new(tcx, def);
+        let mut root_cx = BorrowCheckRootCtxt::new(tcx, def, None);
         // We need to manually borrowck all nested bodies from the HIR as
         // we do not generate MIR for dead code. Not doing so causes us to
         // never check closures in dead code.
@@ -134,7 +134,7 @@ fn mir_borrowck(
         }
 
         let PropagatedBorrowCheckResults { closure_requirements, used_mut_upvars } =
-            do_mir_borrowck(&mut root_cx, def, None).0;
+            do_mir_borrowck(&mut root_cx, def);
         debug_assert!(closure_requirements.is_none());
         debug_assert!(used_mut_upvars.is_empty());
         root_cx.finalize()
@@ -289,17 +289,12 @@ impl<'tcx> ClosureOutlivesSubjectTy<'tcx> {
 
 /// Perform the actual borrow checking.
 ///
-/// Use `consumer_options: None` for the default behavior of returning
-/// [`PropagatedBorrowCheckResults`] only. Otherwise, return [`BodyWithBorrowckFacts`]
-/// according to the given [`ConsumerOptions`].
-///
 /// For nested bodies this should only be called through `root_cx.get_or_insert_nested`.
 #[instrument(skip(root_cx), level = "debug")]
 fn do_mir_borrowck<'tcx>(
     root_cx: &mut BorrowCheckRootCtxt<'tcx>,
     def: LocalDefId,
-    consumer_options: Option<ConsumerOptions>,
-) -> (PropagatedBorrowCheckResults<'tcx>, Option<Box<BodyWithBorrowckFacts<'tcx>>>) {
+) -> PropagatedBorrowCheckResults<'tcx> {
     let tcx = root_cx.tcx;
     let infcx = BorrowckInferCtxt::new(tcx, def);
     let (input_body, promoted) = tcx.mir_promoted(def);
@@ -343,7 +338,6 @@ fn do_mir_borrowck<'tcx>(
         &location_table,
         &move_data,
         &borrow_set,
-        consumer_options,
     );
 
     // Dump MIR results into a file, if that is enabled. This lets us
@@ -483,23 +477,24 @@ fn do_mir_borrowck<'tcx>(
         used_mut_upvars: mbcx.used_mut_upvars,
     };
 
-    let body_with_facts = if consumer_options.is_some() {
-        Some(Box::new(BodyWithBorrowckFacts {
-            body: body_owned,
-            promoted,
-            borrow_set,
-            region_inference_context: regioncx,
-            location_table: polonius_input.as_ref().map(|_| location_table),
-            input_facts: polonius_input,
-            output_facts: polonius_output,
-        }))
-    } else {
-        None
-    };
+    if let Some(consumer) = &mut root_cx.consumer {
+        consumer.insert_body(
+            def,
+            BodyWithBorrowckFacts {
+                body: body_owned,
+                promoted,
+                borrow_set,
+                region_inference_context: regioncx,
+                location_table: polonius_input.as_ref().map(|_| location_table),
+                input_facts: polonius_input,
+                output_facts: polonius_output,
+            },
+        );
+    }
 
     debug!("do_mir_borrowck: result = {:#?}", result);
 
-    (result, body_with_facts)
+    result
 }
 
 fn get_flow_results<'a, 'tcx>(
diff --git a/compiler/rustc_borrowck/src/nll.rs b/compiler/rustc_borrowck/src/nll.rs
index af450507296..41f67e78930 100644
--- a/compiler/rustc_borrowck/src/nll.rs
+++ b/compiler/rustc_borrowck/src/nll.rs
@@ -18,7 +18,6 @@ use rustc_span::sym;
 use tracing::{debug, instrument};
 
 use crate::borrow_set::BorrowSet;
-use crate::consumers::ConsumerOptions;
 use crate::diagnostics::RegionErrors;
 use crate::handle_placeholders::compute_sccs_applying_placeholder_outlives_constraints;
 use crate::polonius::PoloniusDiagnosticsContext;
@@ -83,12 +82,11 @@ pub(crate) fn compute_regions<'tcx>(
     location_table: &PoloniusLocationTable,
     move_data: &MoveData<'tcx>,
     borrow_set: &BorrowSet<'tcx>,
-    consumer_options: Option<ConsumerOptions>,
 ) -> NllOutput<'tcx> {
     let is_polonius_legacy_enabled = infcx.tcx.sess.opts.unstable_opts.polonius.is_legacy_enabled();
-    let polonius_input = consumer_options.map(|c| c.polonius_input()).unwrap_or_default()
+    let polonius_input = root_cx.consumer.as_ref().map_or(false, |c| c.polonius_input())
         || is_polonius_legacy_enabled;
-    let polonius_output = consumer_options.map(|c| c.polonius_output()).unwrap_or_default()
+    let polonius_output = root_cx.consumer.as_ref().map_or(false, |c| c.polonius_output())
         || is_polonius_legacy_enabled;
     let mut polonius_facts =
         (polonius_input || PoloniusFacts::enabled(infcx.tcx)).then_some(PoloniusFacts::default());
diff --git a/compiler/rustc_borrowck/src/root_cx.rs b/compiler/rustc_borrowck/src/root_cx.rs
index 66b526fa02a..9b1d12aede5 100644
--- a/compiler/rustc_borrowck/src/root_cx.rs
+++ b/compiler/rustc_borrowck/src/root_cx.rs
@@ -6,6 +6,7 @@ use rustc_middle::ty::{OpaqueHiddenType, Ty, TyCtxt, TypeVisitableExt};
 use rustc_span::ErrorGuaranteed;
 use smallvec::SmallVec;
 
+use crate::consumers::BorrowckConsumer;
 use crate::{ClosureRegionRequirements, ConcreteOpaqueTypes, PropagatedBorrowCheckResults};
 
 /// The shared context used by both the root as well as all its nested
@@ -16,16 +17,24 @@ pub(super) struct BorrowCheckRootCtxt<'tcx> {
     concrete_opaque_types: ConcreteOpaqueTypes<'tcx>,
     nested_bodies: FxHashMap<LocalDefId, PropagatedBorrowCheckResults<'tcx>>,
     tainted_by_errors: Option<ErrorGuaranteed>,
+    /// This should be `None` during normal compilation. See [`crate::consumers`] for more
+    /// information on how this is used.
+    pub(crate) consumer: Option<BorrowckConsumer<'tcx>>,
 }
 
 impl<'tcx> BorrowCheckRootCtxt<'tcx> {
-    pub(super) fn new(tcx: TyCtxt<'tcx>, root_def_id: LocalDefId) -> BorrowCheckRootCtxt<'tcx> {
+    pub(super) fn new(
+        tcx: TyCtxt<'tcx>,
+        root_def_id: LocalDefId,
+        consumer: Option<BorrowckConsumer<'tcx>>,
+    ) -> BorrowCheckRootCtxt<'tcx> {
         BorrowCheckRootCtxt {
             tcx,
             root_def_id,
             concrete_opaque_types: Default::default(),
             nested_bodies: Default::default(),
             tainted_by_errors: None,
+            consumer,
         }
     }
 
@@ -71,7 +80,7 @@ impl<'tcx> BorrowCheckRootCtxt<'tcx> {
             self.root_def_id.to_def_id()
         );
         if !self.nested_bodies.contains_key(&def_id) {
-            let result = super::do_mir_borrowck(self, def_id, None).0;
+            let result = super::do_mir_borrowck(self, def_id);
             if let Some(prev) = self.nested_bodies.insert(def_id, result) {
                 bug!("unexpected previous nested body: {prev:?}");
             }
diff --git a/compiler/rustc_codegen_cranelift/src/abi/mod.rs b/compiler/rustc_codegen_cranelift/src/abi/mod.rs
index 8965e4a944d..7d0731c77bd 100644
--- a/compiler/rustc_codegen_cranelift/src/abi/mod.rs
+++ b/compiler/rustc_codegen_cranelift/src/abi/mod.rs
@@ -786,7 +786,7 @@ pub(crate) fn codegen_drop<'tcx>(
 
 pub(crate) fn lib_call_arg_param(tcx: TyCtxt<'_>, ty: Type, is_signed: bool) -> AbiParam {
     let param = AbiParam::new(ty);
-    if ty.is_int() && u64::from(ty.bits()) < tcx.data_layout.pointer_size.bits() {
+    if ty.is_int() && u64::from(ty.bits()) < tcx.data_layout.pointer_size().bits() {
         match (&*tcx.sess.target.arch, &*tcx.sess.target.vendor) {
             ("x86_64", _) | ("aarch64", "apple") => match (ty, is_signed) {
                 (types::I8 | types::I16, true) => param.sext(),
diff --git a/compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs b/compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs
index cd0afee0cfb..2031842062d 100644
--- a/compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs
+++ b/compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs
@@ -127,7 +127,7 @@ impl<'tcx> ArgAbiExt<'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
             PassMode::Indirect { attrs, meta_attrs: None, on_stack } => {
                 if on_stack {
                     // Abi requires aligning struct size to pointer size
-                    let size = self.layout.size.align_to(tcx.data_layout.pointer_align.abi);
+                    let size = self.layout.size.align_to(tcx.data_layout.pointer_align().abi);
                     let size = u32::try_from(size.bytes()).unwrap();
                     smallvec![apply_attrs_to_abi_param(
                         AbiParam::special(pointer_ty(tcx), ArgumentPurpose::StructArgument(size),),
diff --git a/compiler/rustc_codegen_cranelift/src/common.rs b/compiler/rustc_codegen_cranelift/src/common.rs
index 2f11b2d2dcc..2fbe5c02802 100644
--- a/compiler/rustc_codegen_cranelift/src/common.rs
+++ b/compiler/rustc_codegen_cranelift/src/common.rs
@@ -15,7 +15,7 @@ use crate::debuginfo::FunctionDebugContext;
 use crate::prelude::*;
 
 pub(crate) fn pointer_ty(tcx: TyCtxt<'_>) -> types::Type {
-    match tcx.data_layout.pointer_size.bits() {
+    match tcx.data_layout.pointer_size().bits() {
         16 => types::I16,
         32 => types::I32,
         64 => types::I64,
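
The `pointer_size`/`pointer_align` edits repeated throughout this patch all follow the same call-site pattern: the target data layout's pointer size and alignment are now read through accessor methods rather than public fields. A minimal sketch of the new form (the helper name is illustrative):

    use rustc_abi::{Align, HasDataLayout};

    fn pointer_bits_and_align(cx: &impl HasDataLayout) -> (u64, Align) {
        let dl = cx.data_layout();
        // Previously written as dl.pointer_size.bits() and dl.pointer_align.abi
        (dl.pointer_size().bits(), dl.pointer_align().abi)
    }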
diff --git a/compiler/rustc_codegen_cranelift/src/constant.rs b/compiler/rustc_codegen_cranelift/src/constant.rs
index ee43eb736e6..85adf0f3716 100644
--- a/compiler/rustc_codegen_cranelift/src/constant.rs
+++ b/compiler/rustc_codegen_cranelift/src/constant.rs
@@ -175,6 +175,13 @@ pub(crate) fn codegen_const_value<'tcx>(
                             fx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
                         fx.bcx.ins().global_value(fx.pointer_type, local_data_id)
                     }
+                    GlobalAlloc::TypeId { .. } => {
+                        return CValue::const_val(
+                            fx,
+                            layout,
+                            ScalarInt::try_from_target_usize(offset.bytes(), fx.tcx).unwrap(),
+                        );
+                    }
                     GlobalAlloc::Static(def_id) => {
                         assert!(fx.tcx.is_static(def_id));
                         let data_id = data_id_for_static(
@@ -360,6 +367,7 @@ fn define_all_allocs(tcx: TyCtxt<'_>, module: &mut dyn Module, cx: &mut Constant
                     GlobalAlloc::Memory(alloc) => alloc,
                     GlobalAlloc::Function { .. }
                     | GlobalAlloc::Static(_)
+                    | GlobalAlloc::TypeId { .. }
                     | GlobalAlloc::VTable(..) => {
                         unreachable!()
                     }
@@ -443,7 +451,7 @@ fn define_all_allocs(tcx: TyCtxt<'_>, module: &mut dyn Module, cx: &mut Constant
             let addend = {
                 let endianness = tcx.data_layout.endian;
                 let offset = offset.bytes() as usize;
-                let ptr_size = tcx.data_layout.pointer_size;
+                let ptr_size = tcx.data_layout.pointer_size();
                 let bytes = &alloc.inspect_with_uninit_and_ptr_outside_interpreter(
                     offset..offset + ptr_size.bytes() as usize,
                 );
@@ -471,6 +479,11 @@ fn define_all_allocs(tcx: TyCtxt<'_>, module: &mut dyn Module, cx: &mut Constant
                         .principal()
                         .map(|principal| tcx.instantiate_bound_regions_with_erased(principal)),
                 ),
+                GlobalAlloc::TypeId { .. } => {
+                    // Nothing to do, the bytes/offset of this pointer have already been written together with all other bytes,
+                    // so we just need to drop this provenance.
+                    continue;
+                }
                 GlobalAlloc::Static(def_id) => {
                     if tcx.codegen_fn_attrs(def_id).flags.contains(CodegenFnAttrFlags::THREAD_LOCAL)
                     {
diff --git a/compiler/rustc_codegen_gcc/src/common.rs b/compiler/rustc_codegen_gcc/src/common.rs
index dd582834fac..28848ca6184 100644
--- a/compiler/rustc_codegen_gcc/src/common.rs
+++ b/compiler/rustc_codegen_gcc/src/common.rs
@@ -1,7 +1,6 @@
 use gccjit::{LValue, RValue, ToRValue, Type};
-use rustc_abi as abi;
-use rustc_abi::HasDataLayout;
 use rustc_abi::Primitive::Pointer;
+use rustc_abi::{self as abi, HasDataLayout};
 use rustc_codegen_ssa::traits::{
     BaseTypeCodegenMethods, ConstCodegenMethods, MiscCodegenMethods, StaticCodegenMethods,
 };
@@ -162,7 +161,7 @@ impl<'gcc, 'tcx> ConstCodegenMethods for CodegenCx<'gcc, 'tcx> {
     }
 
     fn const_usize(&self, i: u64) -> RValue<'gcc> {
-        let bit_size = self.data_layout().pointer_size.bits();
+        let bit_size = self.data_layout().pointer_size().bits();
         if bit_size < 64 {
             // make sure it doesn't overflow
             assert!(i < (1 << bit_size));
@@ -282,6 +281,13 @@ impl<'gcc, 'tcx> ConstCodegenMethods for CodegenCx<'gcc, 'tcx> {
                         let init = self.const_data_from_alloc(alloc);
                         self.static_addr_of(init, alloc.inner().align, None)
                     }
+                    GlobalAlloc::TypeId { .. } => {
+                        let val = self.const_usize(offset.bytes());
+                        // This is still a variable of pointer type, even though we only use the provenance
+                        // of that pointer in CTFE and Miri. But to make GCC's type system happy,
+                        // we need an int-to-ptr cast here (it doesn't matter at all which provenance that picks).
+                        return self.context.new_cast(None, val, ty);
+                    }
                     GlobalAlloc::Static(def_id) => {
                         assert!(self.tcx.is_static(def_id));
                         self.get_static(def_id).get_address(None)
diff --git a/compiler/rustc_codegen_gcc/src/consts.rs b/compiler/rustc_codegen_gcc/src/consts.rs
index b43f9b24c6a..c04c75e1b11 100644
--- a/compiler/rustc_codegen_gcc/src/consts.rs
+++ b/compiler/rustc_codegen_gcc/src/consts.rs
@@ -294,7 +294,7 @@ pub(crate) fn const_alloc_to_gcc_uncached<'gcc>(
     let alloc = alloc.inner();
     let mut llvals = Vec::with_capacity(alloc.provenance().ptrs().len() + 1);
     let dl = cx.data_layout();
-    let pointer_size = dl.pointer_size.bytes() as usize;
+    let pointer_size = dl.pointer_size().bytes() as usize;
 
     let mut next_offset = 0;
     for &(offset, prov) in alloc.provenance().ptrs().iter() {
@@ -331,7 +331,7 @@ pub(crate) fn const_alloc_to_gcc_uncached<'gcc>(
             ),
             abi::Scalar::Initialized {
                 value: Primitive::Pointer(address_space),
-                valid_range: WrappingRange::full(dl.pointer_size),
+                valid_range: WrappingRange::full(dl.pointer_size()),
             },
             cx.type_i8p_ext(address_space),
         ));
diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs
index 497605978fe..0753ac1aeb8 100644
--- a/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs
+++ b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs
@@ -541,7 +541,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tc
                         // For rusty ABIs, small aggregates are actually passed
                         // as `RegKind::Integer` (see `FnAbi::adjust_for_abi`),
                         // so we re-use that same threshold here.
-                        layout.size() <= self.data_layout().pointer_size * 2
+                        layout.size() <= self.data_layout().pointer_size() * 2
                     }
                 };
 
diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs b/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs
index 2e508813fc3..350915a277e 100644
--- a/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs
+++ b/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs
@@ -1184,7 +1184,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
         let lhs = args[0].immediate();
         let rhs = args[1].immediate();
         let is_add = name == sym::simd_saturating_add;
-        let ptr_bits = bx.tcx().data_layout.pointer_size.bits() as _;
+        let ptr_bits = bx.tcx().data_layout.pointer_size().bits() as _;
         let (signed, elem_width, elem_ty) = match *in_elem.kind() {
             ty::Int(i) => (true, i.bit_width().unwrap_or(ptr_bits) / 8, bx.cx.type_int_from_ty(i)),
             ty::Uint(i) => {
diff --git a/compiler/rustc_codegen_gcc/src/lib.rs b/compiler/rustc_codegen_gcc/src/lib.rs
index 1a6eec0ed0b..d8fae1ca47d 100644
--- a/compiler/rustc_codegen_gcc/src/lib.rs
+++ b/compiler/rustc_codegen_gcc/src/lib.rs
@@ -19,7 +19,6 @@
 #![doc(rust_logo)]
 #![feature(rustdoc_internals)]
 #![feature(rustc_private)]
-#![allow(broken_intra_doc_links)]
 #![recursion_limit = "256"]
 #![warn(rust_2018_idioms)]
 #![warn(unused_lifetimes)]
diff --git a/compiler/rustc_codegen_llvm/src/asm.rs b/compiler/rustc_codegen_llvm/src/asm.rs
index 9ddadcf16aa..a643a91141e 100644
--- a/compiler/rustc_codegen_llvm/src/asm.rs
+++ b/compiler/rustc_codegen_llvm/src/asm.rs
@@ -384,15 +384,19 @@ impl<'tcx> AsmCodegenMethods<'tcx> for CodegenCx<'_, 'tcx> {
     ) {
         let asm_arch = self.tcx.sess.asm_arch.unwrap();
 
-        // Default to Intel syntax on x86
-        let intel_syntax = matches!(asm_arch, InlineAsmArch::X86 | InlineAsmArch::X86_64)
-            && !options.contains(InlineAsmOptions::ATT_SYNTAX);
-
         // Build the template string
         let mut template_str = String::new();
-        if intel_syntax {
-            template_str.push_str(".intel_syntax\n");
+
+        // On x86 platforms there are two assembly syntaxes. Rust uses Intel syntax by
+        // default, but AT&T syntax can be requested explicitly.
+        if matches!(asm_arch, InlineAsmArch::X86 | InlineAsmArch::X86_64) {
+            if options.contains(InlineAsmOptions::ATT_SYNTAX) {
+                template_str.push_str(".att_syntax\n")
+            } else {
+                template_str.push_str(".intel_syntax\n")
+            }
         }
+
         for piece in template {
             match *piece {
                 InlineAsmTemplatePiece::String(ref s) => template_str.push_str(s),
@@ -431,7 +435,11 @@ impl<'tcx> AsmCodegenMethods<'tcx> for CodegenCx<'_, 'tcx> {
                 }
             }
         }
-        if intel_syntax {
+
+        // To play it safe, if Intel syntax was used, reset the assembly syntax to AT&T.
+        if matches!(asm_arch, InlineAsmArch::X86 | InlineAsmArch::X86_64)
+            && !options.contains(InlineAsmOptions::ATT_SYNTAX)
+        {
             template_str.push_str("\n.att_syntax\n");
         }
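
Behaviourally this is the same bracketing as before, only written out per syntax; a sketch of the resulting template wrapping on x86_64, assuming the template body is a single `nop`:

    // Sketch of the wrapped template strings this code produces (assumed body "nop"):
    fn wrapped_templates() -> (&'static str, &'static str) {
        // The default (Intel) template is bracketed so the assembler state is reset
        // to AT&T afterwards; an explicit AT&T template only needs the opening directive.
        let intel_default = ".intel_syntax\nnop\n.att_syntax\n";
        let att_requested = ".att_syntax\nnop";
        (intel_default, att_requested)
    }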
 
diff --git a/compiler/rustc_codegen_llvm/src/back/write.rs b/compiler/rustc_codegen_llvm/src/back/write.rs
index bde6a9cf4bc..506286fc255 100644
--- a/compiler/rustc_codegen_llvm/src/back/write.rs
+++ b/compiler/rustc_codegen_llvm/src/back/write.rs
@@ -1182,7 +1182,7 @@ fn create_msvc_imps(
         .filter_map(|val| {
             // Exclude some symbols that we know are not Rust symbols.
             let name = llvm::get_value_name(val);
-            if ignored(name) { None } else { Some((val, name)) }
+            if ignored(&name) { None } else { Some((val, name)) }
         })
         .map(move |(val, name)| {
             let mut imp_name = prefix.as_bytes().to_vec();
diff --git a/compiler/rustc_codegen_llvm/src/builder/autodiff.rs b/compiler/rustc_codegen_llvm/src/builder/autodiff.rs
index b07d9a5cfca..5afb9a60d42 100644
--- a/compiler/rustc_codegen_llvm/src/builder/autodiff.rs
+++ b/compiler/rustc_codegen_llvm/src/builder/autodiff.rs
@@ -306,7 +306,7 @@ fn generate_enzyme_call<'ll>(
     // add outer_fn name to ad_name to make it unique, in case users apply autodiff to multiple
     // functions. Unwrap will only panic, if LLVM gave us an invalid string.
     let name = llvm::get_value_name(outer_fn);
-    let outer_fn_name = std::str::from_utf8(name).unwrap();
+    let outer_fn_name = std::str::from_utf8(&name).unwrap();
     ad_name.push_str(outer_fn_name);
 
     // Let us assume the user wrote the following function square:
diff --git a/compiler/rustc_codegen_llvm/src/common.rs b/compiler/rustc_codegen_llvm/src/common.rs
index 7cfab25bc50..b9b5c776d86 100644
--- a/compiler/rustc_codegen_llvm/src/common.rs
+++ b/compiler/rustc_codegen_llvm/src/common.rs
@@ -3,9 +3,8 @@
 use std::borrow::Borrow;
 
 use libc::{c_char, c_uint};
-use rustc_abi as abi;
-use rustc_abi::HasDataLayout;
 use rustc_abi::Primitive::Pointer;
+use rustc_abi::{self as abi, HasDataLayout as _};
 use rustc_ast::Mutability;
 use rustc_codegen_ssa::common::TypeKind;
 use rustc_codegen_ssa::traits::*;
@@ -175,7 +174,7 @@ impl<'ll, 'tcx> ConstCodegenMethods for CodegenCx<'ll, 'tcx> {
     }
 
     fn const_usize(&self, i: u64) -> &'ll Value {
-        let bit_size = self.data_layout().pointer_size.bits();
+        let bit_size = self.data_layout().pointer_size().bits();
         if bit_size < 64 {
             // make sure it doesn't overflow
             assert!(i < (1 << bit_size));
@@ -284,7 +283,8 @@ impl<'ll, 'tcx> ConstCodegenMethods for CodegenCx<'ll, 'tcx> {
                                 self.const_bitcast(llval, llty)
                             };
                         } else {
-                            let init = const_alloc_to_llvm(self, alloc, /*static*/ false);
+                            let init =
+                                const_alloc_to_llvm(self, alloc.inner(), /*static*/ false);
                             let alloc = alloc.inner();
                             let value = match alloc.mutability {
                                 Mutability::Mut => self.static_addr_of_mut(init, alloc.align, None),
@@ -316,15 +316,19 @@ impl<'ll, 'tcx> ConstCodegenMethods for CodegenCx<'ll, 'tcx> {
                                 }),
                             )))
                             .unwrap_memory();
-                        let init = const_alloc_to_llvm(self, alloc, /*static*/ false);
-                        let value = self.static_addr_of_impl(init, alloc.inner().align, None);
-                        value
+                        let init = const_alloc_to_llvm(self, alloc.inner(), /*static*/ false);
+                        self.static_addr_of_impl(init, alloc.inner().align, None)
                     }
                     GlobalAlloc::Static(def_id) => {
                         assert!(self.tcx.is_static(def_id));
                         assert!(!self.tcx.is_thread_local_static(def_id));
                         self.get_static(def_id)
                     }
+                    GlobalAlloc::TypeId { .. } => {
+                        // Drop the provenance, the offset contains the bytes of the hash
+                        let llval = self.const_usize(offset.bytes());
+                        return unsafe { llvm::LLVMConstIntToPtr(llval, llty) };
+                    }
                 };
                 let base_addr_space = global_alloc.address_space(self);
                 let llval = unsafe {
@@ -346,7 +350,7 @@ impl<'ll, 'tcx> ConstCodegenMethods for CodegenCx<'ll, 'tcx> {
     }
 
     fn const_data_from_alloc(&self, alloc: ConstAllocation<'_>) -> Self::Value {
-        const_alloc_to_llvm(self, alloc, /*static*/ false)
+        const_alloc_to_llvm(self, alloc.inner(), /*static*/ false)
     }
 
     fn const_ptr_byte_offset(&self, base_addr: Self::Value, offset: abi::Size) -> Self::Value {
diff --git a/compiler/rustc_codegen_llvm/src/consts.rs b/compiler/rustc_codegen_llvm/src/consts.rs
index a4492d76c3c..5deddb3ed98 100644
--- a/compiler/rustc_codegen_llvm/src/consts.rs
+++ b/compiler/rustc_codegen_llvm/src/consts.rs
@@ -27,10 +27,9 @@ use crate::{base, debuginfo};
 
 pub(crate) fn const_alloc_to_llvm<'ll>(
     cx: &CodegenCx<'ll, '_>,
-    alloc: ConstAllocation<'_>,
+    alloc: &Allocation,
     is_static: bool,
 ) -> &'ll Value {
-    let alloc = alloc.inner();
     // We expect that callers of const_alloc_to_llvm will instead directly codegen a pointer or
     // integer for any &ZST where the ZST is a constant (i.e. not a static). We should never be
     // producing empty LLVM allocations as they're just adding noise to binaries and forcing less
@@ -43,7 +42,8 @@ pub(crate) fn const_alloc_to_llvm<'ll>(
     }
     let mut llvals = Vec::with_capacity(alloc.provenance().ptrs().len() + 1);
     let dl = cx.data_layout();
-    let pointer_size = dl.pointer_size.bytes() as usize;
+    let pointer_size = dl.pointer_size();
+    let pointer_size_bytes = pointer_size.bytes() as usize;
 
     // Note: this function may call `inspect_with_uninit_and_ptr_outside_interpreter`, so `range`
     // must be within the bounds of `alloc` and not contain or overlap a pointer provenance.
@@ -100,7 +100,9 @@ pub(crate) fn const_alloc_to_llvm<'ll>(
             // This `inspect` is okay since it is within the bounds of the allocation, it doesn't
             // affect interpreter execution (we inspect the result after interpreter execution),
             // and we properly interpret the provenance as a relocation pointer offset.
-            alloc.inspect_with_uninit_and_ptr_outside_interpreter(offset..(offset + pointer_size)),
+            alloc.inspect_with_uninit_and_ptr_outside_interpreter(
+                offset..(offset + pointer_size_bytes),
+            ),
         )
         .expect("const_alloc_to_llvm: could not read relocation pointer")
             as u64;
@@ -111,11 +113,11 @@ pub(crate) fn const_alloc_to_llvm<'ll>(
             InterpScalar::from_pointer(Pointer::new(prov, Size::from_bytes(ptr_offset)), &cx.tcx),
             Scalar::Initialized {
                 value: Primitive::Pointer(address_space),
-                valid_range: WrappingRange::full(dl.pointer_size),
+                valid_range: WrappingRange::full(pointer_size),
             },
             cx.type_ptr_ext(address_space),
         ));
-        next_offset = offset + pointer_size;
+        next_offset = offset + pointer_size_bytes;
     }
     if alloc.len() >= next_offset {
         let range = next_offset..alloc.len();
@@ -138,7 +140,7 @@ fn codegen_static_initializer<'ll, 'tcx>(
     def_id: DefId,
 ) -> Result<(&'ll Value, ConstAllocation<'tcx>), ErrorHandled> {
     let alloc = cx.tcx.eval_static_initializer(def_id)?;
-    Ok((const_alloc_to_llvm(cx, alloc, /*static*/ true), alloc))
+    Ok((const_alloc_to_llvm(cx, alloc.inner(), /*static*/ true), alloc))
 }
 
 fn set_global_alignment<'ll>(cx: &CodegenCx<'ll, '_>, gv: &'ll Value, mut align: Align) {
@@ -427,7 +429,7 @@ impl<'ll> CodegenCx<'ll, '_> {
                 // specific rules on what can be cast. So instead of adding a new way to
                 // generate static initializers that match the static's type, we picked
                 // the easier option and retroactively change the type of the static item itself.
-                let name = llvm::get_value_name(g).to_vec();
+                let name = llvm::get_value_name(g);
                 llvm::set_value_name(g, b"");
 
                 let linkage = llvm::get_linkage(g);
diff --git a/compiler/rustc_codegen_llvm/src/context.rs b/compiler/rustc_codegen_llvm/src/context.rs
index 0324dff6ff2..90582e23b04 100644
--- a/compiler/rustc_codegen_llvm/src/context.rs
+++ b/compiler/rustc_codegen_llvm/src/context.rs
@@ -605,7 +605,7 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> {
         GenericCx(
             FullCx {
                 tcx,
-                scx: SimpleCx::new(llmod, llcx, tcx.data_layout.pointer_size),
+                scx: SimpleCx::new(llmod, llcx, tcx.data_layout.pointer_size()),
                 use_dll_storage_attrs,
                 tls_model,
                 codegen_unit,
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
index 7f3e486ca31..9b4736e50e6 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
@@ -159,13 +159,15 @@ fn build_pointer_or_reference_di_node<'ll, 'tcx>(
     return_if_di_node_created_in_meantime!(cx, unique_type_id);
 
     let data_layout = &cx.tcx.data_layout;
+    let pointer_size = data_layout.pointer_size();
+    let pointer_align = data_layout.pointer_align();
     let ptr_type_debuginfo_name = compute_debuginfo_type_name(cx.tcx, ptr_type, true);
 
     match wide_pointer_kind(cx, pointee_type) {
         None => {
             // This is a thin pointer. Create a regular pointer type and give it the correct name.
             assert_eq!(
-                (data_layout.pointer_size, data_layout.pointer_align.abi),
+                (pointer_size, pointer_align.abi),
                 cx.size_and_align_of(ptr_type),
                 "ptr_type={ptr_type}, pointee_type={pointee_type}",
             );
@@ -174,8 +176,8 @@ fn build_pointer_or_reference_di_node<'ll, 'tcx>(
                 llvm::LLVMRustDIBuilderCreatePointerType(
                     DIB(cx),
                     pointee_type_di_node,
-                    data_layout.pointer_size.bits(),
-                    data_layout.pointer_align.abi.bits() as u32,
+                    pointer_size.bits(),
+                    pointer_align.abi.bits() as u32,
                     0, // Ignore DWARF address space.
                     ptr_type_debuginfo_name.as_c_char_ptr(),
                     ptr_type_debuginfo_name.len(),
@@ -319,7 +321,9 @@ fn build_subroutine_type_di_node<'ll, 'tcx>(
     let name = compute_debuginfo_type_name(cx.tcx, fn_ty, false);
     let (size, align) = match fn_ty.kind() {
         ty::FnDef(..) => (Size::ZERO, Align::ONE),
-        ty::FnPtr(..) => (cx.tcx.data_layout.pointer_size, cx.tcx.data_layout.pointer_align.abi),
+        ty::FnPtr(..) => {
+            (cx.tcx.data_layout.pointer_size(), cx.tcx.data_layout.pointer_align().abi)
+        }
         _ => unreachable!(),
     };
     let di_node = unsafe {
@@ -504,7 +508,7 @@ fn recursion_marker_type_di_node<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>) -> &'ll D
         create_basic_type(
             cx,
             "<recur_type>",
-            cx.tcx.data_layout.pointer_size,
+            cx.tcx.data_layout.pointer_size(),
             dwarf_const::DW_ATE_unsigned,
         )
     })
diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs
index 35922c100cd..fcc0d378f06 100644
--- a/compiler/rustc_codegen_llvm/src/intrinsic.rs
+++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs
@@ -458,7 +458,7 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                         // For rusty ABIs, small aggregates are actually passed
                         // as `RegKind::Integer` (see `FnAbi::adjust_for_abi`),
                         // so we re-use that same threshold here.
-                        layout.size() <= self.data_layout().pointer_size * 2
+                        layout.size() <= self.data_layout().pointer_size() * 2
                     }
                 };
 
@@ -758,8 +758,8 @@ fn codegen_msvc_try<'ll, 'tcx>(
         //      }
         //
         // More information can be found in libstd's seh.rs implementation.
-        let ptr_size = bx.tcx().data_layout.pointer_size;
-        let ptr_align = bx.tcx().data_layout.pointer_align.abi;
+        let ptr_size = bx.tcx().data_layout.pointer_size();
+        let ptr_align = bx.tcx().data_layout.pointer_align().abi;
         let slot = bx.alloca(ptr_size, ptr_align);
         let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
         bx.invoke(try_func_ty, None, None, try_func, &[data], normal, catchswitch, None, None);
@@ -1031,8 +1031,8 @@ fn codegen_emcc_try<'ll, 'tcx>(
 
         // We need to pass two values to catch_func (ptr and is_rust_panic), so
         // create an alloca and pass a pointer to that.
-        let ptr_size = bx.tcx().data_layout.pointer_size;
-        let ptr_align = bx.tcx().data_layout.pointer_align.abi;
+        let ptr_size = bx.tcx().data_layout.pointer_size();
+        let ptr_align = bx.tcx().data_layout.pointer_align().abi;
         let i8_align = bx.tcx().data_layout.i8_align.abi;
         // Required in order for there to be no padding between the fields.
         assert!(i8_align <= ptr_align);
@@ -1158,9 +1158,11 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
     macro_rules! require_int_or_uint_ty {
         ($ty: expr, $diag: expr) => {
             match $ty {
-                ty::Int(i) => i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size.bits()),
+                ty::Int(i) => {
+                    i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size().bits())
+                }
                 ty::Uint(i) => {
-                    i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size.bits())
+                    i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size().bits())
                 }
                 _ => {
                     return_error!($diag);
@@ -2014,10 +2016,10 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
                 } else {
                     let bitwidth = match in_elem.kind() {
                         ty::Int(i) => {
-                            i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size.bits())
+                            i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size().bits())
                         }
                         ty::Uint(i) => {
-                            i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size.bits())
+                            i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size().bits())
                         }
                         _ => return_error!(InvalidMonomorphization::UnsupportedSymbol {
                             span,
diff --git a/compiler/rustc_codegen_llvm/src/lib.rs b/compiler/rustc_codegen_llvm/src/lib.rs
index cdfffbe47bf..63ca51b006d 100644
--- a/compiler/rustc_codegen_llvm/src/lib.rs
+++ b/compiler/rustc_codegen_llvm/src/lib.rs
@@ -113,7 +113,7 @@ impl ExtraBackendMethods for LlvmCodegenBackend {
     ) -> ModuleLlvm {
         let module_llvm = ModuleLlvm::new_metadata(tcx, module_name);
         let cx =
-            SimpleCx::new(module_llvm.llmod(), &module_llvm.llcx, tcx.data_layout.pointer_size);
+            SimpleCx::new(module_llvm.llmod(), &module_llvm.llcx, tcx.data_layout.pointer_size());
         unsafe {
             allocator::codegen(tcx, cx, module_name, kind, alloc_error_handler_kind);
         }
diff --git a/compiler/rustc_codegen_llvm/src/llvm/enzyme_ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/enzyme_ffi.rs
index 7b00b2da6ba..c696b8d8ff2 100644
--- a/compiler/rustc_codegen_llvm/src/llvm/enzyme_ffi.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm/enzyme_ffi.rs
@@ -1,4 +1,3 @@
-#![allow(non_camel_case_types)]
 #![expect(dead_code)]
 
 use libc::{c_char, c_uint};
diff --git a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
index 91ada856d59..b9dd10ff014 100644
--- a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
@@ -1980,12 +1980,12 @@ unsafe extern "C" {
     pub(crate) fn LLVMRustBuildMinNum<'a>(
         B: &Builder<'a>,
         LHS: &'a Value,
-        LHS: &'a Value,
+        RHS: &'a Value,
     ) -> &'a Value;
     pub(crate) fn LLVMRustBuildMaxNum<'a>(
         B: &Builder<'a>,
         LHS: &'a Value,
-        LHS: &'a Value,
+        RHS: &'a Value,
     ) -> &'a Value;
 
     // Atomic Operations
diff --git a/compiler/rustc_codegen_llvm/src/llvm/mod.rs b/compiler/rustc_codegen_llvm/src/llvm/mod.rs
index 661174a80df..3fc83fca352 100644
--- a/compiler/rustc_codegen_llvm/src/llvm/mod.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm/mod.rs
@@ -211,7 +211,7 @@ pub(crate) fn SetFunctionCallConv(fn_: &Value, cc: CallConv) {
 // function.
 // For more details on COMDAT sections see e.g., https://www.airs.com/blog/archives/52
 pub(crate) fn SetUniqueComdat(llmod: &Module, val: &Value) {
-    let name_buf = get_value_name(val).to_vec();
+    let name_buf = get_value_name(val);
     let name =
         CString::from_vec_with_nul(name_buf).or_else(|buf| CString::new(buf.into_bytes())).unwrap();
     set_comdat(llmod, val, &name);
@@ -319,12 +319,14 @@ pub(crate) fn get_param(llfn: &Value, index: c_uint) -> &Value {
     }
 }
 
-/// Safe wrapper for `LLVMGetValueName2` into a byte slice
-pub(crate) fn get_value_name(value: &Value) -> &[u8] {
+/// Safe wrapper for `LLVMGetValueName2`.
+/// Returns an owned copy of the name, because `set_value_name` will invalidate
+/// the underlying pointer.
+pub(crate) fn get_value_name(value: &Value) -> Vec<u8> {
     unsafe {
         let mut len = 0;
         let data = LLVMGetValueName2(value, &mut len);
-        std::slice::from_raw_parts(data.cast(), len)
+        std::slice::from_raw_parts(data.cast(), len).to_vec()
     }
 }
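
The owned return value matters for call sites that rename values after reading a name, as in the `consts.rs` hunk above; a minimal in-crate sketch (helper name and parameters are illustrative):

    fn rename_into(g: &crate::llvm::Value, new_g: &crate::llvm::Value) {
        let name = crate::llvm::get_value_name(g); // owned copy (Vec<u8>)
        crate::llvm::set_value_name(g, b"");       // would invalidate a borrowed name
        crate::llvm::set_value_name(new_g, &name); // still valid: we own the bytes
    }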
 
diff --git a/compiler/rustc_codegen_llvm/src/type_.rs b/compiler/rustc_codegen_llvm/src/type_.rs
index 453eca2bbe1..ee472e75ed4 100644
--- a/compiler/rustc_codegen_llvm/src/type_.rs
+++ b/compiler/rustc_codegen_llvm/src/type_.rs
@@ -208,7 +208,7 @@ impl<'ll, CX: Borrow<SCx<'ll>>> BaseTypeCodegenMethods for GenericCx<'ll, CX> {
     }
 
     fn type_ptr(&self) -> &'ll Type {
-        self.type_ptr_ext(AddressSpace::DATA)
+        self.type_ptr_ext(AddressSpace::ZERO)
     }
 
     fn type_ptr_ext(&self, address_space: AddressSpace) -> &'ll Type {
@@ -258,7 +258,7 @@ impl Type {
     }
 
     pub(crate) fn ptr_llcx(llcx: &llvm::Context) -> &Type {
-        unsafe { llvm::LLVMPointerTypeInContext(llcx, AddressSpace::DATA.0) }
+        unsafe { llvm::LLVMPointerTypeInContext(llcx, AddressSpace::ZERO.0) }
     }
 }
 
diff --git a/compiler/rustc_codegen_llvm/src/va_arg.rs b/compiler/rustc_codegen_llvm/src/va_arg.rs
index 486dc894a4e..ce079f3cb0a 100644
--- a/compiler/rustc_codegen_llvm/src/va_arg.rs
+++ b/compiler/rustc_codegen_llvm/src/va_arg.rs
@@ -45,7 +45,8 @@ fn emit_direct_ptr_va_arg<'ll, 'tcx>(
     let va_list_ty = bx.type_ptr();
     let va_list_addr = list.immediate();
 
-    let ptr = bx.load(va_list_ty, va_list_addr, bx.tcx().data_layout.pointer_align.abi);
+    let ptr_align_abi = bx.tcx().data_layout.pointer_align().abi;
+    let ptr = bx.load(va_list_ty, va_list_addr, ptr_align_abi);
 
     let (addr, addr_align) = if allow_higher_align && align > slot_size {
         (round_pointer_up_to_alignment(bx, ptr, align, bx.type_ptr()), align)
@@ -56,7 +57,7 @@ fn emit_direct_ptr_va_arg<'ll, 'tcx>(
     let aligned_size = size.align_to(slot_size).bytes() as i32;
     let full_direct_size = bx.cx().const_i32(aligned_size);
     let next = bx.inbounds_ptradd(addr, full_direct_size);
-    bx.store(next, va_list_addr, bx.tcx().data_layout.pointer_align.abi);
+    bx.store(next, va_list_addr, ptr_align_abi);
 
     if size.bytes() < slot_size.bytes()
         && bx.tcx().sess.target.endian == Endian::Big
@@ -108,8 +109,8 @@ fn emit_ptr_va_arg<'ll, 'tcx>(
     let (llty, size, align) = if indirect {
         (
             bx.cx.layout_of(Ty::new_imm_ptr(bx.cx.tcx, target_ty)).llvm_type(bx.cx),
-            bx.cx.data_layout().pointer_size,
-            bx.cx.data_layout().pointer_align,
+            bx.cx.data_layout().pointer_size(),
+            bx.cx.data_layout().pointer_align(),
         )
     } else {
         (layout.llvm_type(bx.cx), layout.size, layout.align)
@@ -204,7 +205,7 @@ fn emit_aapcs_va_arg<'ll, 'tcx>(
 
     bx.switch_to_block(in_reg);
     let top_type = bx.type_ptr();
-    let top = bx.load(top_type, reg_top, dl.pointer_align.abi);
+    let top = bx.load(top_type, reg_top, dl.pointer_align().abi);
 
     // reg_value = *(@top + reg_off_v);
     let mut reg_addr = bx.ptradd(top, reg_off_v);
@@ -297,6 +298,7 @@ fn emit_powerpc_va_arg<'ll, 'tcx>(
 
     let max_regs = 8u8;
     let use_regs = bx.icmp(IntPredicate::IntULT, num_regs, bx.const_u8(max_regs));
+    let ptr_align_abi = bx.tcx().data_layout.pointer_align().abi;
 
     let in_reg = bx.append_sibling_block("va_arg.in_reg");
     let in_mem = bx.append_sibling_block("va_arg.in_mem");
@@ -308,7 +310,7 @@ fn emit_powerpc_va_arg<'ll, 'tcx>(
         bx.switch_to_block(in_reg);
 
         let reg_safe_area_ptr = bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(1 + 1 + 2 + 4));
-        let mut reg_addr = bx.load(bx.type_ptr(), reg_safe_area_ptr, dl.pointer_align.abi);
+        let mut reg_addr = bx.load(bx.type_ptr(), reg_safe_area_ptr, ptr_align_abi);
 
         // Floating-point registers start after the general-purpose registers.
         if !is_int && !is_soft_float_abi {
@@ -342,11 +344,11 @@ fn emit_powerpc_va_arg<'ll, 'tcx>(
         let size = if !is_indirect {
             layout.layout.size.align_to(overflow_area_align)
         } else {
-            dl.pointer_size
+            dl.pointer_size()
         };
 
         let overflow_area_ptr = bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(1 + 1 + 2));
-        let mut overflow_area = bx.load(bx.type_ptr(), overflow_area_ptr, dl.pointer_align.abi);
+        let mut overflow_area = bx.load(bx.type_ptr(), overflow_area_ptr, ptr_align_abi);
 
         // Round up address of argument to alignment
         if layout.layout.align.abi > overflow_area_align {
@@ -362,7 +364,7 @@ fn emit_powerpc_va_arg<'ll, 'tcx>(
 
         // Increase the overflow area.
         overflow_area = bx.inbounds_ptradd(overflow_area, bx.const_usize(size.bytes()));
-        bx.store(overflow_area, overflow_area_ptr, dl.pointer_align.abi);
+        bx.store(overflow_area, overflow_area_ptr, ptr_align_abi);
 
         bx.br(end);
 
@@ -373,11 +375,8 @@ fn emit_powerpc_va_arg<'ll, 'tcx>(
     bx.switch_to_block(end);
     let val_addr = bx.phi(bx.type_ptr(), &[reg_addr, mem_addr], &[in_reg, in_mem]);
     let val_type = layout.llvm_type(bx);
-    let val_addr = if is_indirect {
-        bx.load(bx.cx.type_ptr(), val_addr, dl.pointer_align.abi)
-    } else {
-        val_addr
-    };
+    let val_addr =
+        if is_indirect { bx.load(bx.cx.type_ptr(), val_addr, ptr_align_abi) } else { val_addr };
     bx.load(val_type, val_addr, layout.align.abi)
 }
 
@@ -414,6 +413,7 @@ fn emit_s390x_va_arg<'ll, 'tcx>(
     let in_reg = bx.append_sibling_block("va_arg.in_reg");
     let in_mem = bx.append_sibling_block("va_arg.in_mem");
     let end = bx.append_sibling_block("va_arg.end");
+    let ptr_align_abi = dl.pointer_align().abi;
 
     // FIXME: vector ABI not yet supported.
     let target_ty_size = bx.cx.size_of(target_ty).bytes();
@@ -435,7 +435,7 @@ fn emit_s390x_va_arg<'ll, 'tcx>(
     bx.switch_to_block(in_reg);
 
     // Work out the address of the value in the register save area.
-    let reg_ptr_v = bx.load(bx.type_ptr(), reg_save_area, dl.pointer_align.abi);
+    let reg_ptr_v = bx.load(bx.type_ptr(), reg_save_area, ptr_align_abi);
     let scaled_reg_count = bx.mul(reg_count_v, bx.const_u64(8));
     let reg_off = bx.add(scaled_reg_count, bx.const_u64(reg_save_index * 8 + reg_padding));
     let reg_addr = bx.ptradd(reg_ptr_v, reg_off);
@@ -449,15 +449,14 @@ fn emit_s390x_va_arg<'ll, 'tcx>(
     bx.switch_to_block(in_mem);
 
     // Work out the address of the value in the argument overflow area.
-    let arg_ptr_v =
-        bx.load(bx.type_ptr(), overflow_arg_area, bx.tcx().data_layout.pointer_align.abi);
+    let arg_ptr_v = bx.load(bx.type_ptr(), overflow_arg_area, ptr_align_abi);
     let arg_off = bx.const_u64(padding);
     let mem_addr = bx.ptradd(arg_ptr_v, arg_off);
 
     // Update the argument overflow area pointer.
     let arg_size = bx.cx().const_u64(padded_size);
     let new_arg_ptr_v = bx.inbounds_ptradd(arg_ptr_v, arg_size);
-    bx.store(new_arg_ptr_v, overflow_arg_area, dl.pointer_align.abi);
+    bx.store(new_arg_ptr_v, overflow_arg_area, ptr_align_abi);
     bx.br(end);
 
     // Return the appropriate result.
@@ -465,7 +464,7 @@ fn emit_s390x_va_arg<'ll, 'tcx>(
     let val_addr = bx.phi(bx.type_ptr(), &[reg_addr, mem_addr], &[in_reg, in_mem]);
     let val_type = layout.llvm_type(bx);
     let val_addr =
-        if indirect { bx.load(bx.cx.type_ptr(), val_addr, dl.pointer_align.abi) } else { val_addr };
+        if indirect { bx.load(bx.cx.type_ptr(), val_addr, ptr_align_abi) } else { val_addr };
     bx.load(val_type, val_addr, layout.align.abi)
 }
 
@@ -607,7 +606,7 @@ fn emit_x86_64_sysv64_va_arg<'ll, 'tcx>(
     // loads than necessary. Can we clean this up?
     let reg_save_area_ptr =
         bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(2 * unsigned_int_offset + ptr_offset));
-    let reg_save_area_v = bx.load(bx.type_ptr(), reg_save_area_ptr, dl.pointer_align.abi);
+    let reg_save_area_v = bx.load(bx.type_ptr(), reg_save_area_ptr, dl.pointer_align().abi);
 
     let reg_addr = match layout.layout.backend_repr() {
         BackendRepr::Scalar(scalar) => match scalar.primitive() {
@@ -749,10 +748,11 @@ fn x86_64_sysv64_va_arg_from_memory<'ll, 'tcx>(
     layout: TyAndLayout<'tcx, Ty<'tcx>>,
 ) -> &'ll Value {
     let dl = bx.cx.data_layout();
+    let ptr_align_abi = dl.pointer_align().abi;
 
     let overflow_arg_area_ptr = bx.inbounds_ptradd(va_list_addr, bx.const_usize(8));
 
-    let overflow_arg_area_v = bx.load(bx.type_ptr(), overflow_arg_area_ptr, dl.pointer_align.abi);
+    let overflow_arg_area_v = bx.load(bx.type_ptr(), overflow_arg_area_ptr, ptr_align_abi);
     // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16
     // byte boundary if alignment needed by type exceeds 8 byte boundary.
     // It isn't stated explicitly in the standard, but in practice we use
@@ -771,7 +771,7 @@ fn x86_64_sysv64_va_arg_from_memory<'ll, 'tcx>(
     let size_in_bytes = layout.layout.size().bytes();
     let offset = bx.const_i32(size_in_bytes.next_multiple_of(8) as i32);
     let overflow_arg_area = bx.inbounds_ptradd(overflow_arg_area_v, offset);
-    bx.store(overflow_arg_area, overflow_arg_area_ptr, dl.pointer_align.abi);
+    bx.store(overflow_arg_area, overflow_arg_area_ptr, ptr_align_abi);
 
     mem_addr
 }
@@ -803,6 +803,7 @@ fn emit_xtensa_va_arg<'ll, 'tcx>(
     let from_stack = bx.append_sibling_block("va_arg.from_stack");
     let from_regsave = bx.append_sibling_block("va_arg.from_regsave");
     let end = bx.append_sibling_block("va_arg.end");
+    let ptr_align_abi = bx.tcx().data_layout.pointer_align().abi;
 
     // (*va).va_ndx
     let va_reg_offset = 4;
@@ -825,12 +826,11 @@ fn emit_xtensa_va_arg<'ll, 'tcx>(
 
     bx.switch_to_block(from_regsave);
     // update va_ndx
-    bx.store(offset_next, offset_ptr, bx.tcx().data_layout.pointer_align.abi);
+    bx.store(offset_next, offset_ptr, ptr_align_abi);
 
     // (*va).va_reg
     let regsave_area_ptr = bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(va_reg_offset));
-    let regsave_area =
-        bx.load(bx.type_ptr(), regsave_area_ptr, bx.tcx().data_layout.pointer_align.abi);
+    let regsave_area = bx.load(bx.type_ptr(), regsave_area_ptr, ptr_align_abi);
     let regsave_value_ptr = bx.inbounds_ptradd(regsave_area, offset);
     bx.br(end);
 
@@ -849,11 +849,11 @@ fn emit_xtensa_va_arg<'ll, 'tcx>(
     // va_ndx = offset_next_corrected;
     let offset_next_corrected = bx.add(offset_next, bx.const_i32(slot_size));
     // update va_ndx
-    bx.store(offset_next_corrected, offset_ptr, bx.tcx().data_layout.pointer_align.abi);
+    bx.store(offset_next_corrected, offset_ptr, ptr_align_abi);
 
     // let stack_value_ptr = unsafe { (*va).va_stk.byte_add(offset_corrected) };
     let stack_area_ptr = bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(0));
-    let stack_area = bx.load(bx.type_ptr(), stack_area_ptr, bx.tcx().data_layout.pointer_align.abi);
+    let stack_area = bx.load(bx.type_ptr(), stack_area_ptr, ptr_align_abi);
     let stack_value_ptr = bx.inbounds_ptradd(stack_area, offset_corrected);
     bx.br(end);
 
diff --git a/compiler/rustc_codegen_ssa/src/back/link.rs b/compiler/rustc_codegen_ssa/src/back/link.rs
index 343cb0eeca9..b46773396fc 100644
--- a/compiler/rustc_codegen_ssa/src/back/link.rs
+++ b/compiler/rustc_codegen_ssa/src/back/link.rs
@@ -1379,7 +1379,7 @@ pub fn linker_and_flavor(sess: &Session) -> (PathBuf, LinkerFlavor) {
         }
     }
 
-    let features = sess.opts.unstable_opts.linker_features;
+    let features = sess.opts.cg.linker_features;
 
     // linker and linker flavor specified via command line have precedence over what the target
     // specification specifies
@@ -3327,35 +3327,6 @@ fn add_lld_args(
     // this, `wasm-component-ld`, which is overridden if this option is passed.
     if !sess.target.is_like_wasm {
         cmd.cc_arg("-fuse-ld=lld");
-
-        // On ELF platforms like at least x64 linux, GNU ld and LLD have opposite defaults on some
-        // section garbage-collection features. For example, the somewhat popular `linkme` crate and
-        // its dependents rely in practice on this difference: when using lld, they need `-z
-        // nostart-stop-gc` to prevent encapsulation symbols and sections from being
-        // garbage-collected.
-        //
-        // More information about all this can be found in:
-        // - https://maskray.me/blog/2021-01-31-metadata-sections-comdat-and-shf-link-order
-        // - https://lld.llvm.org/ELF/start-stop-gc
-        //
-        // So when using lld, we restore, for now, the traditional behavior to help migration, but
-        // will remove it in the future.
-        // Since this only disables an optimization, it shouldn't create issues, but is in theory
-        // slightly suboptimal. However, it:
-        // - doesn't have any visible impact on our benchmarks
-        // - reduces the need to disable lld for the crates that depend on this
-        //
-        // Note that lld can detect some cases where this difference is relied on, and emits a
-        // dedicated error to add this link arg. We could make use of this error to emit an FCW. As
-        // of writing this, we don't do it, because lld is already enabled by default on nightly
-        // without this mitigation: no working project would see the FCW, so we do this to help
-        // stabilization.
-        //
-        // FIXME: emit an FCW if linking fails due its absence, and then remove this link-arg in the
-        // future.
-        if sess.target.llvm_target == "x86_64-unknown-linux-gnu" {
-            cmd.link_arg("-znostart-stop-gc");
-        }
     }
 
     if !flavor.is_gnu() {
diff --git a/compiler/rustc_codegen_ssa/src/back/linker.rs b/compiler/rustc_codegen_ssa/src/back/linker.rs
index 1896f63bd2d..e0a3ad55be0 100644
--- a/compiler/rustc_codegen_ssa/src/back/linker.rs
+++ b/compiler/rustc_codegen_ssa/src/back/linker.rs
@@ -800,9 +800,7 @@ impl<'a> Linker for GccLinker<'a> {
             return;
         }
 
-        let is_windows = self.sess.target.is_like_windows;
-        let path = tmpdir.join(if is_windows { "list.def" } else { "list" });
-
+        let path = tmpdir.join(if self.sess.target.is_like_windows { "list.def" } else { "list" });
         debug!("EXPORTED SYMBOLS:");
 
         if self.sess.target.is_like_darwin {
@@ -817,7 +815,8 @@ impl<'a> Linker for GccLinker<'a> {
             if let Err(error) = res {
                 self.sess.dcx().emit_fatal(errors::LibDefWriteFailure { error });
             }
-        } else if is_windows {
+            self.link_arg("-exported_symbols_list").link_arg(path);
+        } else if self.sess.target.is_like_windows {
             let res: io::Result<()> = try {
                 let mut f = File::create_buffered(&path)?;
 
@@ -835,6 +834,21 @@ impl<'a> Linker for GccLinker<'a> {
             if let Err(error) = res {
                 self.sess.dcx().emit_fatal(errors::LibDefWriteFailure { error });
             }
+            self.link_arg(path);
+        } else if crate_type == CrateType::Executable && !self.sess.target.is_like_solaris {
+            let res: io::Result<()> = try {
+                let mut f = File::create_buffered(&path)?;
+                writeln!(f, "{{")?;
+                for (sym, _) in symbols {
+                    debug!(sym);
+                    writeln!(f, "  {sym};")?;
+                }
+                writeln!(f, "}};")?;
+            };
+            if let Err(error) = res {
+                self.sess.dcx().emit_fatal(errors::VersionScriptWriteFailure { error });
+            }
+            self.link_arg("--dynamic-list").link_arg(path);
         } else {
             // Write an LD version script
             let res: io::Result<()> = try {
@@ -852,18 +866,13 @@ impl<'a> Linker for GccLinker<'a> {
             if let Err(error) = res {
                 self.sess.dcx().emit_fatal(errors::VersionScriptWriteFailure { error });
             }
-        }
-
-        if self.sess.target.is_like_darwin {
-            self.link_arg("-exported_symbols_list").link_arg(path);
-        } else if self.sess.target.is_like_solaris {
-            self.link_arg("-M").link_arg(path);
-        } else if is_windows {
-            self.link_arg(path);
-        } else {
-            let mut arg = OsString::from("--version-script=");
-            arg.push(path);
-            self.link_arg(arg).link_arg("--no-undefined-version");
+            if self.sess.target.is_like_solaris {
+                self.link_arg("-M").link_arg(path);
+            } else {
+                let mut arg = OsString::from("--version-script=");
+                arg.push(path);
+                self.link_arg(arg).link_arg("--no-undefined-version");
+            }
         }
     }
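
For dynamically exported symbols in executables, the new branch writes a dynamic list instead of a version script and passes it via `--dynamic-list`. A sketch of the generated file, assuming the exported symbols are `main` and `__rust_alloc` (hypothetical names):

    {
      main;
      __rust_alloc;
    };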
 
diff --git a/compiler/rustc_codegen_ssa/src/back/write.rs b/compiler/rustc_codegen_ssa/src/back/write.rs
index 8330e4f7af0..d2a64ec2993 100644
--- a/compiler/rustc_codegen_ssa/src/back/write.rs
+++ b/compiler/rustc_codegen_ssa/src/back/write.rs
@@ -1208,7 +1208,7 @@ fn start_executing_work<B: ExtraBackendMethods>(
         split_debuginfo: tcx.sess.split_debuginfo(),
         split_dwarf_kind: tcx.sess.opts.unstable_opts.split_dwarf_kind,
         parallel: backend.supports_parallel() && !sess.opts.unstable_opts.no_parallel_backend,
-        pointer_size: tcx.data_layout.pointer_size,
+        pointer_size: tcx.data_layout.pointer_size(),
         invocation_temp: sess.invocation_temp.clone(),
     };
 
diff --git a/compiler/rustc_codegen_ssa/src/base.rs b/compiler/rustc_codegen_ssa/src/base.rs
index 102d4ea2fa6..18581f854b6 100644
--- a/compiler/rustc_codegen_ssa/src/base.rs
+++ b/compiler/rustc_codegen_ssa/src/base.rs
@@ -200,7 +200,7 @@ fn unsized_info<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
             let vptr_entry_idx = cx.tcx().supertrait_vtable_slot((source, target));
 
             if let Some(entry_idx) = vptr_entry_idx {
-                let ptr_size = bx.data_layout().pointer_size;
+                let ptr_size = bx.data_layout().pointer_size();
                 let vtable_byte_offset = u64::try_from(entry_idx).unwrap() * ptr_size.bytes();
                 load_vtable(bx, old_info, bx.type_ptr(), vtable_byte_offset, source, true)
             } else {
@@ -577,8 +577,8 @@ fn get_argc_argv<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(bx: &mut Bx) -> (Bx::Va
         // Params for UEFI
         let param_handle = bx.get_param(0);
         let param_system_table = bx.get_param(1);
-        let ptr_size = bx.tcx().data_layout.pointer_size;
-        let ptr_align = bx.tcx().data_layout.pointer_align.abi;
+        let ptr_size = bx.tcx().data_layout.pointer_size();
+        let ptr_align = bx.tcx().data_layout.pointer_align().abi;
         let arg_argc = bx.const_int(bx.cx().type_isize(), 2);
         let arg_argv = bx.alloca(2 * ptr_size, ptr_align);
         bx.store(param_handle, arg_argv, ptr_align);
diff --git a/compiler/rustc_codegen_ssa/src/codegen_attrs.rs b/compiler/rustc_codegen_ssa/src/codegen_attrs.rs
index ff454427871..85d01d4f938 100644
--- a/compiler/rustc_codegen_ssa/src/codegen_attrs.rs
+++ b/compiler/rustc_codegen_ssa/src/codegen_attrs.rs
@@ -203,6 +203,13 @@ fn codegen_fn_attrs(tcx: TyCtxt<'_>, did: LocalDefId) -> CodegenFnAttrs {
                     UsedBy::Compiler => codegen_fn_attrs.flags |= CodegenFnAttrFlags::USED_COMPILER,
                     UsedBy::Linker => codegen_fn_attrs.flags |= CodegenFnAttrFlags::USED_LINKER,
                 },
+                AttributeKind::FfiConst(_) => {
+                    codegen_fn_attrs.flags |= CodegenFnAttrFlags::FFI_CONST
+                }
+                AttributeKind::FfiPure(_) => codegen_fn_attrs.flags |= CodegenFnAttrFlags::FFI_PURE,
+                AttributeKind::StdInternalSymbol(_) => {
+                    codegen_fn_attrs.flags |= CodegenFnAttrFlags::RUSTC_STD_INTERNAL_SYMBOL
+                }
                 _ => {}
             }
         }
@@ -213,17 +220,12 @@ fn codegen_fn_attrs(tcx: TyCtxt<'_>, did: LocalDefId) -> CodegenFnAttrs {
 
         match name {
             sym::rustc_allocator => codegen_fn_attrs.flags |= CodegenFnAttrFlags::ALLOCATOR,
-            sym::ffi_pure => codegen_fn_attrs.flags |= CodegenFnAttrFlags::FFI_PURE,
-            sym::ffi_const => codegen_fn_attrs.flags |= CodegenFnAttrFlags::FFI_CONST,
             sym::rustc_nounwind => codegen_fn_attrs.flags |= CodegenFnAttrFlags::NEVER_UNWIND,
             sym::rustc_reallocator => codegen_fn_attrs.flags |= CodegenFnAttrFlags::REALLOCATOR,
             sym::rustc_deallocator => codegen_fn_attrs.flags |= CodegenFnAttrFlags::DEALLOCATOR,
             sym::rustc_allocator_zeroed => {
                 codegen_fn_attrs.flags |= CodegenFnAttrFlags::ALLOCATOR_ZEROED
             }
-            sym::rustc_std_internal_symbol => {
-                codegen_fn_attrs.flags |= CodegenFnAttrFlags::RUSTC_STD_INTERNAL_SYMBOL
-            }
             sym::thread_local => codegen_fn_attrs.flags |= CodegenFnAttrFlags::THREAD_LOCAL,
             sym::linkage => {
                 if let Some(val) = attr.value_str() {
diff --git a/compiler/rustc_codegen_ssa/src/meth.rs b/compiler/rustc_codegen_ssa/src/meth.rs
index 3a11ce6befb..2aa5c3c27ea 100644
--- a/compiler/rustc_codegen_ssa/src/meth.rs
+++ b/compiler/rustc_codegen_ssa/src/meth.rs
@@ -27,7 +27,7 @@ impl<'a, 'tcx> VirtualIndex {
         debug!("get_fn({llvtable:?}, {ty:?}, {self:?})");
 
         let llty = bx.fn_ptr_backend_type(fn_abi);
-        let ptr_size = bx.data_layout().pointer_size;
+        let ptr_size = bx.data_layout().pointer_size();
         let vtable_byte_offset = self.0 * ptr_size.bytes();
 
         load_vtable(bx, llvtable, llty, vtable_byte_offset, ty, nonnull)
@@ -63,7 +63,7 @@ impl<'a, 'tcx> VirtualIndex {
         debug!("get_int({:?}, {:?})", llvtable, self);
 
         let llty = bx.type_isize();
-        let ptr_size = bx.data_layout().pointer_size;
+        let ptr_size = bx.data_layout().pointer_size();
         let vtable_byte_offset = self.0 * ptr_size.bytes();
 
         load_vtable(bx, llvtable, llty, vtable_byte_offset, ty, false)
@@ -115,7 +115,7 @@ pub(crate) fn get_vtable<'tcx, Cx: CodegenMethods<'tcx>>(
     let vtable_alloc_id = tcx.vtable_allocation((ty, trait_ref));
     let vtable_allocation = tcx.global_alloc(vtable_alloc_id).unwrap_memory();
     let vtable_const = cx.const_data_from_alloc(vtable_allocation);
-    let align = cx.data_layout().pointer_align.abi;
+    let align = cx.data_layout().pointer_align().abi;
     let vtable = cx.static_addr_of(vtable_const, align, Some("vtable"));
 
     cx.apply_vcall_visibility_metadata(ty, trait_ref, vtable);
@@ -133,7 +133,7 @@ pub(crate) fn load_vtable<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
     ty: Ty<'tcx>,
     nonnull: bool,
 ) -> Bx::Value {
-    let ptr_align = bx.data_layout().pointer_align.abi;
+    let ptr_align = bx.data_layout().pointer_align().abi;
 
     if bx.cx().sess().opts.unstable_opts.virtual_function_elimination
         && bx.cx().sess().lto() == Lto::Fat
diff --git a/compiler/rustc_codegen_ssa/src/mir/analyze.rs b/compiler/rustc_codegen_ssa/src/mir/analyze.rs
index 99f35b79208..6d6465dd798 100644
--- a/compiler/rustc_codegen_ssa/src/mir/analyze.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/analyze.rs
@@ -171,8 +171,7 @@ impl<'a, 'b, 'tcx, Bx: BuilderMethods<'b, 'tcx>> Visitor<'tcx> for LocalAnalyzer
         if let Some(local) = place.as_local() {
             self.define(local, DefLocation::Assignment(location));
             if self.locals[local] != LocalKind::Memory {
-                let decl_span = self.fx.mir.local_decls[local].source_info.span;
-                if !self.fx.rvalue_creates_operand(rvalue, decl_span) {
+                if !self.fx.rvalue_creates_operand(rvalue) {
                     self.locals[local] = LocalKind::Memory;
                 }
             }
diff --git a/compiler/rustc_codegen_ssa/src/mir/naked_asm.rs b/compiler/rustc_codegen_ssa/src/mir/naked_asm.rs
index 9da4b8cc8fd..beaf8950978 100644
--- a/compiler/rustc_codegen_ssa/src/mir/naked_asm.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/naked_asm.rs
@@ -326,7 +326,7 @@ fn prefix_and_suffix<'tcx>(
 fn wasm_functype<'tcx>(tcx: TyCtxt<'tcx>, fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> String {
     let mut signature = String::with_capacity(64);
 
-    let ptr_type = match tcx.data_layout.pointer_size.bits() {
+    let ptr_type = match tcx.data_layout.pointer_size().bits() {
         32 => "i32",
         64 => "i64",
         other => bug!("wasm pointer size cannot be {other} bits"),
diff --git a/compiler/rustc_codegen_ssa/src/mir/operand.rs b/compiler/rustc_codegen_ssa/src/mir/operand.rs
index 2896dfd5463..bc83e41b278 100644
--- a/compiler/rustc_codegen_ssa/src/mir/operand.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/operand.rs
@@ -186,7 +186,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
         offset: Size,
     ) -> Self {
         let alloc_align = alloc.inner().align;
-        assert!(alloc_align >= layout.align.abi);
+        assert!(alloc_align >= layout.align.abi, "{alloc_align:?} < {:?}", layout.align.abi);
 
         let read_scalar = |start, size, s: abi::Scalar, ty| {
             match alloc.0.read_scalar(
@@ -565,118 +565,167 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
             }
         }
     }
+}
 
-    /// Creates an incomplete operand containing the [`abi::Scalar`]s expected based
-    /// on the `layout` passed. This is for use with [`OperandRef::insert_field`]
-    /// later to set the necessary immediate(s), one-by-one converting all the `Right` to `Left`.
-    ///
-    /// Returns `None` for `layout`s which cannot be built this way.
-    pub(crate) fn builder(
-        layout: TyAndLayout<'tcx>,
-    ) -> Option<OperandRef<'tcx, Either<V, abi::Scalar>>> {
-        // Uninhabited types are weird, because for example `Result<!, !>`
-        // shows up as `FieldsShape::Primitive` and we need to be able to write
-        // a field into `(u32, !)`. We'll do that in an `alloca` instead.
-        if layout.uninhabited {
-            return None;
-        }
+/// Each of these variants starts out as `Either::Right` when it's uninitialized,
+/// then setting the field changes that to `Either::Left` with the backend value.
+#[derive(Debug, Copy, Clone)]
+enum OperandValueBuilder<V> {
+    ZeroSized,
+    Immediate(Either<V, abi::Scalar>),
+    Pair(Either<V, abi::Scalar>, Either<V, abi::Scalar>),
+    /// `repr(simd)` types need special handling because they each have a non-empty
+    /// array field (which uses [`OperandValue::Ref`]) despite the SIMD type itself
+    /// using [`OperandValue::Immediate`] which for any other kind of type would
+    /// mean that its one non-ZST field would also be [`OperandValue::Immediate`].
+    Vector(Either<V, ()>),
+}
+
+/// Allows building up an `OperandRef` by setting fields one at a time.
+#[derive(Debug, Copy, Clone)]
+pub(super) struct OperandRefBuilder<'tcx, V> {
+    val: OperandValueBuilder<V>,
+    layout: TyAndLayout<'tcx>,
+}
 
+impl<'a, 'tcx, V: CodegenObject> OperandRefBuilder<'tcx, V> {
+    /// Creates an uninitialized builder for an instance of the `layout`.
+    ///
+    /// ICEs for [`BackendRepr::Memory`] types (other than ZSTs), which should
+    /// be built up inside a [`PlaceRef`] instead as they need an allocated place
+    /// into which to write the values of the fields.
+    pub(super) fn new(layout: TyAndLayout<'tcx>) -> Self {
         let val = match layout.backend_repr {
-            BackendRepr::Memory { .. } if layout.is_zst() => OperandValue::ZeroSized,
-            BackendRepr::Scalar(s) => OperandValue::Immediate(Either::Right(s)),
-            BackendRepr::ScalarPair(a, b) => OperandValue::Pair(Either::Right(a), Either::Right(b)),
-            BackendRepr::Memory { .. } | BackendRepr::SimdVector { .. } => return None,
+            BackendRepr::Memory { .. } if layout.is_zst() => OperandValueBuilder::ZeroSized,
+            BackendRepr::Scalar(s) => OperandValueBuilder::Immediate(Either::Right(s)),
+            BackendRepr::ScalarPair(a, b) => {
+                OperandValueBuilder::Pair(Either::Right(a), Either::Right(b))
+            }
+            BackendRepr::SimdVector { .. } => OperandValueBuilder::Vector(Either::Right(())),
+            BackendRepr::Memory { .. } => {
+                bug!("Cannot use non-ZST Memory-ABI type in operand builder: {layout:?}");
+            }
         };
-        Some(OperandRef { val, layout })
+        OperandRefBuilder { val, layout }
     }
-}
 
-impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, Either<V, abi::Scalar>> {
-    pub(crate) fn insert_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+    pub(super) fn insert_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
         &mut self,
         bx: &mut Bx,
-        v: VariantIdx,
-        f: FieldIdx,
-        operand: OperandRef<'tcx, V>,
+        variant: VariantIdx,
+        field: FieldIdx,
+        field_operand: OperandRef<'tcx, V>,
     ) {
-        let (expect_zst, is_zero_offset) = if let abi::FieldsShape::Primitive = self.layout.fields {
+        if let OperandValue::ZeroSized = field_operand.val {
+            // A ZST never adds any state, so just ignore it.
+            // This special-casing is worth it because of things like
+            // `Result<!, !>` where `Ok(never)` is legal to write,
+            // but the type shows as FieldsShape::Primitive so we can't
+            // actually look at the layout for the field being set.
+            return;
+        }
+
+        let is_zero_offset = if let abi::FieldsShape::Primitive = self.layout.fields {
             // The other branch looking at field layouts ICEs for primitives,
             // so we need to handle them separately.
-            // Multiple fields is possible for cases such as aggregating
-            // a thin pointer, where the second field is the unit.
+            // Because we handled ZSTs above (like the metadata in a thin pointer),
+            // the only possibility is that we're setting the one-and-only field.
             assert!(!self.layout.is_zst());
-            assert_eq!(v, FIRST_VARIANT);
-            let first_field = f == FieldIdx::ZERO;
-            (!first_field, first_field)
+            assert_eq!(variant, FIRST_VARIANT);
+            assert_eq!(field, FieldIdx::ZERO);
+            true
         } else {
-            let variant_layout = self.layout.for_variant(bx.cx(), v);
-            let field_layout = variant_layout.field(bx.cx(), f.as_usize());
-            let field_offset = variant_layout.fields.offset(f.as_usize());
-            (field_layout.is_zst(), field_offset == Size::ZERO)
+            let variant_layout = self.layout.for_variant(bx.cx(), variant);
+            let field_offset = variant_layout.fields.offset(field.as_usize());
+            field_offset == Size::ZERO
         };
 
         let mut update = |tgt: &mut Either<V, abi::Scalar>, src, from_scalar| {
             let to_scalar = tgt.unwrap_right();
+            // We transmute here (rather than just `from_immediate`) because in
+            // `Result<usize, *const ()>` the field of the `Ok` is an integer,
+            // but the corresponding scalar in the enum is a pointer.
             let imm = transmute_scalar(bx, src, from_scalar, to_scalar);
             *tgt = Either::Left(imm);
         };
 
-        match (operand.val, operand.layout.backend_repr) {
-            (OperandValue::ZeroSized, _) if expect_zst => {}
+        match (field_operand.val, field_operand.layout.backend_repr) {
+            (OperandValue::ZeroSized, _) => unreachable!("Handled above"),
             (OperandValue::Immediate(v), BackendRepr::Scalar(from_scalar)) => match &mut self.val {
-                OperandValue::Immediate(val @ Either::Right(_)) if is_zero_offset => {
+                OperandValueBuilder::Immediate(val @ Either::Right(_)) if is_zero_offset => {
                     update(val, v, from_scalar);
                 }
-                OperandValue::Pair(fst @ Either::Right(_), _) if is_zero_offset => {
+                OperandValueBuilder::Pair(fst @ Either::Right(_), _) if is_zero_offset => {
                     update(fst, v, from_scalar);
                 }
-                OperandValue::Pair(_, snd @ Either::Right(_)) if !is_zero_offset => {
+                OperandValueBuilder::Pair(_, snd @ Either::Right(_)) if !is_zero_offset => {
                     update(snd, v, from_scalar);
                 }
-                _ => bug!("Tried to insert {operand:?} into {v:?}.{f:?} of {self:?}"),
+                _ => {
+                    bug!("Tried to insert {field_operand:?} into {variant:?}.{field:?} of {self:?}")
+                }
+            },
+            (OperandValue::Immediate(v), BackendRepr::SimdVector { .. }) => match &mut self.val {
+                OperandValueBuilder::Vector(val @ Either::Right(())) if is_zero_offset => {
+                    *val = Either::Left(v);
+                }
+                _ => {
+                    bug!("Tried to insert {field_operand:?} into {variant:?}.{field:?} of {self:?}")
+                }
             },
             (OperandValue::Pair(a, b), BackendRepr::ScalarPair(from_sa, from_sb)) => {
                 match &mut self.val {
-                    OperandValue::Pair(fst @ Either::Right(_), snd @ Either::Right(_)) => {
+                    OperandValueBuilder::Pair(fst @ Either::Right(_), snd @ Either::Right(_)) => {
                         update(fst, a, from_sa);
                         update(snd, b, from_sb);
                     }
-                    _ => bug!("Tried to insert {operand:?} into {v:?}.{f:?} of {self:?}"),
+                    _ => bug!(
+                        "Tried to insert {field_operand:?} into {variant:?}.{field:?} of {self:?}"
+                    ),
                 }
             }
-            _ => bug!("Unsupported operand {operand:?} inserting into {v:?}.{f:?} of {self:?}"),
+            (OperandValue::Ref(place), BackendRepr::Memory { .. }) => match &mut self.val {
+                OperandValueBuilder::Vector(val @ Either::Right(())) => {
+                    let ibty = bx.cx().immediate_backend_type(self.layout);
+                    let simd = bx.load_from_place(ibty, place);
+                    *val = Either::Left(simd);
+                }
+                _ => {
+                    bug!("Tried to insert {field_operand:?} into {variant:?}.{field:?} of {self:?}")
+                }
+            },
+            _ => bug!("Operand cannot be used with `insert_field`: {field_operand:?}"),
         }
     }
 
     /// Insert the immediate value `imm` for field `f` in the *type itself*,
     /// rather than into one of the variants.
     ///
-    /// Most things want [`OperandRef::insert_field`] instead, but this one is
+    /// Most things want [`Self::insert_field`] instead, but this one is
     /// necessary for writing things like enum tags that aren't in any variant.
     pub(super) fn insert_imm(&mut self, f: FieldIdx, imm: V) {
         let field_offset = self.layout.fields.offset(f.as_usize());
         let is_zero_offset = field_offset == Size::ZERO;
         match &mut self.val {
-            OperandValue::Immediate(val @ Either::Right(_)) if is_zero_offset => {
+            OperandValueBuilder::Immediate(val @ Either::Right(_)) if is_zero_offset => {
                 *val = Either::Left(imm);
             }
-            OperandValue::Pair(fst @ Either::Right(_), _) if is_zero_offset => {
+            OperandValueBuilder::Pair(fst @ Either::Right(_), _) if is_zero_offset => {
                 *fst = Either::Left(imm);
             }
-            OperandValue::Pair(_, snd @ Either::Right(_)) if !is_zero_offset => {
+            OperandValueBuilder::Pair(_, snd @ Either::Right(_)) if !is_zero_offset => {
                 *snd = Either::Left(imm);
             }
             _ => bug!("Tried to insert {imm:?} into field {f:?} of {self:?}"),
         }
     }
 
-    /// After having set all necessary fields, this converts the
-    /// `OperandValue<Either<V, _>>` (as obtained from [`OperandRef::builder`])
-    /// to the normal `OperandValue<V>`.
+    /// After having set all necessary fields, this converts the builder back
+    /// to the normal `OperandRef`.
     ///
     /// ICEs if any required fields were not set.
-    pub fn build(&self, cx: &impl CodegenMethods<'tcx, Value = V>) -> OperandRef<'tcx, V> {
-        let OperandRef { val, layout } = *self;
+    pub(super) fn build(&self, cx: &impl CodegenMethods<'tcx, Value = V>) -> OperandRef<'tcx, V> {
+        let OperandRefBuilder { val, layout } = *self;
 
         // For something like `Option::<u32>::None`, it's expected that the
         // payload scalar will not actually have been set, so this converts
@@ -692,10 +741,22 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, Either<V, abi::Scalar>> {
         };
 
         let val = match val {
-            OperandValue::ZeroSized => OperandValue::ZeroSized,
-            OperandValue::Immediate(v) => OperandValue::Immediate(unwrap(v)),
-            OperandValue::Pair(a, b) => OperandValue::Pair(unwrap(a), unwrap(b)),
-            OperandValue::Ref(_) => bug!(),
+            OperandValueBuilder::ZeroSized => OperandValue::ZeroSized,
+            OperandValueBuilder::Immediate(v) => OperandValue::Immediate(unwrap(v)),
+            OperandValueBuilder::Pair(a, b) => OperandValue::Pair(unwrap(a), unwrap(b)),
+            OperandValueBuilder::Vector(v) => match v {
+                Either::Left(v) => OperandValue::Immediate(v),
+                Either::Right(())
+                    if let BackendRepr::SimdVector { element, .. } = layout.backend_repr
+                        && element.is_uninit_valid() =>
+                {
+                    let bty = cx.immediate_backend_type(layout);
+                    OperandValue::Immediate(cx.const_undef(bty))
+                }
+                Either::Right(()) => {
+                    bug!("OperandRef::build called while fields are missing {self:?}")
+                }
+            },
         };
         OperandRef { val, layout }
     }
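
The new `OperandRefBuilder` relies on each scalar slot starting out as `Either::Right(expected_scalar)` and flipping to `Either::Left(value)` once its field is inserted; `build` then requires every slot to be `Left` (or a scalar whose uninit value is valid). A standalone toy of that slot pattern, with all names invented for the sketch:

```rust
// Each slot starts out holding the *expected* description and flips to a
// concrete backend value when its field is set; `build` fails if anything
// is still unset.
enum Slot<V, S> {
    Value(V),    // like Either::Left(v): the field has been set
    Expected(S), // like Either::Right(scalar): still uninitialized
}

struct PairBuilder<V, S> {
    fst: Slot<V, S>,
    snd: Slot<V, S>,
}

impl<V, S> PairBuilder<V, S> {
    fn new(a: S, b: S) -> Self {
        PairBuilder { fst: Slot::Expected(a), snd: Slot::Expected(b) }
    }

    // `is_zero_offset` plays the role of the offset check in `insert_field`.
    fn set(&mut self, is_zero_offset: bool, v: V) {
        let slot = if is_zero_offset { &mut self.fst } else { &mut self.snd };
        match slot {
            Slot::Expected(_) => *slot = Slot::Value(v),
            Slot::Value(_) => panic!("field set twice"),
        }
    }

    fn build(self) -> (V, V) {
        match (self.fst, self.snd) {
            (Slot::Value(a), Slot::Value(b)) => (a, b),
            _ => panic!("build called while fields are missing"),
        }
    }
}

fn main() {
    let mut b = PairBuilder::new("i32 scalar", "i64 scalar");
    b.set(true, 1u64);
    b.set(false, 2u64);
    assert_eq!(b.build(), (1, 2));
}
```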
diff --git a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
index 43726e93252..cbbb0196890 100644
--- a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
@@ -4,10 +4,9 @@ use rustc_middle::ty::layout::{HasTyCtxt, HasTypingEnv, LayoutOf, TyAndLayout};
 use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
 use rustc_middle::{bug, mir};
 use rustc_session::config::OptLevel;
-use rustc_span::{DUMMY_SP, Span};
 use tracing::{debug, instrument};
 
-use super::operand::{OperandRef, OperandValue};
+use super::operand::{OperandRef, OperandRefBuilder, OperandValue};
 use super::place::{PlaceRef, codegen_tag_value};
 use super::{FunctionCx, LocalRef};
 use crate::common::{IntPredicate, TypeKind};
@@ -181,7 +180,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             }
 
             _ => {
-                assert!(self.rvalue_creates_operand(rvalue, DUMMY_SP));
+                assert!(self.rvalue_creates_operand(rvalue));
                 let temp = self.codegen_rvalue_operand(bx, rvalue);
                 temp.val.store(bx, dest);
             }
@@ -354,10 +353,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         bx: &mut Bx,
         rvalue: &mir::Rvalue<'tcx>,
     ) -> OperandRef<'tcx, Bx::Value> {
-        assert!(
-            self.rvalue_creates_operand(rvalue, DUMMY_SP),
-            "cannot codegen {rvalue:?} to operand",
-        );
+        assert!(self.rvalue_creates_operand(rvalue), "cannot codegen {rvalue:?} to operand");
 
         match *rvalue {
             mir::Rvalue::Cast(ref kind, ref source, mir_cast_ty) => {
@@ -668,9 +664,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
 
                 // `rvalue_creates_operand` has arranged that we only get here if
                 // we can build the aggregate immediate from the field immediates.
-                let Some(mut builder) = OperandRef::builder(layout) else {
-                    bug!("Cannot use type in operand builder: {layout:?}")
-                };
+                let mut builder = OperandRefBuilder::new(layout);
                 for (field_idx, field) in fields.iter_enumerated() {
                     let op = self.codegen_operand(bx, field);
                     let fi = active_field_index.unwrap_or(field_idx);
@@ -980,7 +974,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
     /// will not actually take the operand path because the result type is such
     /// that it always gets an `alloca`, but where it's not worth re-checking the
     /// layout in this code when the right thing will happen anyway.
-    pub(crate) fn rvalue_creates_operand(&self, rvalue: &mir::Rvalue<'tcx>, span: Span) -> bool {
+    pub(crate) fn rvalue_creates_operand(&self, rvalue: &mir::Rvalue<'tcx>) -> bool {
         match *rvalue {
             mir::Rvalue::Cast(mir::CastKind::Transmute, ref operand, cast_ty) => {
                 let operand_ty = operand.ty(self.mir, self.cx.tcx());
@@ -1025,18 +1019,13 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             mir::Rvalue::NullaryOp(..) |
             mir::Rvalue::ThreadLocalRef(_) |
             mir::Rvalue::Use(..) |
+            mir::Rvalue::Aggregate(..) | // (*)
             mir::Rvalue::WrapUnsafeBinder(..) => // (*)
                 true,
             // Arrays are always aggregates, so it's not worth checking anything here.
             // (If it's really `[(); N]` or `[T; 0]` and we use the place path, fine.)
             mir::Rvalue::Repeat(..) => false,
-            mir::Rvalue::Aggregate(..) => {
-                    let ty = rvalue.ty(self.mir, self.cx.tcx());
-                    let ty = self.monomorphize(ty);
-                    let layout = self.cx.spanned_layout_of(ty, span);
-                    OperandRef::<Bx::Value>::builder(layout).is_some()
-                }
-            }
+        }
 
         // (*) this is only true if the type is suitable
     }
@@ -1135,7 +1124,7 @@ fn assume_scalar_range<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
             let range = scalar.valid_range(bx.cx());
             bx.assume_integer_range(imm, backend_ty, range);
         }
-        abi::Primitive::Pointer(abi::AddressSpace::DATA)
+        abi::Primitive::Pointer(abi::AddressSpace::ZERO)
             if !scalar.valid_range(bx.cx()).contains(0) =>
         {
             bx.assume_nonnull(imm);
diff --git a/compiler/rustc_const_eval/messages.ftl b/compiler/rustc_const_eval/messages.ftl
index 22a1894ee72..b767ca9a3c2 100644
--- a/compiler/rustc_const_eval/messages.ftl
+++ b/compiler/rustc_const_eval/messages.ftl
@@ -78,6 +78,8 @@ const_eval_dealloc_kind_mismatch =
 
 const_eval_deref_function_pointer =
     accessing {$allocation} which contains a function
+const_eval_deref_typeid_pointer =
+    accessing {$allocation} which contains a `TypeId`
 const_eval_deref_vtable_pointer =
     accessing {$allocation} which contains a vtable
 const_eval_division_by_zero =
diff --git a/compiler/rustc_const_eval/src/const_eval/machine.rs b/compiler/rustc_const_eval/src/const_eval/machine.rs
index 76fa744361a..52fc898192a 100644
--- a/compiler/rustc_const_eval/src/const_eval/machine.rs
+++ b/compiler/rustc_const_eval/src/const_eval/machine.rs
@@ -331,10 +331,10 @@ impl<'tcx> interpret::Machine<'tcx> for CompileTimeMachine<'tcx> {
     fn load_mir(
         ecx: &InterpCx<'tcx, Self>,
         instance: ty::InstanceKind<'tcx>,
-    ) -> InterpResult<'tcx, &'tcx mir::Body<'tcx>> {
+    ) -> &'tcx mir::Body<'tcx> {
         match instance {
-            ty::InstanceKind::Item(def) => interp_ok(ecx.tcx.mir_for_ctfe(def)),
-            _ => interp_ok(ecx.tcx.instance_mir(instance)),
+            ty::InstanceKind::Item(def) => ecx.tcx.mir_for_ctfe(def),
+            _ => ecx.tcx.instance_mir(instance),
         }
     }
 
diff --git a/compiler/rustc_const_eval/src/errors.rs b/compiler/rustc_const_eval/src/errors.rs
index 9133a5fc8ef..49cd7138748 100644
--- a/compiler/rustc_const_eval/src/errors.rs
+++ b/compiler/rustc_const_eval/src/errors.rs
@@ -475,6 +475,7 @@ impl<'a> ReportErrorExt for UndefinedBehaviorInfo<'a> {
             WriteToReadOnly(_) => const_eval_write_to_read_only,
             DerefFunctionPointer(_) => const_eval_deref_function_pointer,
             DerefVTablePointer(_) => const_eval_deref_vtable_pointer,
+            DerefTypeIdPointer(_) => const_eval_deref_typeid_pointer,
             InvalidBool(_) => const_eval_invalid_bool,
             InvalidChar(_) => const_eval_invalid_char,
             InvalidTag(_) => const_eval_invalid_tag,
@@ -588,7 +589,10 @@ impl<'a> ReportErrorExt for UndefinedBehaviorInfo<'a> {
                 diag.arg("has", has.bytes());
                 diag.arg("msg", format!("{msg:?}"));
             }
-            WriteToReadOnly(alloc) | DerefFunctionPointer(alloc) | DerefVTablePointer(alloc) => {
+            WriteToReadOnly(alloc)
+            | DerefFunctionPointer(alloc)
+            | DerefVTablePointer(alloc)
+            | DerefTypeIdPointer(alloc) => {
                 diag.arg("allocation", alloc);
             }
             InvalidBool(b) => {
diff --git a/compiler/rustc_const_eval/src/interpret/eval_context.rs b/compiler/rustc_const_eval/src/interpret/eval_context.rs
index 46c784b41c6..41fc8d47cd3 100644
--- a/compiler/rustc_const_eval/src/interpret/eval_context.rs
+++ b/compiler/rustc_const_eval/src/interpret/eval_context.rs
@@ -96,7 +96,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
     /// This inherent method takes priority over the trait method with the same name in LayoutOf,
     /// and allows wrapping the actual [LayoutOf::layout_of] with a tracing span.
     /// See [LayoutOf::layout_of] for the original documentation.
-    #[inline]
+    #[inline(always)]
     pub fn layout_of(
         &self,
         ty: Ty<'tcx>,
@@ -272,7 +272,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
             let def = instance.def_id();
             &self.tcx.promoted_mir(def)[promoted]
         } else {
-            M::load_mir(self, instance)?
+            M::load_mir(self, instance)
         };
         // do not continue if typeck errors occurred (can only occur in local crate)
         if let Some(err) = body.tainted_by_errors {
diff --git a/compiler/rustc_const_eval/src/interpret/intrinsics.rs b/compiler/rustc_const_eval/src/interpret/intrinsics.rs
index 378ed6d0e10..1eba1f2f03c 100644
--- a/compiler/rustc_const_eval/src/interpret/intrinsics.rs
+++ b/compiler/rustc_const_eval/src/interpret/intrinsics.rs
@@ -4,7 +4,7 @@
 
 use std::assert_matches::assert_matches;
 
-use rustc_abi::Size;
+use rustc_abi::{FieldIdx, Size};
 use rustc_apfloat::ieee::{Double, Half, Quad, Single};
 use rustc_middle::mir::{self, BinOp, ConstValue, NonDivergingIntrinsic};
 use rustc_middle::ty::layout::TyAndLayout;
@@ -28,8 +28,35 @@ pub(crate) fn alloc_type_name<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> ConstAll
     let alloc = Allocation::from_bytes_byte_aligned_immutable(path.into_bytes(), ());
     tcx.mk_const_alloc(alloc)
 }
-
 impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
+    /// Generates a value of `TypeId` for `ty` in-place.
+    pub(crate) fn write_type_id(
+        &mut self,
+        ty: Ty<'tcx>,
+        dest: &PlaceTy<'tcx, M::Provenance>,
+    ) -> InterpResult<'tcx, ()> {
+        let tcx = self.tcx;
+        let type_id_hash = tcx.type_id_hash(ty).as_u128();
+        let op = self.const_val_to_op(
+            ConstValue::Scalar(Scalar::from_u128(type_id_hash)),
+            tcx.types.u128,
+            None,
+        )?;
+        self.copy_op_allow_transmute(&op, dest)?;
+
+        // Give the first pointer-size bytes provenance that knows about the type id.
+        // Here we rely on `TypeId` being a newtype around an array of pointers, so we
+        // first project to its only field and then the first array element.
+        let alloc_id = tcx.reserve_and_set_type_id_alloc(ty);
+        let first = self.project_field(dest, FieldIdx::ZERO)?;
+        let first = self.project_index(&first, 0)?;
+        let offset = self.read_scalar(&first)?.to_target_usize(&tcx)?;
+        let ptr = Pointer::new(alloc_id.into(), Size::from_bytes(offset));
+        let ptr = self.global_root_pointer(ptr)?;
+        let val = Scalar::from_pointer(ptr, &tcx);
+        self.write_scalar(val, &first)
+    }
+
     /// Returns `true` if emulation happened.
     /// Here we implement the intrinsics that are common to all Miri instances; individual machines can add their own
     /// intrinsic handling.
@@ -63,9 +90,48 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
             sym::type_id => {
                 let tp_ty = instance.args.type_at(0);
                 ensure_monomorphic_enough(tcx, tp_ty)?;
-                let val = ConstValue::from_u128(tcx.type_id_hash(tp_ty).as_u128());
-                let val = self.const_val_to_op(val, dest.layout.ty, Some(dest.layout))?;
-                self.copy_op(&val, dest)?;
+                self.write_type_id(tp_ty, dest)?;
+            }
+            sym::type_id_eq => {
+                // Both operands are `TypeId`, which is a newtype around an array of pointers.
+                // Project until we have the array elements.
+                let a_fields = self.project_field(&args[0], FieldIdx::ZERO)?;
+                let b_fields = self.project_field(&args[1], FieldIdx::ZERO)?;
+
+                let mut a_fields = self.project_array_fields(&a_fields)?;
+                let mut b_fields = self.project_array_fields(&b_fields)?;
+
+                let (_idx, a) = a_fields
+                    .next(self)?
+                    .expect("we know the layout of TypeId has at least 2 array elements");
+                let a = self.deref_pointer(&a)?;
+                let (a, offset_a) = self.get_ptr_type_id(a.ptr())?;
+
+                let (_idx, b) = b_fields
+                    .next(self)?
+                    .expect("we know the layout of TypeId has at least 2 array elements");
+                let b = self.deref_pointer(&b)?;
+                let (b, offset_b) = self.get_ptr_type_id(b.ptr())?;
+
+                let provenance_matches = a == b;
+
+                let mut eq_id = offset_a == offset_b;
+
+                while let Some((_, a)) = a_fields.next(self)? {
+                    let (_, b) = b_fields.next(self)?.unwrap();
+
+                    let a = self.read_target_usize(&a)?;
+                    let b = self.read_target_usize(&b)?;
+                    eq_id &= a == b;
+                }
+
+                if !eq_id && provenance_matches {
+                    throw_ub_format!(
+                        "type_id_eq: one of the TypeId arguments is invalid, the hash does not match the type it represents"
+                    )
+                }
+
+                self.write_scalar(Scalar::from_bool(provenance_matches), dest)?;
             }
             sym::variant_count => {
                 let tp_ty = instance.args.type_at(0);
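
The `type_id_eq` intrinsic above compares the provenance of the first pointer-sized chunk and, separately, the remaining hash chunks; matching provenance with mismatching hash bits is reported as UB. A standalone toy of that decision logic (provenance modelled as a plain label, the hash as two 64-bit chunks; all names invented for the sketch):

```rust
// Equality is decided by provenance alone; if the provenance matches but the
// stored hash chunks differ, one of the TypeId values must have been tampered
// with, which the real intrinsic reports as undefined behaviour.
fn type_id_eq(a: (&str, [u64; 2]), b: (&str, [u64; 2])) -> Result<bool, String> {
    let provenance_matches = a.0 == b.0;
    let hash_matches = a.1 == b.1;
    if provenance_matches && !hash_matches {
        return Err("type_id_eq: one of the TypeId arguments is invalid".into());
    }
    Ok(provenance_matches)
}

fn main() {
    assert_eq!(type_id_eq(("u32", [1, 2]), ("u32", [1, 2])), Ok(true));
    assert_eq!(type_id_eq(("u32", [1, 2]), ("i64", [3, 4])), Ok(false));
    assert!(type_id_eq(("u32", [1, 2]), ("u32", [9, 9])).is_err());
}
```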
diff --git a/compiler/rustc_const_eval/src/interpret/machine.rs b/compiler/rustc_const_eval/src/interpret/machine.rs
index 35ec303f961..d150ed69250 100644
--- a/compiler/rustc_const_eval/src/interpret/machine.rs
+++ b/compiler/rustc_const_eval/src/interpret/machine.rs
@@ -18,8 +18,8 @@ use rustc_target::callconv::FnAbi;
 
 use super::{
     AllocBytes, AllocId, AllocKind, AllocRange, Allocation, CTFE_ALLOC_SALT, ConstAllocation,
-    CtfeProvenance, FnArg, Frame, ImmTy, InterpCx, InterpResult, MPlaceTy, MemoryKind,
-    Misalignment, OpTy, PlaceTy, Pointer, Provenance, RangeSet, interp_ok, throw_unsup,
+    CtfeProvenance, EnteredTraceSpan, FnArg, Frame, ImmTy, InterpCx, InterpResult, MPlaceTy,
+    MemoryKind, Misalignment, OpTy, PlaceTy, Pointer, Provenance, RangeSet, interp_ok, throw_unsup,
 };
 
 /// Data returned by [`Machine::after_stack_pop`], and consumed by
@@ -147,12 +147,6 @@ pub trait Machine<'tcx>: Sized {
     /// already been checked before.
     const ALL_CONSTS_ARE_PRECHECKED: bool = true;
 
-    /// Determines whether rustc_const_eval functions that make use of the [Machine] should make
-    /// tracing calls (to the `tracing` library). By default this is `false`, meaning the tracing
-    /// calls will supposedly be optimized out. This flag is set to `true` inside Miri, to allow
-    /// tracing the interpretation steps, among other things.
-    const TRACING_ENABLED: bool = false;
-
     /// Whether memory accesses should be alignment-checked.
     fn enforce_alignment(ecx: &InterpCx<'tcx, Self>) -> bool;
 
@@ -189,8 +183,8 @@ pub trait Machine<'tcx>: Sized {
     fn load_mir(
         ecx: &InterpCx<'tcx, Self>,
         instance: ty::InstanceKind<'tcx>,
-    ) -> InterpResult<'tcx, &'tcx mir::Body<'tcx>> {
-        interp_ok(ecx.tcx.instance_mir(instance))
+    ) -> &'tcx mir::Body<'tcx> {
+        ecx.tcx.instance_mir(instance)
     }
 
     /// Entry point to all function calls.
@@ -634,6 +628,17 @@ pub trait Machine<'tcx>: Sized {
     /// Compute the value passed to the constructors of the `AllocBytes` type for
     /// abstract machine allocations.
     fn get_default_alloc_params(&self) -> <Self::Bytes as AllocBytes>::AllocParams;
+
+    /// Allows enabling/disabling tracing calls from within `rustc_const_eval` at compile time, by
+    /// delegating the entering of [tracing::Span]s to implementors of the [Machine] trait. The
+    /// default implementation corresponds to tracing being disabled, meaning the tracing calls will
+    /// supposedly be optimized out completely. To enable tracing, override this trait method and
+    /// return `span.entered()`. Also see [crate::enter_trace_span].
+    #[must_use]
+    #[inline(always)]
+    fn enter_trace_span(_span: impl FnOnce() -> tracing::Span) -> impl EnteredTraceSpan {
+        ()
+    }
 }
 
 /// A lot of the flexibility above is just needed for `Miri`, but all "compile-time" machines
diff --git a/compiler/rustc_const_eval/src/interpret/memory.rs b/compiler/rustc_const_eval/src/interpret/memory.rs
index c97d53a45de..bb301600135 100644
--- a/compiler/rustc_const_eval/src/interpret/memory.rs
+++ b/compiler/rustc_const_eval/src/interpret/memory.rs
@@ -15,9 +15,9 @@ use std::{fmt, ptr};
 use rustc_abi::{Align, HasDataLayout, Size};
 use rustc_ast::Mutability;
 use rustc_data_structures::fx::{FxHashSet, FxIndexMap};
-use rustc_middle::bug;
 use rustc_middle::mir::display_allocation;
 use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
+use rustc_middle::{bug, throw_ub_format};
 use tracing::{debug, instrument, trace};
 
 use super::{
@@ -346,6 +346,13 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
                         kind = "vtable",
                     )
                 }
+                Some(GlobalAlloc::TypeId { .. }) => {
+                    err_ub_custom!(
+                        fluent::const_eval_invalid_dealloc,
+                        alloc_id = alloc_id,
+                        kind = "typeid",
+                    )
+                }
                 Some(GlobalAlloc::Static(..) | GlobalAlloc::Memory(..)) => {
                     err_ub_custom!(
                         fluent::const_eval_invalid_dealloc,
@@ -615,6 +622,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
             }
             Some(GlobalAlloc::Function { .. }) => throw_ub!(DerefFunctionPointer(id)),
             Some(GlobalAlloc::VTable(..)) => throw_ub!(DerefVTablePointer(id)),
+            Some(GlobalAlloc::TypeId { .. }) => throw_ub!(DerefTypeIdPointer(id)),
             None => throw_ub!(PointerUseAfterFree(id, CheckInAllocMsg::MemoryAccess)),
             Some(GlobalAlloc::Static(def_id)) => {
                 assert!(self.tcx.is_static(def_id));
@@ -896,7 +904,9 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
             let (size, align) = global_alloc.size_and_align(*self.tcx, self.typing_env);
             let mutbl = global_alloc.mutability(*self.tcx, self.typing_env);
             let kind = match global_alloc {
-                GlobalAlloc::Static { .. } | GlobalAlloc::Memory { .. } => AllocKind::LiveData,
+                GlobalAlloc::TypeId { .. }
+                | GlobalAlloc::Static { .. }
+                | GlobalAlloc::Memory { .. } => AllocKind::LiveData,
                 GlobalAlloc::Function { .. } => bug!("We already checked function pointers above"),
                 GlobalAlloc::VTable { .. } => AllocKind::VTable,
             };
@@ -936,6 +946,19 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
         }
     }
 
+    /// Takes a pointer that is the first chunk of a `TypeId` and returns the type that its
+    /// provenance refers to, as well as the segment of the hash that this pointer covers.
+    pub fn get_ptr_type_id(
+        &self,
+        ptr: Pointer<Option<M::Provenance>>,
+    ) -> InterpResult<'tcx, (Ty<'tcx>, Size)> {
+        let (alloc_id, offset, _meta) = self.ptr_get_alloc_id(ptr, 0)?;
+        let GlobalAlloc::TypeId { ty } = self.tcx.global_alloc(alloc_id) else {
+            throw_ub_format!("type_id_eq: `TypeId` provenance is not a type id")
+        };
+        interp_ok((ty, offset))
+    }
+
     pub fn get_ptr_fn(
         &self,
         ptr: Pointer<Option<M::Provenance>>,
@@ -1197,6 +1220,9 @@ impl<'a, 'tcx, M: Machine<'tcx>> std::fmt::Debug for DumpAllocs<'a, 'tcx, M> {
                         Some(GlobalAlloc::VTable(ty, dyn_ty)) => {
                             write!(fmt, " (vtable: impl {dyn_ty} for {ty})")?;
                         }
+                        Some(GlobalAlloc::TypeId { ty }) => {
+                            write!(fmt, " (typeid for {ty})")?;
+                        }
                         Some(GlobalAlloc::Static(did)) => {
                             write!(fmt, " (static: {})", self.ecx.tcx.def_path_str(did))?;
                         }
@@ -1233,7 +1259,7 @@ impl<'a, 'tcx, Prov: Provenance, Extra, Bytes: AllocBytes>
 
     /// `offset` is relative to this allocation reference, not the base of the allocation.
     pub fn write_ptr_sized(&mut self, offset: Size, val: Scalar<Prov>) -> InterpResult<'tcx> {
-        self.write_scalar(alloc_range(offset, self.tcx.data_layout().pointer_size), val)
+        self.write_scalar(alloc_range(offset, self.tcx.data_layout().pointer_size()), val)
     }
 
     /// Mark the given sub-range (relative to this allocation reference) as uninitialized.
@@ -1285,7 +1311,7 @@ impl<'a, 'tcx, Prov: Provenance, Extra, Bytes: AllocBytes> AllocRef<'a, 'tcx, Pr
     /// `offset` is relative to this allocation reference, not the base of the allocation.
     pub fn read_pointer(&self, offset: Size) -> InterpResult<'tcx, Scalar<Prov>> {
         self.read_scalar(
-            alloc_range(offset, self.tcx.data_layout().pointer_size),
+            alloc_range(offset, self.tcx.data_layout().pointer_size()),
             /*read_provenance*/ true,
         )
     }
diff --git a/compiler/rustc_const_eval/src/interpret/mod.rs b/compiler/rustc_const_eval/src/interpret/mod.rs
index 8303f891f98..2fc372dd019 100644
--- a/compiler/rustc_const_eval/src/interpret/mod.rs
+++ b/compiler/rustc_const_eval/src/interpret/mod.rs
@@ -37,6 +37,7 @@ pub use self::place::{MPlaceTy, MemPlaceMeta, PlaceTy, Writeable};
 use self::place::{MemPlace, Place};
 pub use self::projection::{OffsetMode, Projectable};
 pub use self::stack::{Frame, FrameInfo, LocalState, ReturnContinuation, StackPopInfo};
+pub use self::util::EnteredTraceSpan;
 pub(crate) use self::util::create_static_alloc;
 pub use self::validity::{CtfeValidationMode, RangeSet, RefTracking};
 pub use self::visitor::ValueVisitor;
diff --git a/compiler/rustc_const_eval/src/interpret/projection.rs b/compiler/rustc_const_eval/src/interpret/projection.rs
index 306697d4ec9..f72c4418081 100644
--- a/compiler/rustc_const_eval/src/interpret/projection.rs
+++ b/compiler/rustc_const_eval/src/interpret/projection.rs
@@ -296,7 +296,11 @@ where
         base: &'a P,
     ) -> InterpResult<'tcx, ArrayIterator<'a, 'tcx, M::Provenance, P>> {
         let abi::FieldsShape::Array { stride, .. } = base.layout().fields else {
-            span_bug!(self.cur_span(), "project_array_fields: expected an array layout");
+            span_bug!(
+                self.cur_span(),
+                "project_array_fields: expected an array layout, got {:#?}",
+                base.layout()
+            );
         };
         let len = base.len(self)?;
         let field_layout = base.layout().field(self, 0);
diff --git a/compiler/rustc_const_eval/src/interpret/util.rs b/compiler/rustc_const_eval/src/interpret/util.rs
index 99add01f95c..72650d545c3 100644
--- a/compiler/rustc_const_eval/src/interpret/util.rs
+++ b/compiler/rustc_const_eval/src/interpret/util.rs
@@ -46,21 +46,20 @@ pub(crate) fn create_static_alloc<'tcx>(
     interp_ok(ecx.ptr_to_mplace(Pointer::from(alloc_id).into(), layout))
 }
 
-/// This struct is needed to enforce `#[must_use]` on [tracing::span::EnteredSpan]
-/// while wrapping them in an `Option`.
-#[must_use]
-pub enum MaybeEnteredSpan {
-    Some(tracing::span::EnteredSpan),
-    None,
-}
+/// A marker trait returned by [crate::interpret::Machine::enter_trace_span], identifying either a
+/// real [tracing::span::EnteredSpan] in case tracing is enabled, or the dummy type `()` when
+/// tracing is disabled.
+pub trait EnteredTraceSpan {}
+impl EnteredTraceSpan for () {}
+impl EnteredTraceSpan for tracing::span::EnteredSpan {}
 
+/// Shorthand for calling [crate::interpret::Machine::enter_trace_span] on a [tracing::info_span].
+/// This is supposed to be compiled out when [crate::interpret::Machine::enter_trace_span] has the
+/// default implementation (i.e. when it does not actually enter the span but instead returns `()`).
+/// Note: the result of this macro **must be used** because the span is exited when it's dropped.
 #[macro_export]
 macro_rules! enter_trace_span {
     ($machine:ident, $($tt:tt)*) => {
-        if $machine::TRACING_ENABLED {
-            $crate::interpret::util::MaybeEnteredSpan::Some(tracing::info_span!($($tt)*).entered())
-        } else {
-            $crate::interpret::util::MaybeEnteredSpan::None
-        }
+        $machine::enter_trace_span(|| tracing::info_span!($($tt)*))
     }
 }
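
A standalone toy of the pattern introduced here: a marker trait implemented both for `()` (tracing disabled, the default, so the call can compile away) and for a real guard type (tracing enabled). The real code returns `tracing::span::EnteredSpan`; the sketch below swaps the `tracing` dependency for `println!` so it runs on its own, and all names are invented:

```rust
trait EnteredTraceSpan {}
impl EnteredTraceSpan for () {}

struct Guard(&'static str);
impl EnteredTraceSpan for Guard {}
impl Drop for Guard {
    fn drop(&mut self) {
        // The span is "exited" when the guard is dropped, hence #[must_use].
        println!("exit {}", self.0);
    }
}

trait Machine {
    // Default: no span is entered; returning `()` lets the call optimize away.
    #[must_use]
    fn enter_trace_span(_name: &'static str) -> impl EnteredTraceSpan {
        ()
    }
}

struct SilentMachine;
impl Machine for SilentMachine {}

struct TracingMachine;
impl Machine for TracingMachine {
    fn enter_trace_span(name: &'static str) -> impl EnteredTraceSpan {
        println!("enter {name}");
        Guard(name)
    }
}

fn main() {
    let _quiet = SilentMachine::enter_trace_span("eval_statement"); // no-op
    let _loud = TracingMachine::enter_trace_span("eval_statement"); // prints enter/exit
}
```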
diff --git a/compiler/rustc_const_eval/src/interpret/validity.rs b/compiler/rustc_const_eval/src/interpret/validity.rs
index fc4d13af8c4..fc44490c96d 100644
--- a/compiler/rustc_const_eval/src/interpret/validity.rs
+++ b/compiler/rustc_const_eval/src/interpret/validity.rs
@@ -571,40 +571,42 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
                     let alloc_actual_mutbl =
                         global_alloc.mutability(*self.ecx.tcx, self.ecx.typing_env);
 
-                    if let GlobalAlloc::Static(did) = global_alloc {
-                        let DefKind::Static { nested, .. } = self.ecx.tcx.def_kind(did) else {
-                            bug!()
-                        };
-                        // Special handling for pointers to statics (irrespective of their type).
-                        assert!(!self.ecx.tcx.is_thread_local_static(did));
-                        assert!(self.ecx.tcx.is_static(did));
-                        // Mode-specific checks
-                        match ctfe_mode {
-                            CtfeValidationMode::Static { .. }
-                            | CtfeValidationMode::Promoted { .. } => {
-                                // We skip recursively checking other statics. These statics must be sound by
-                                // themselves, and the only way to get broken statics here is by using
-                                // unsafe code.
-                                // The reasons we don't check other statics is twofold. For one, in all
-                                // sound cases, the static was already validated on its own, and second, we
-                                // trigger cycle errors if we try to compute the value of the other static
-                                // and that static refers back to us (potentially through a promoted).
-                                // This could miss some UB, but that's fine.
-                                // We still walk nested allocations, as they are fundamentally part of this validation run.
-                                // This means we will also recurse into nested statics of *other*
-                                // statics, even though we do not recurse into other statics directly.
-                                // That's somewhat inconsistent but harmless.
-                                skip_recursive_check = !nested;
-                            }
-                            CtfeValidationMode::Const { .. } => {
-                                // If this is mutable memory or an `extern static`, there's no point in checking it -- we'd
-                                // just get errors trying to read the value.
-                                if alloc_actual_mutbl.is_mut() || self.ecx.tcx.is_foreign_item(did)
-                                {
-                                    skip_recursive_check = true;
+                    match global_alloc {
+                        GlobalAlloc::Static(did) => {
+                            let DefKind::Static { nested, .. } = self.ecx.tcx.def_kind(did) else {
+                                bug!()
+                            };
+                            assert!(!self.ecx.tcx.is_thread_local_static(did));
+                            assert!(self.ecx.tcx.is_static(did));
+                            match ctfe_mode {
+                                CtfeValidationMode::Static { .. }
+                                | CtfeValidationMode::Promoted { .. } => {
+                                    // We skip recursively checking other statics. These statics must be sound by
+                                    // themselves, and the only way to get broken statics here is by using
+                                    // unsafe code.
+                                    // The reasons we don't check other statics is twofold. For one, in all
+                                    // sound cases, the static was already validated on its own, and second, we
+                                    // trigger cycle errors if we try to compute the value of the other static
+                                    // and that static refers back to us (potentially through a promoted).
+                                    // This could miss some UB, but that's fine.
+                                    // We still walk nested allocations, as they are fundamentally part of this validation run.
+                                    // This means we will also recurse into nested statics of *other*
+                                    // statics, even though we do not recurse into other statics directly.
+                                    // That's somewhat inconsistent but harmless.
+                                    skip_recursive_check = !nested;
+                                }
+                                CtfeValidationMode::Const { .. } => {
+                                    // If this is mutable memory or an `extern static`, there's no point in checking it -- we'd
+                                    // just get errors trying to read the value.
+                                    if alloc_actual_mutbl.is_mut()
+                                        || self.ecx.tcx.is_foreign_item(did)
+                                    {
+                                        skip_recursive_check = true;
+                                    }
                                 }
                             }
                         }
+                        _ => (),
                     }
 
                     // If this allocation has size zero, there is no actual mutability here.
diff --git a/compiler/rustc_data_structures/src/profiling.rs b/compiler/rustc_data_structures/src/profiling.rs
index 1b4db7adc27..881aa679156 100644
--- a/compiler/rustc_data_structures/src/profiling.rs
+++ b/compiler/rustc_data_structures/src/profiling.rs
@@ -142,7 +142,7 @@ const EVENT_FILTERS_BY_NAME: &[(&str, EventFilter)] = &[
     ("generic-activity", EventFilter::GENERIC_ACTIVITIES),
     ("query-provider", EventFilter::QUERY_PROVIDERS),
     ("query-cache-hit", EventFilter::QUERY_CACHE_HITS),
-    ("query-cache-hit-count", EventFilter::QUERY_CACHE_HITS),
+    ("query-cache-hit-count", EventFilter::QUERY_CACHE_HIT_COUNTS),
     ("query-blocked", EventFilter::QUERY_BLOCKED),
     ("incr-cache-load", EventFilter::INCR_CACHE_LOADS),
     ("query-keys", EventFilter::QUERY_KEYS),
diff --git a/compiler/rustc_errors/messages.ftl b/compiler/rustc_errors/messages.ftl
index d68dba0be5e..ad2e206260d 100644
--- a/compiler/rustc_errors/messages.ftl
+++ b/compiler/rustc_errors/messages.ftl
@@ -41,5 +41,8 @@ errors_target_invalid_bits =
 
 errors_target_invalid_bits_size = {$err}
 
+errors_target_invalid_datalayout_pointer_spec =
+    unknown pointer specification `{$err}` in datalayout string
+
 errors_target_missing_alignment =
     missing alignment for `{$cause}` in "data-layout"
diff --git a/compiler/rustc_errors/src/diagnostic.rs b/compiler/rustc_errors/src/diagnostic.rs
index 5746c28a2ab..a128f8d31a1 100644
--- a/compiler/rustc_errors/src/diagnostic.rs
+++ b/compiler/rustc_errors/src/diagnostic.rs
@@ -1165,7 +1165,7 @@ impl<'a, G: EmissionGuarantee> Diag<'a, G> {
         self.push_suggestion(CodeSuggestion {
             substitutions,
             msg: self.subdiagnostic_message_to_diagnostic_message(msg),
-            style: SuggestionStyle::ShowCode,
+            style: SuggestionStyle::ShowAlways,
             applicability,
         });
         self
diff --git a/compiler/rustc_errors/src/diagnostic_impls.rs b/compiler/rustc_errors/src/diagnostic_impls.rs
index 8b59ba9984c..eeb9ac28808 100644
--- a/compiler/rustc_errors/src/diagnostic_impls.rs
+++ b/compiler/rustc_errors/src/diagnostic_impls.rs
@@ -374,6 +374,10 @@ impl<G: EmissionGuarantee> Diagnostic<'_, G> for TargetDataLayoutErrors<'_> {
             TargetDataLayoutErrors::InvalidBitsSize { err } => {
                 Diag::new(dcx, level, fluent::errors_target_invalid_bits_size).with_arg("err", err)
             }
+            TargetDataLayoutErrors::UnknownPointerSpecification { err } => {
+                Diag::new(dcx, level, fluent::errors_target_invalid_datalayout_pointer_spec)
+                    .with_arg("err", err)
+            }
         }
     }
 }
diff --git a/compiler/rustc_errors/src/emitter.rs b/compiler/rustc_errors/src/emitter.rs
index 2f398cea926..510f37f37e2 100644
--- a/compiler/rustc_errors/src/emitter.rs
+++ b/compiler/rustc_errors/src/emitter.rs
@@ -3526,7 +3526,7 @@ pub fn is_case_difference(sm: &SourceMap, suggested: &str, sp: Span) -> bool {
     // All the chars that differ in capitalization are confusable (above):
     let confusable = iter::zip(found.chars(), suggested.chars())
         .filter(|(f, s)| f != s)
-        .all(|(f, s)| (ascii_confusables.contains(&f) || ascii_confusables.contains(&s)));
+        .all(|(f, s)| ascii_confusables.contains(&f) || ascii_confusables.contains(&s));
     confusable && found.to_lowercase() == suggested.to_lowercase()
             // FIXME: We sometimes suggest the same thing we already have, which is a
             //        bug, but be defensive against that here.
diff --git a/compiler/rustc_errors/src/lib.rs b/compiler/rustc_errors/src/lib.rs
index 69ad15c6081..381d780077d 100644
--- a/compiler/rustc_errors/src/lib.rs
+++ b/compiler/rustc_errors/src/lib.rs
@@ -3,7 +3,6 @@
 //! This module contains the code for creating and emitting diagnostics.
 
 // tidy-alphabetical-start
-#![allow(incomplete_features)]
 #![allow(internal_features)]
 #![allow(rustc::diagnostic_outside_of_impl)]
 #![allow(rustc::direct_use_of_rustc_type_ir)]
diff --git a/compiler/rustc_expand/messages.ftl b/compiler/rustc_expand/messages.ftl
index 3fc0fa06191..89d6e62834d 100644
--- a/compiler/rustc_expand/messages.ftl
+++ b/compiler/rustc_expand/messages.ftl
@@ -133,6 +133,32 @@ expand_module_multiple_candidates =
 expand_must_repeat_once =
     this must repeat at least once
 
+expand_mve_extra_tokens =
+    unexpected trailing tokens
+    .label = for this metavariable expression
+    .range = the `{$name}` metavariable expression takes between {$min_or_exact_args} and {$max_args} arguments
+    .exact = the `{$name}` metavariable expression takes {$min_or_exact_args ->
+        [zero] no arguments
+        [one] a single argument
+        *[other] {$min_or_exact_args} arguments
+    }
+    .suggestion = try removing {$extra_count ->
+        [one] this token
+        *[other] these tokens
+    }
+
+expand_mve_missing_paren =
+    expected `(`
+    .label = for this metavariable expression
+    .unexpected = unexpected token
+    .note = metavariable expressions use function-like parentheses syntax
+    .suggestion = try adding parentheses
+
+expand_mve_unrecognized_expr =
+    unrecognized metavariable expression
+    .label = not a valid metavariable expression
+    .note = valid metavariable expressions are {$valid_expr_list}
+
 expand_mve_unrecognized_var =
     variable `{$key}` is not recognized in meta-variable expression
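
For context, these diagnostics fire while parsing metavariable expressions in `macro_rules!` under the unstable `macro_metavar_expr` feature. A hedged, nightly-only example of a well-formed expression, with the malformed variants that map to the new messages shown as comments (the surface syntax may still change):

```rust
#![feature(macro_metavar_expr)] // nightly-only, unstable

macro_rules! count_args {
    // A well-formed metavariable expression: counts the repetitions of `$x`.
    ($($x:expr),*) => { ${count($x)} };
}

// Malformed variants covered by the diagnostics above (kept as comments so
// this file still compiles):
//   ${count}            -> expand_mve_missing_paren  ("expected `(`")
//   ${count($x) extra}  -> expand_mve_extra_tokens
//   ${frobnicate($x)}   -> expand_mve_unrecognized_expr

fn main() {
    assert_eq!(count_args!(1, 2, 3), 3);
}
```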
 
diff --git a/compiler/rustc_expand/src/errors.rs b/compiler/rustc_expand/src/errors.rs
index fdbc65aff68..3ac5d213053 100644
--- a/compiler/rustc_expand/src/errors.rs
+++ b/compiler/rustc_expand/src/errors.rs
@@ -496,6 +496,50 @@ pub(crate) use metavar_exprs::*;
 mod metavar_exprs {
     use super::*;
 
+    #[derive(Diagnostic, Default)]
+    #[diag(expand_mve_extra_tokens)]
+    pub(crate) struct MveExtraTokens {
+        #[primary_span]
+        #[suggestion(code = "", applicability = "machine-applicable")]
+        pub span: Span,
+        #[label]
+        pub ident_span: Span,
+        pub extra_count: usize,
+
+        // The rest is only used for specific diagnostics and can be left as the
+        // defaults if neither `note` is `Some`.
+        #[note(expand_exact)]
+        pub exact_args_note: Option<()>,
+        #[note(expand_range)]
+        pub range_args_note: Option<()>,
+        pub min_or_exact_args: usize,
+        pub max_args: usize,
+        pub name: String,
+    }
+
+    #[derive(Diagnostic)]
+    #[note]
+    #[diag(expand_mve_missing_paren)]
+    pub(crate) struct MveMissingParen {
+        #[primary_span]
+        #[label]
+        pub ident_span: Span,
+        #[label(expand_unexpected)]
+        pub unexpected_span: Option<Span>,
+        #[suggestion(code = "( /* ... */ )", applicability = "has-placeholders")]
+        pub insert_span: Option<Span>,
+    }
+
+    #[derive(Diagnostic)]
+    #[note]
+    #[diag(expand_mve_unrecognized_expr)]
+    pub(crate) struct MveUnrecognizedExpr {
+        #[primary_span]
+        #[label]
+        pub span: Span,
+        pub valid_expr_list: &'static str,
+    }
+
     #[derive(Diagnostic)]
     #[diag(expand_mve_unrecognized_var)]
     pub(crate) struct MveUnrecognizedVar {
diff --git a/compiler/rustc_expand/src/mbe/metavar_expr.rs b/compiler/rustc_expand/src/mbe/metavar_expr.rs
index ffd3548019a..d2b275ad20a 100644
--- a/compiler/rustc_expand/src/mbe/metavar_expr.rs
+++ b/compiler/rustc_expand/src/mbe/metavar_expr.rs
@@ -7,6 +7,8 @@ use rustc_macros::{Decodable, Encodable};
 use rustc_session::parse::ParseSess;
 use rustc_span::{Ident, Span, Symbol};
 
+use crate::errors;
+
 pub(crate) const RAW_IDENT_ERR: &str = "`${concat(..)}` currently does not support raw identifiers";
 pub(crate) const UNSUPPORTED_CONCAT_ELEM_ERR: &str = "expected identifier or string literal";
 
@@ -40,11 +42,32 @@ impl MetaVarExpr {
     ) -> PResult<'psess, MetaVarExpr> {
         let mut iter = input.iter();
         let ident = parse_ident(&mut iter, psess, outer_span)?;
-        let Some(TokenTree::Delimited(.., Delimiter::Parenthesis, args)) = iter.next() else {
-            let msg = "meta-variable expression parameter must be wrapped in parentheses";
-            return Err(psess.dcx().struct_span_err(ident.span, msg));
+        let next = iter.next();
+        let Some(TokenTree::Delimited(.., Delimiter::Parenthesis, args)) = next else {
+            // No `()`; wrong or no delimiters. Point at a problematic span or a place to
+            // add parens if it makes sense.
+            let (unexpected_span, insert_span) = match next {
+                Some(TokenTree::Delimited(..)) => (None, None),
+                Some(tt) => (Some(tt.span()), None),
+                None => (None, Some(ident.span.shrink_to_hi())),
+            };
+            let err =
+                errors::MveMissingParen { ident_span: ident.span, unexpected_span, insert_span };
+            return Err(psess.dcx().create_err(err));
         };
-        check_trailing_token(&mut iter, psess)?;
+
+        // Ensure there are no trailing tokens in the braces, e.g. `${foo() extra}`
+        if iter.peek().is_some() {
+            let span = iter_span(&iter).expect("checked is_some above");
+            let err = errors::MveExtraTokens {
+                span,
+                ident_span: ident.span,
+                extra_count: iter.count(),
+                ..Default::default()
+            };
+            return Err(psess.dcx().create_err(err));
+        }
+
         let mut iter = args.iter();
         let rslt = match ident.as_str() {
             "concat" => parse_concat(&mut iter, psess, outer_span, ident.span)?,
@@ -56,18 +79,14 @@ impl MetaVarExpr {
             "index" => MetaVarExpr::Index(parse_depth(&mut iter, psess, ident.span)?),
             "len" => MetaVarExpr::Len(parse_depth(&mut iter, psess, ident.span)?),
             _ => {
-                let err_msg = "unrecognized meta-variable expression";
-                let mut err = psess.dcx().struct_span_err(ident.span, err_msg);
-                err.span_suggestion(
-                    ident.span,
-                    "supported expressions are count, ignore, index and len",
-                    "",
-                    Applicability::MachineApplicable,
-                );
-                return Err(err);
+                let err = errors::MveUnrecognizedExpr {
+                    span: ident.span,
+                    valid_expr_list: "`count`, `ignore`, `index`, `len`, and `concat`",
+                };
+                return Err(psess.dcx().create_err(err));
             }
         };
-        check_trailing_token(&mut iter, psess)?;
+        check_trailing_tokens(&mut iter, psess, ident)?;
         Ok(rslt)
     }
 
@@ -87,20 +106,51 @@ impl MetaVarExpr {
     }
 }
 
-// Checks if there are any remaining tokens. For example, `${ignore(ident ... a b c ...)}`
-fn check_trailing_token<'psess>(
+/// Checks if there are any remaining tokens (for example, `${ignore($valid, extra)}`) and, if so,
+/// creates a diagnostic with the correct argument count.
+fn check_trailing_tokens<'psess>(
     iter: &mut TokenStreamIter<'_>,
     psess: &'psess ParseSess,
+    ident: Ident,
 ) -> PResult<'psess, ()> {
-    if let Some(tt) = iter.next() {
-        let mut diag = psess
-            .dcx()
-            .struct_span_err(tt.span(), format!("unexpected token: {}", pprust::tt_to_string(tt)));
-        diag.span_note(tt.span(), "meta-variable expression must not have trailing tokens");
-        Err(diag)
-    } else {
-        Ok(())
+    if iter.peek().is_none() {
+        // All tokens consumed, as expected
+        return Ok(());
     }
+
+    // `None` for max indicates the arg count must be exact, `Some` indicates a range is accepted.
+    let (min_or_exact_args, max_args) = match ident.as_str() {
+        "concat" => panic!("concat takes unlimited tokens but didn't eat them all"),
+        "ignore" => (1, None),
+        // 1 or 2 args
+        "count" => (1, Some(2)),
+        // 0 or 1 arg
+        "index" => (0, Some(1)),
+        "len" => (0, Some(1)),
+        other => unreachable!("unknown MVEs should be rejected earlier (got `{other}`)"),
+    };
+
+    let err = errors::MveExtraTokens {
+        span: iter_span(iter).expect("checked is_none above"),
+        ident_span: ident.span,
+        extra_count: iter.count(),
+
+        exact_args_note: if max_args.is_some() { None } else { Some(()) },
+        range_args_note: if max_args.is_some() { Some(()) } else { None },
+        min_or_exact_args,
+        max_args: max_args.unwrap_or_default(),
+        name: ident.to_string(),
+    };
+    Err(psess.dcx().create_err(err))
+}
+
+/// Returns a span encompassing all tokens in the iterator if there is at least one item.
+fn iter_span(iter: &TokenStreamIter<'_>) -> Option<Span> {
+    let mut iter = iter.clone(); // cloning is cheap
+    let first_sp = iter.next()?.span();
+    let last_sp = iter.last().map(TokenTree::span).unwrap_or(first_sp);
+    let span = first_sp.with_hi(last_sp.hi());
+    Some(span)
 }
 
 /// Indicates what is placed in a `concat` parameter. For example, literals
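
(Illustrative argument-count cases for the `check_trailing_tokens` table above; the resulting notes are paraphrased from the Fluent messages earlier in this diff.)

    // `${ignore($x, $y)}`  -> exact note: `ignore` takes a single argument
    // `${count($x, 0, 1)}` -> range note: `count` takes between 1 and 2 arguments
    // `${index(0, 1)}`     -> range note: `index` takes between 0 and 1 arguments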
diff --git a/compiler/rustc_hir/src/lang_items.rs b/compiler/rustc_hir/src/lang_items.rs
index c11db63ba11..6347f1bfa71 100644
--- a/compiler/rustc_hir/src/lang_items.rs
+++ b/compiler/rustc_hir/src/lang_items.rs
@@ -274,6 +274,8 @@ language_item_table! {
     PartialOrd,              sym::partial_ord,         partial_ord_trait,          Target::Trait,          GenericRequirement::Exact(1);
     CVoid,                   sym::c_void,              c_void,                     Target::Enum,           GenericRequirement::None;
 
+    TypeId,                  sym::type_id,             type_id,                    Target::Struct,         GenericRequirement::None;
+
     // A number of panic-related lang items. The `panic` item corresponds to divide-by-zero and
     // various panic cases with `match`. The `panic_bounds_check` item is for indexing arrays.
     //
diff --git a/compiler/rustc_hir_analysis/src/check/check.rs b/compiler/rustc_hir_analysis/src/check/check.rs
index bd89d010a3c..f4bbf03f0c2 100644
--- a/compiler/rustc_hir_analysis/src/check/check.rs
+++ b/compiler/rustc_hir_analysis/src/check/check.rs
@@ -768,15 +768,14 @@ pub(crate) fn check_item_type(tcx: TyCtxt<'_>, def_id: LocalDefId) -> Result<(),
                     check_static_inhabited(tcx, def_id);
                     check_static_linkage(tcx, def_id);
                     res = res.and(wfcheck::check_static_item(tcx, def_id));
-
-                    // Only `Node::Item` and `Node::ForeignItem` still have HIR based
-                    // checks. Returning early here does not miss any checks and
-                    // avoids this query from having a direct dependency edge on the HIR
-                    return res;
                 }
-                DefKind::Const => {}
+                DefKind::Const => res = res.and(wfcheck::check_const_item(tcx, def_id)),
                 _ => unreachable!(),
             }
+            // Only `Node::Item` and `Node::ForeignItem` still have HIR-based
+            // checks. Returning early here does not miss any checks and
+            // avoids giving this query a direct dependency edge on the HIR.
+            return res;
         }
         DefKind::Enum => {
             tcx.ensure_ok().generics_of(def_id);
diff --git a/compiler/rustc_hir_analysis/src/check/intrinsic.rs b/compiler/rustc_hir_analysis/src/check/intrinsic.rs
index cebf7d1b532..e3532ade32f 100644
--- a/compiler/rustc_hir_analysis/src/check/intrinsic.rs
+++ b/compiler/rustc_hir_analysis/src/check/intrinsic.rs
@@ -93,6 +93,7 @@ fn intrinsic_operation_unsafety(tcx: TyCtxt<'_>, intrinsic_id: LocalDefId) -> hi
         | sym::three_way_compare
         | sym::discriminant_value
         | sym::type_id
+        | sym::type_id_eq
         | sym::select_unpredictable
         | sym::cold_path
         | sym::ptr_guaranteed_cmp
@@ -220,7 +221,13 @@ pub(crate) fn check_intrinsic_type(
         sym::needs_drop => (1, 0, vec![], tcx.types.bool),
 
         sym::type_name => (1, 0, vec![], Ty::new_static_str(tcx)),
-        sym::type_id => (1, 0, vec![], tcx.types.u128),
+        sym::type_id => {
+            (1, 0, vec![], tcx.type_of(tcx.lang_items().type_id().unwrap()).instantiate_identity())
+        }
+        sym::type_id_eq => {
+            let type_id = tcx.type_of(tcx.lang_items().type_id().unwrap()).instantiate_identity();
+            (0, 0, vec![type_id, type_id], tcx.types.bool)
+        }
         sym::offset => (2, 0, vec![param(0), param(1)], param(0)),
         sym::arith_offset => (
             1,
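
(Hedged reading of the signatures checked above, assuming the new `TypeId` lang item is `core::any::TypeId` and the usual `?Sized + 'static` bound; the actual declarations live in `core::intrinsics`.)

    // 1 type parameter, no value arguments, returns the `TypeId` lang item type:
    fn type_id<T: ?Sized + 'static>() -> TypeId;
    // no generics, compares two type ids:
    fn type_id_eq(a: TypeId, b: TypeId) -> bool;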
diff --git a/compiler/rustc_hir_analysis/src/check/wfcheck.rs b/compiler/rustc_hir_analysis/src/check/wfcheck.rs
index 0a3e018b79a..428d627ad6f 100644
--- a/compiler/rustc_hir_analysis/src/check/wfcheck.rs
+++ b/compiler/rustc_hir_analysis/src/check/wfcheck.rs
@@ -290,7 +290,6 @@ pub(super) fn check_item<'tcx>(
             res
         }
         hir::ItemKind::Fn { sig, .. } => check_item_fn(tcx, def_id, sig.decl),
-        hir::ItemKind::Const(_, _, ty, _) => check_const_item(tcx, def_id, ty.span),
         hir::ItemKind::Struct(..) => check_type_defn(tcx, item, false),
         hir::ItemKind::Union(..) => check_type_defn(tcx, item, true),
         hir::ItemKind::Enum(..) => check_type_defn(tcx, item, true),
@@ -1185,7 +1184,8 @@ pub(super) fn check_static_item(
 ) -> Result<(), ErrorGuaranteed> {
     enter_wf_checking_ctxt(tcx, item_id, |wfcx| {
         let ty = tcx.type_of(item_id).instantiate_identity();
-        let item_ty = wfcx.deeply_normalize(DUMMY_SP, Some(WellFormedLoc::Ty(item_id)), ty);
+        let span = tcx.ty_span(item_id);
+        let item_ty = wfcx.deeply_normalize(span, Some(WellFormedLoc::Ty(item_id)), ty);
 
         let is_foreign_item = tcx.is_foreign_item(item_id);
 
@@ -1194,7 +1194,7 @@ pub(super) fn check_static_item(
             !matches!(tail.kind(), ty::Foreign(_))
         };
 
-        wfcx.register_wf_obligation(DUMMY_SP, Some(WellFormedLoc::Ty(item_id)), item_ty.into());
+        wfcx.register_wf_obligation(span, Some(WellFormedLoc::Ty(item_id)), item_ty.into());
         if forbid_unsized {
             let span = tcx.def_span(item_id);
             wfcx.register_bound(
@@ -1216,7 +1216,6 @@ pub(super) fn check_static_item(
             && !tcx.is_thread_local_static(item_id.to_def_id());
 
         if should_check_for_sync {
-            let span = tcx.def_span(item_id);
             wfcx.register_bound(
                 traits::ObligationCause::new(
                     span,
@@ -1232,13 +1231,10 @@ pub(super) fn check_static_item(
     })
 }
 
-fn check_const_item(
-    tcx: TyCtxt<'_>,
-    def_id: LocalDefId,
-    ty_span: Span,
-) -> Result<(), ErrorGuaranteed> {
+pub(crate) fn check_const_item(tcx: TyCtxt<'_>, def_id: LocalDefId) -> Result<(), ErrorGuaranteed> {
     enter_wf_checking_ctxt(tcx, def_id, |wfcx| {
         let ty = tcx.type_of(def_id).instantiate_identity();
+        let ty_span = tcx.ty_span(def_id);
         let ty = wfcx.deeply_normalize(ty_span, Some(WellFormedLoc::Ty(def_id)), ty);
 
         wfcx.register_wf_obligation(ty_span, Some(WellFormedLoc::Ty(def_id)), ty.into());
@@ -1505,7 +1501,7 @@ pub(super) fn check_where_clauses<'tcx>(wfcx: &WfCheckingCtxt<'_, 'tcx>, def_id:
             let cause = traits::ObligationCause::new(
                 sp,
                 wfcx.body_def_id,
-                ObligationCauseCode::WhereClause(def_id.to_def_id(), DUMMY_SP),
+                ObligationCauseCode::WhereClause(def_id.to_def_id(), sp),
             );
             Obligation::new(tcx, cause, wfcx.param_env, pred)
         });
diff --git a/compiler/rustc_hir_typeck/messages.ftl b/compiler/rustc_hir_typeck/messages.ftl
index c21b16c9f9f..bac4d70103c 100644
--- a/compiler/rustc_hir_typeck/messages.ftl
+++ b/compiler/rustc_hir_typeck/messages.ftl
@@ -82,13 +82,6 @@ hir_typeck_cast_unknown_pointer = cannot cast {$to ->
 hir_typeck_const_continue_bad_label =
     `#[const_continue]` must break to a labeled block that participates in a `#[loop_match]`
 
-hir_typeck_const_select_must_be_const = this argument must be a `const fn`
-    .help = consult the documentation on `const_eval_select` for more information
-
-hir_typeck_const_select_must_be_fn = this argument must be a function item
-    .note = expected a function item, found {$ty}
-    .help = consult the documentation on `const_eval_select` for more information
-
 hir_typeck_continue_labeled_block =
     `continue` pointing to a labeled block
     .label = labeled blocks cannot be `continue`'d
diff --git a/compiler/rustc_hir_typeck/src/callee.rs b/compiler/rustc_hir_typeck/src/callee.rs
index 7611f8ac3e1..48bb45de53e 100644
--- a/compiler/rustc_hir_typeck/src/callee.rs
+++ b/compiler/rustc_hir_typeck/src/callee.rs
@@ -578,29 +578,6 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
             }
         }
 
-        if let Some(def_id) = def_id
-            && self.tcx.def_kind(def_id) == hir::def::DefKind::Fn
-            && self.tcx.is_intrinsic(def_id, sym::const_eval_select)
-        {
-            let fn_sig = self.resolve_vars_if_possible(fn_sig);
-            for idx in 0..=1 {
-                let arg_ty = fn_sig.inputs()[idx + 1];
-                let span = arg_exprs.get(idx + 1).map_or(call_expr.span, |arg| arg.span);
-                // Check that second and third argument of `const_eval_select` must be `FnDef`, and additionally that
-                // the second argument must be `const fn`. The first argument must be a tuple, but this is already expressed
-                // in the function signature (`F: FnOnce<ARG>`), so I did not bother to add another check here.
-                //
-                // This check is here because there is currently no way to express a trait bound for `FnDef` types only.
-                if let ty::FnDef(def_id, _args) = *arg_ty.kind() {
-                    if idx == 0 && !self.tcx.is_const_fn(def_id) {
-                        self.dcx().emit_err(errors::ConstSelectMustBeConst { span });
-                    }
-                } else {
-                    self.dcx().emit_err(errors::ConstSelectMustBeFn { span, ty: arg_ty });
-                }
-            }
-        }
-
         fn_sig.output()
     }
 
diff --git a/compiler/rustc_hir_typeck/src/errors.rs b/compiler/rustc_hir_typeck/src/errors.rs
index 3606c778fc4..a8bb6956f10 100644
--- a/compiler/rustc_hir_typeck/src/errors.rs
+++ b/compiler/rustc_hir_typeck/src/errors.rs
@@ -606,24 +606,6 @@ impl Subdiagnostic for RemoveSemiForCoerce {
 }
 
 #[derive(Diagnostic)]
-#[diag(hir_typeck_const_select_must_be_const)]
-#[help]
-pub(crate) struct ConstSelectMustBeConst {
-    #[primary_span]
-    pub span: Span,
-}
-
-#[derive(Diagnostic)]
-#[diag(hir_typeck_const_select_must_be_fn)]
-#[note]
-#[help]
-pub(crate) struct ConstSelectMustBeFn<'a> {
-    #[primary_span]
-    pub span: Span,
-    pub ty: Ty<'a>,
-}
-
-#[derive(Diagnostic)]
 #[diag(hir_typeck_union_pat_multiple_fields)]
 pub(crate) struct UnionPatMultipleFields {
     #[primary_span]
diff --git a/compiler/rustc_hir_typeck/src/fn_ctxt/adjust_fulfillment_errors.rs b/compiler/rustc_hir_typeck/src/fn_ctxt/adjust_fulfillment_errors.rs
index 2bc007b3ad4..4e4bf8a5562 100644
--- a/compiler/rustc_hir_typeck/src/fn_ctxt/adjust_fulfillment_errors.rs
+++ b/compiler/rustc_hir_typeck/src/fn_ctxt/adjust_fulfillment_errors.rs
@@ -184,9 +184,14 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
                     return true;
                 }
 
-                for param in [param_to_point_at, fallback_param_to_point_at, self_param_to_point_at]
-                    .into_iter()
-                    .flatten()
+                for param in [
+                    predicate_self_type_to_point_at,
+                    param_to_point_at,
+                    fallback_param_to_point_at,
+                    self_param_to_point_at,
+                ]
+                .into_iter()
+                .flatten()
                 {
                     if self.blame_specific_arg_if_possible(
                         error,
diff --git a/compiler/rustc_hir_typeck/src/upvar.rs b/compiler/rustc_hir_typeck/src/upvar.rs
index 982cfa2e42b..be774106abf 100644
--- a/compiler/rustc_hir_typeck/src/upvar.rs
+++ b/compiler/rustc_hir_typeck/src/upvar.rs
@@ -529,7 +529,11 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
         // process any deferred resolutions.
         let deferred_call_resolutions = self.remove_deferred_call_resolutions(closure_def_id);
         for deferred_call_resolution in deferred_call_resolutions {
-            deferred_call_resolution.resolve(self);
+            deferred_call_resolution.resolve(&mut FnCtxt::new(
+                self,
+                self.param_env,
+                closure_def_id,
+            ));
         }
     }
 
diff --git a/compiler/rustc_infer/src/lib.rs b/compiler/rustc_infer/src/lib.rs
index 285e2912937..e562c583313 100644
--- a/compiler/rustc_infer/src/lib.rs
+++ b/compiler/rustc_infer/src/lib.rs
@@ -14,9 +14,7 @@
 
 // tidy-alphabetical-start
 #![allow(internal_features)]
-#![allow(rustc::diagnostic_outside_of_impl)]
 #![allow(rustc::direct_use_of_rustc_type_ir)]
-#![allow(rustc::untranslatable_diagnostic)]
 #![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
 #![doc(rust_logo)]
 #![feature(assert_matches)]
diff --git a/compiler/rustc_lint/src/early/diagnostics.rs b/compiler/rustc_lint/src/early/diagnostics.rs
index 3b0a36186b6..653559009cc 100644
--- a/compiler/rustc_lint/src/early/diagnostics.rs
+++ b/compiler/rustc_lint/src/early/diagnostics.rs
@@ -1,6 +1,3 @@
-#![allow(rustc::diagnostic_outside_of_impl)]
-#![allow(rustc::untranslatable_diagnostic)]
-
 use std::borrow::Cow;
 
 use rustc_ast::util::unicode::TEXT_FLOW_CONTROL_CHARS;
diff --git a/compiler/rustc_lint/src/lints.rs b/compiler/rustc_lint/src/lints.rs
index 5e05b58146e..21148833eaf 100644
--- a/compiler/rustc_lint/src/lints.rs
+++ b/compiler/rustc_lint/src/lints.rs
@@ -1,4 +1,3 @@
-#![allow(rustc::diagnostic_outside_of_impl)]
 #![allow(rustc::untranslatable_diagnostic)]
 use std::num::NonZero;
 
diff --git a/compiler/rustc_lint/src/types.rs b/compiler/rustc_lint/src/types.rs
index e41bc8f852e..fc9d795cb23 100644
--- a/compiler/rustc_lint/src/types.rs
+++ b/compiler/rustc_lint/src/types.rs
@@ -59,8 +59,7 @@ declare_lint! {
 }
 
 declare_lint! {
-    /// The `overflowing_literals` lint detects literal out of range for its
-    /// type.
+    /// The `overflowing_literals` lint detects literals out of range for their type.
     ///
     /// ### Example
     ///
@@ -72,9 +71,9 @@ declare_lint! {
     ///
     /// ### Explanation
     ///
-    /// It is usually a mistake to use a literal that overflows the type where
-    /// it is used. Either use a literal that is within range, or change the
-    /// type to be within the range of the literal.
+    /// It is usually a mistake to use a literal that overflows its type.
+    /// Change either the literal or its type such that the literal is
+    /// within the range of its type.
     OVERFLOWING_LITERALS,
     Deny,
     "literal out of range for its type"
diff --git a/compiler/rustc_lint/src/unused.rs b/compiler/rustc_lint/src/unused.rs
index d3942a1c816..a9eb1739f7f 100644
--- a/compiler/rustc_lint/src/unused.rs
+++ b/compiler/rustc_lint/src/unused.rs
@@ -1,7 +1,7 @@
 use std::iter;
 
 use rustc_ast::util::{classify, parser};
-use rustc_ast::{self as ast, ExprKind, HasAttrs as _, StmtKind};
+use rustc_ast::{self as ast, ExprKind, FnRetTy, HasAttrs as _, StmtKind};
 use rustc_attr_data_structures::{AttributeKind, find_attr};
 use rustc_data_structures::fx::FxHashMap;
 use rustc_errors::{MultiSpan, pluralize};
@@ -599,6 +599,7 @@ enum UnusedDelimsCtx {
     AnonConst,
     MatchArmExpr,
     IndexExpr,
+    ClosureBody,
 }
 
 impl From<UnusedDelimsCtx> for &'static str {
@@ -620,6 +621,7 @@ impl From<UnusedDelimsCtx> for &'static str {
             UnusedDelimsCtx::ArrayLenExpr | UnusedDelimsCtx::AnonConst => "const expression",
             UnusedDelimsCtx::MatchArmExpr => "match arm expression",
             UnusedDelimsCtx::IndexExpr => "index expression",
+            UnusedDelimsCtx::ClosureBody => "closure body",
         }
     }
 }
@@ -919,6 +921,11 @@ trait UnusedDelimLint {
                 let (args_to_check, ctx) = match *call_or_other {
                     Call(_, ref args) => (&args[..], UnusedDelimsCtx::FunctionArg),
                     MethodCall(ref call) => (&call.args[..], UnusedDelimsCtx::MethodArg),
+                    Closure(ref closure)
+                        if matches!(closure.fn_decl.output, FnRetTy::Default(_)) =>
+                    {
+                        (&[closure.body.clone()][..], UnusedDelimsCtx::ClosureBody)
+                    }
                     // actual catch-all arm
                     _ => {
                         return;
@@ -1508,6 +1515,7 @@ impl UnusedDelimLint for UnusedBraces {
                             && (ctx != UnusedDelimsCtx::AnonConst
                                 || (matches!(expr.kind, ast::ExprKind::Lit(_))
                                     && !expr.span.from_expansion()))
+                            && ctx != UnusedDelimsCtx::ClosureBody
                             && !cx.sess().source_map().is_multiline(value.span)
                             && value.attrs.is_empty()
                             && !value.span.from_expansion()
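
(Example of what the new `ClosureBody` context is about, under default lint levels; names are illustrative and the exact lint output is not shown in this patch.)

    let double = |x: i32| (x * 2);          // parens around the closure body can now be linted
    let braced = |x: i32| { x * 2 };        // braces are exempted via the `ClosureBody` check above
    let typed  = |x: i32| -> i32 { x * 2 }; // skipped: explicit return type (`FnRetTy::Default` only)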
diff --git a/compiler/rustc_lint_defs/src/builtin.rs b/compiler/rustc_lint_defs/src/builtin.rs
index b37548b281c..15f0bad226d 100644
--- a/compiler/rustc_lint_defs/src/builtin.rs
+++ b/compiler/rustc_lint_defs/src/builtin.rs
@@ -4343,11 +4343,12 @@ declare_lint! {
     ///
     /// [future-incompatible]: ../index.md#future-incompatible-lints
     pub AMBIGUOUS_GLOB_IMPORTS,
-    Warn,
+    Deny,
     "detects certain glob imports that require reporting an ambiguity error",
     @future_incompatible = FutureIncompatibleInfo {
         reason: FutureIncompatibilityReason::FutureReleaseError,
         reference: "issue #114095 <https://github.com/rust-lang/rust/issues/114095>",
+        report_in_deps: true,
     };
 }
 
diff --git a/compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp b/compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp
index d4a05fbbbc5..cc33764e485 100644
--- a/compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp
+++ b/compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp
@@ -1275,7 +1275,7 @@ extern "C" void LLVMRustSetModuleCodeModel(LLVMModuleRef M,
 //
 // Otherwise I'll apologize in advance, it probably requires a relatively
 // significant investment on your part to "truly understand" what's going on
-// here. Not saying I do myself, but it took me awhile staring at LLVM's source
+// here. Not saying I do myself, but it took me a while staring at LLVM's source
 // and various online resources about ThinLTO to make heads or tails of all
 // this.
 
diff --git a/compiler/rustc_middle/src/arena.rs b/compiler/rustc_middle/src/arena.rs
index a0f45974089..f140ab4755b 100644
--- a/compiler/rustc_middle/src/arena.rs
+++ b/compiler/rustc_middle/src/arena.rs
@@ -1,5 +1,3 @@
-#![allow(rustc::usage_of_ty_tykind)]
-
 /// This higher-order macro declares a list of types which can be allocated by `Arena`.
 ///
 /// Specifying the `decode` modifier will add decode impls for `&T` and `&[T]` where `T` is the type
diff --git a/compiler/rustc_middle/src/hir/mod.rs b/compiler/rustc_middle/src/hir/mod.rs
index d7a8dce0536..6c07e49734a 100644
--- a/compiler/rustc_middle/src/hir/mod.rs
+++ b/compiler/rustc_middle/src/hir/mod.rs
@@ -239,8 +239,16 @@ pub fn provide(providers: &mut Providers) {
         let hir_id = tcx.local_def_id_to_hir_id(def_id);
         tcx.hir_opt_ident_span(hir_id)
     };
+    providers.ty_span = |tcx, def_id| {
+        let node = tcx.hir_node_by_def_id(def_id);
+        match node.ty() {
+            Some(ty) => ty.span,
+            None => bug!("{def_id:?} doesn't have a type: {node:#?}"),
+        }
+    };
     providers.fn_arg_idents = |tcx, def_id| {
-        if let Some(body_id) = tcx.hir_node_by_def_id(def_id).body_id() {
+        let node = tcx.hir_node_by_def_id(def_id);
+        if let Some(body_id) = node.body_id() {
             tcx.arena.alloc_from_iter(tcx.hir_body_param_idents(body_id))
         } else if let Node::TraitItem(&TraitItem {
             kind: TraitItemKind::Fn(_, TraitFn::Required(idents)),
@@ -249,7 +257,7 @@ pub fn provide(providers: &mut Providers) {
         | Node::ForeignItem(&ForeignItem {
             kind: ForeignItemKind::Fn(_, idents, _),
             ..
-        }) = tcx.hir_node(tcx.local_def_id_to_hir_id(def_id))
+        }) = node
         {
             idents
         } else {
diff --git a/compiler/rustc_middle/src/mir/consts.rs b/compiler/rustc_middle/src/mir/consts.rs
index 16edc240544..fb941977528 100644
--- a/compiler/rustc_middle/src/mir/consts.rs
+++ b/compiler/rustc_middle/src/mir/consts.rs
@@ -142,7 +142,7 @@ impl<'tcx> ConstValue<'tcx> {
                 // The reference itself is stored behind an indirection.
                 // Load the reference, and then load the actual slice contents.
                 let a = tcx.global_alloc(alloc_id).unwrap_memory().inner();
-                let ptr_size = tcx.data_layout.pointer_size;
+                let ptr_size = tcx.data_layout.pointer_size();
                 if a.size() < offset + 2 * ptr_size {
                     // (partially) dangling reference
                     return None;
diff --git a/compiler/rustc_middle/src/mir/interpret/allocation.rs b/compiler/rustc_middle/src/mir/interpret/allocation.rs
index d2cadc96b63..f039849d1bb 100644
--- a/compiler/rustc_middle/src/mir/interpret/allocation.rs
+++ b/compiler/rustc_middle/src/mir/interpret/allocation.rs
@@ -519,7 +519,7 @@ impl Allocation {
         let mut bytes = alloc_bytes(&*self.bytes, self.align)?;
         // Adjust provenance of pointers stored in this allocation.
         let mut new_provenance = Vec::with_capacity(self.provenance.ptrs().len());
-        let ptr_size = cx.data_layout().pointer_size.bytes_usize();
+        let ptr_size = cx.data_layout().pointer_size().bytes_usize();
         let endian = cx.data_layout().endian;
         for &(offset, alloc_id) in self.provenance.ptrs().iter() {
             let idx = offset.bytes_usize();
@@ -709,7 +709,7 @@ impl<Prov: Provenance, Extra, Bytes: AllocBytes> Allocation<Prov, Extra, Bytes>
         let bits = read_target_uint(cx.data_layout().endian, bytes).unwrap();
 
         if read_provenance {
-            assert_eq!(range.size, cx.data_layout().pointer_size);
+            assert_eq!(range.size, cx.data_layout().pointer_size());
 
             // When reading data with provenance, the easy case is finding provenance exactly where we
             // are reading, then we can put data and provenance back together and return that.
@@ -782,7 +782,7 @@ impl<Prov: Provenance, Extra, Bytes: AllocBytes> Allocation<Prov, Extra, Bytes>
 
         // See if we have to also store some provenance.
         if let Some(provenance) = provenance {
-            assert_eq!(range.size, cx.data_layout().pointer_size);
+            assert_eq!(range.size, cx.data_layout().pointer_size());
             self.provenance.insert_ptr(range.start, provenance, cx);
         }
 
diff --git a/compiler/rustc_middle/src/mir/interpret/allocation/provenance_map.rs b/compiler/rustc_middle/src/mir/interpret/allocation/provenance_map.rs
index 63608947eb3..9c6e1664386 100644
--- a/compiler/rustc_middle/src/mir/interpret/allocation/provenance_map.rs
+++ b/compiler/rustc_middle/src/mir/interpret/allocation/provenance_map.rs
@@ -71,7 +71,7 @@ impl<Prov: Provenance> ProvenanceMap<Prov> {
         // We have to go back `pointer_size - 1` bytes, as that one would still overlap with
         // the beginning of this range.
         let adjusted_start = Size::from_bytes(
-            range.start.bytes().saturating_sub(cx.data_layout().pointer_size.bytes() - 1),
+            range.start.bytes().saturating_sub(cx.data_layout().pointer_size().bytes() - 1),
         );
         adjusted_start..range.end()
     }
@@ -142,7 +142,7 @@ impl<Prov: Provenance> ProvenanceMap<Prov> {
     }
 
     pub fn insert_ptr(&mut self, offset: Size, prov: Prov, cx: &impl HasDataLayout) {
-        debug_assert!(self.range_empty(alloc_range(offset, cx.data_layout().pointer_size), cx));
+        debug_assert!(self.range_empty(alloc_range(offset, cx.data_layout().pointer_size()), cx));
         self.ptrs.insert(offset, prov);
     }
 
@@ -160,6 +160,8 @@ impl<Prov: Provenance> ProvenanceMap<Prov> {
             debug_assert!(self.bytes.is_none());
         }
 
+        let pointer_size = cx.data_layout().pointer_size();
+
         // For the ptr-sized part, find the first (inclusive) and last (exclusive) byte of
         // provenance that overlaps with the given range.
         let (first, last) = {
@@ -172,10 +174,7 @@ impl<Prov: Provenance> ProvenanceMap<Prov> {
             // This redoes some of the work of `range_get_ptrs_is_empty`, but this path is much
             // colder than the early return above, so it's worth it.
             let provenance = self.range_ptrs_get(range, cx);
-            (
-                provenance.first().unwrap().0,
-                provenance.last().unwrap().0 + cx.data_layout().pointer_size,
-            )
+            (provenance.first().unwrap().0, provenance.last().unwrap().0 + pointer_size)
         };
 
         // We need to handle clearing the provenance from parts of a pointer.
@@ -192,7 +191,7 @@ impl<Prov: Provenance> ProvenanceMap<Prov> {
             }
         }
         if last > end {
-            let begin_of_last = last - cx.data_layout().pointer_size;
+            let begin_of_last = last - pointer_size;
             if !Prov::OFFSET_IS_ADDR {
                 // We can't split up the provenance into less than a pointer.
                 return Err(AllocError::OverwritePartialPointer(begin_of_last));
@@ -255,7 +254,7 @@ impl<Prov: Provenance> ProvenanceMap<Prov> {
             // shift offsets from source allocation to destination allocation
             (offset - src.start) + dest_offset // `Size` operations
         };
-        let ptr_size = cx.data_layout().pointer_size;
+        let ptr_size = cx.data_layout().pointer_size();
 
         // # Pointer-sized provenances
         // Get the provenances that are entirely within this range.
diff --git a/compiler/rustc_middle/src/mir/interpret/error.rs b/compiler/rustc_middle/src/mir/interpret/error.rs
index 8acb8fa9f80..71e0c943fbb 100644
--- a/compiler/rustc_middle/src/mir/interpret/error.rs
+++ b/compiler/rustc_middle/src/mir/interpret/error.rs
@@ -392,6 +392,8 @@ pub enum UndefinedBehaviorInfo<'tcx> {
     DerefFunctionPointer(AllocId),
     /// Trying to access the data behind a vtable pointer.
     DerefVTablePointer(AllocId),
+    /// Trying to access the data behind a type id pointer.
+    DerefTypeIdPointer(AllocId),
     /// Using a non-boolean `u8` as bool.
     InvalidBool(u8),
     /// Using a non-character `u32` as character.
diff --git a/compiler/rustc_middle/src/mir/interpret/mod.rs b/compiler/rustc_middle/src/mir/interpret/mod.rs
index da9e5bdbadd..bed99a4ff2a 100644
--- a/compiler/rustc_middle/src/mir/interpret/mod.rs
+++ b/compiler/rustc_middle/src/mir/interpret/mod.rs
@@ -103,6 +103,7 @@ enum AllocDiscriminant {
     Fn,
     VTable,
     Static,
+    Type,
 }
 
 pub fn specialized_encode_alloc_id<'tcx, E: TyEncoder<'tcx>>(
@@ -127,6 +128,11 @@ pub fn specialized_encode_alloc_id<'tcx, E: TyEncoder<'tcx>>(
             ty.encode(encoder);
             poly_trait_ref.encode(encoder);
         }
+        GlobalAlloc::TypeId { ty } => {
+            trace!("encoding {alloc_id:?} with {ty:#?}");
+            AllocDiscriminant::Type.encode(encoder);
+            ty.encode(encoder);
+        }
         GlobalAlloc::Static(did) => {
             assert!(!tcx.is_thread_local_static(did));
             // References to statics doesn't need to know about their allocations,
@@ -228,6 +234,12 @@ impl<'s> AllocDecodingSession<'s> {
                 trace!("decoded vtable alloc instance: {ty:?}, {poly_trait_ref:?}");
                 decoder.interner().reserve_and_set_vtable_alloc(ty, poly_trait_ref, CTFE_ALLOC_SALT)
             }
+            AllocDiscriminant::Type => {
+                trace!("creating typeid alloc ID");
+                let ty = Decodable::decode(decoder);
+                trace!("decoded typid: {ty:?}");
+                decoder.interner().reserve_and_set_type_id_alloc(ty)
+            }
             AllocDiscriminant::Static => {
                 trace!("creating extern static alloc ID");
                 let did = <DefId as Decodable<D>>::decode(decoder);
@@ -258,6 +270,9 @@ pub enum GlobalAlloc<'tcx> {
     Static(DefId),
     /// The alloc ID points to memory.
     Memory(ConstAllocation<'tcx>),
+    /// The first pointer-sized segment of a type id. On 64-bit systems, the 128-bit type id
+    /// is split into two segments; on 32-bit systems there are four segments, and so on.
+    TypeId { ty: Ty<'tcx> },
 }
 
 impl<'tcx> GlobalAlloc<'tcx> {
@@ -296,9 +311,10 @@ impl<'tcx> GlobalAlloc<'tcx> {
     pub fn address_space(&self, cx: &impl HasDataLayout) -> AddressSpace {
         match self {
             GlobalAlloc::Function { .. } => cx.data_layout().instruction_address_space,
-            GlobalAlloc::Static(..) | GlobalAlloc::Memory(..) | GlobalAlloc::VTable(..) => {
-                AddressSpace::DATA
-            }
+            GlobalAlloc::TypeId { .. }
+            | GlobalAlloc::Static(..)
+            | GlobalAlloc::Memory(..)
+            | GlobalAlloc::VTable(..) => AddressSpace::ZERO,
         }
     }
 
@@ -334,7 +350,7 @@ impl<'tcx> GlobalAlloc<'tcx> {
                 }
             }
             GlobalAlloc::Memory(alloc) => alloc.inner().mutability,
-            GlobalAlloc::Function { .. } | GlobalAlloc::VTable(..) => {
+            GlobalAlloc::TypeId { .. } | GlobalAlloc::Function { .. } | GlobalAlloc::VTable(..) => {
                 // These are immutable.
                 Mutability::Not
             }
@@ -380,8 +396,10 @@ impl<'tcx> GlobalAlloc<'tcx> {
             GlobalAlloc::Function { .. } => (Size::ZERO, Align::ONE),
             GlobalAlloc::VTable(..) => {
                 // No data to be accessed here. But vtables are pointer-aligned.
-                return (Size::ZERO, tcx.data_layout.pointer_align.abi);
+                (Size::ZERO, tcx.data_layout.pointer_align().abi)
             }
+            // Fake allocation; there's nothing to access here.
+            GlobalAlloc::TypeId { .. } => (Size::ZERO, Align::ONE),
         }
     }
 }
@@ -487,6 +505,11 @@ impl<'tcx> TyCtxt<'tcx> {
         self.reserve_and_set_dedup(GlobalAlloc::VTable(ty, dyn_ty), salt)
     }
 
+    /// Generates an [AllocId] for a [core::any::TypeId]. Will get deduplicated.
+    pub fn reserve_and_set_type_id_alloc(self, ty: Ty<'tcx>) -> AllocId {
+        self.reserve_and_set_dedup(GlobalAlloc::TypeId { ty }, 0)
+    }
+
     /// Interns the `Allocation` and return a new `AllocId`, even if there's already an identical
     /// `Allocation` with a different `AllocId`.
     /// Statics with identical content will still point to the same `Allocation`, i.e.,
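
(Usage sketch for the new helper above, with `tcx: TyCtxt<'tcx>` and `ty: Ty<'tcx>`; with a 128-bit type id, pointer-sized segments mean 2 reads on 64-bit targets and 4 on 32-bit targets.)

    let alloc_id = tcx.reserve_and_set_type_id_alloc(ty); // deduplicated per `ty`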
diff --git a/compiler/rustc_middle/src/mir/interpret/pointer.rs b/compiler/rustc_middle/src/mir/interpret/pointer.rs
index 0ff14f15c13..e25ff7651f6 100644
--- a/compiler/rustc_middle/src/mir/interpret/pointer.rs
+++ b/compiler/rustc_middle/src/mir/interpret/pointer.rs
@@ -16,7 +16,7 @@ pub trait PointerArithmetic: HasDataLayout {
 
     #[inline(always)]
     fn pointer_size(&self) -> Size {
-        self.data_layout().pointer_size
+        self.data_layout().pointer_size()
     }
 
     #[inline(always)]
diff --git a/compiler/rustc_middle/src/mir/interpret/value.rs b/compiler/rustc_middle/src/mir/interpret/value.rs
index 8092f634dc8..90df29bb7e3 100644
--- a/compiler/rustc_middle/src/mir/interpret/value.rs
+++ b/compiler/rustc_middle/src/mir/interpret/value.rs
@@ -167,7 +167,7 @@ impl<Prov> Scalar<Prov> {
 
     #[inline]
     pub fn from_target_usize(i: u64, cx: &impl HasDataLayout) -> Self {
-        Self::from_uint(i, cx.data_layout().pointer_size)
+        Self::from_uint(i, cx.data_layout().pointer_offset())
     }
 
     #[inline]
@@ -205,7 +205,7 @@ impl<Prov> Scalar<Prov> {
 
     #[inline]
     pub fn from_target_isize(i: i64, cx: &impl HasDataLayout) -> Self {
-        Self::from_int(i, cx.data_layout().pointer_size)
+        Self::from_int(i, cx.data_layout().pointer_offset())
     }
 
     #[inline]
@@ -393,7 +393,7 @@ impl<'tcx, Prov: Provenance> Scalar<Prov> {
     /// Converts the scalar to produce a machine-pointer-sized unsigned integer.
     /// Fails if the scalar is a pointer.
     pub fn to_target_usize(self, cx: &impl HasDataLayout) -> InterpResult<'tcx, u64> {
-        let b = self.to_uint(cx.data_layout().pointer_size)?;
+        let b = self.to_uint(cx.data_layout().pointer_size())?;
         interp_ok(u64::try_from(b).unwrap())
     }
 
@@ -433,7 +433,7 @@ impl<'tcx, Prov: Provenance> Scalar<Prov> {
     /// Converts the scalar to produce a machine-pointer-sized signed integer.
     /// Fails if the scalar is a pointer.
     pub fn to_target_isize(self, cx: &impl HasDataLayout) -> InterpResult<'tcx, i64> {
-        let b = self.to_int(cx.data_layout().pointer_size)?;
+        let b = self.to_int(cx.data_layout().pointer_size())?;
         interp_ok(i64::try_from(b).unwrap())
     }
 
diff --git a/compiler/rustc_middle/src/mir/pretty.rs b/compiler/rustc_middle/src/mir/pretty.rs
index 6b262a27500..8e403dfddae 100644
--- a/compiler/rustc_middle/src/mir/pretty.rs
+++ b/compiler/rustc_middle/src/mir/pretty.rs
@@ -1621,6 +1621,7 @@ pub fn write_allocations<'tcx>(
             Some(GlobalAlloc::VTable(ty, dyn_ty)) => {
                 write!(w, " (vtable: impl {dyn_ty} for {ty})")?
             }
+            Some(GlobalAlloc::TypeId { ty }) => write!(w, " (typeid for {ty})")?,
             Some(GlobalAlloc::Static(did)) if !tcx.is_foreign_item(did) => {
                 write!(w, " (static: {}", tcx.def_path_str(did))?;
                 if body.phase <= MirPhase::Runtime(RuntimePhase::PostCleanup)
@@ -1753,7 +1754,7 @@ pub fn write_allocation_bytes<'tcx, Prov: Provenance, Extra, Bytes: AllocBytes>(
     let mut i = Size::ZERO;
     let mut line_start = Size::ZERO;
 
-    let ptr_size = tcx.data_layout.pointer_size;
+    let ptr_size = tcx.data_layout.pointer_size();
 
     let mut ascii = String::new();
 
diff --git a/compiler/rustc_middle/src/query/mod.rs b/compiler/rustc_middle/src/query/mod.rs
index 17a29c9ae4b..9af5683ff75 100644
--- a/compiler/rustc_middle/src/query/mod.rs
+++ b/compiler/rustc_middle/src/query/mod.rs
@@ -1452,6 +1452,13 @@ rustc_queries! {
         feedable
     }
 
+    /// Gets the span for the type of the definition.
+    /// Panics if it is not a definition that has a single type.
+    query ty_span(def_id: LocalDefId) -> Span {
+        desc { |tcx| "looking up span for `{}`'s type", tcx.def_path_str(def_id) }
+        cache_on_disk_if { true }
+    }
+
     query lookup_stability(def_id: DefId) -> Option<attr::Stability> {
         desc { |tcx| "looking up stability of `{}`", tcx.def_path_str(def_id) }
         cache_on_disk_if { def_id.is_local() }
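
(Usage sketch of the new query, mirroring the `wfcheck` changes earlier in this diff; it panics for definitions that do not have a single written type.)

    let span = tcx.ty_span(def_id); // e.g. the type annotation of a `static` or `const` item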
diff --git a/compiler/rustc_middle/src/traits/select.rs b/compiler/rustc_middle/src/traits/select.rs
index aa2ee756bc5..c498e6b3c83 100644
--- a/compiler/rustc_middle/src/traits/select.rs
+++ b/compiler/rustc_middle/src/traits/select.rs
@@ -97,9 +97,7 @@ pub type EvaluationCache<'tcx, ENV> = Cache<(ENV, ty::PolyTraitPredicate<'tcx>),
 pub enum SelectionCandidate<'tcx> {
     /// A built-in implementation for the `Sized` trait. This is preferred
     /// over all other candidates.
-    SizedCandidate {
-        has_nested: bool,
-    },
+    SizedCandidate,
 
     /// A builtin implementation for some specific traits, used in cases
     /// where we cannot rely an ordinary library implementations.
@@ -107,10 +105,7 @@ pub enum SelectionCandidate<'tcx> {
     /// The most notable examples are `Copy` and `Clone`. This is also
     /// used for the `DiscriminantKind` and `Pointee` trait, both of which have
     /// an associated type.
-    BuiltinCandidate {
-        /// `false` if there are no *further* obligations.
-        has_nested: bool,
-    },
+    BuiltinCandidate,
 
     /// Implementation of transmutability trait.
     TransmutabilityCandidate,
diff --git a/compiler/rustc_middle/src/ty/consts/int.rs b/compiler/rustc_middle/src/ty/consts/int.rs
index b087ae25486..6ee76b94507 100644
--- a/compiler/rustc_middle/src/ty/consts/int.rs
+++ b/compiler/rustc_middle/src/ty/consts/int.rs
@@ -252,7 +252,7 @@ impl ScalarInt {
 
     #[inline]
     pub fn try_from_target_usize(i: impl Into<u128>, tcx: TyCtxt<'_>) -> Option<Self> {
-        Self::try_from_uint(i, tcx.data_layout.pointer_size)
+        Self::try_from_uint(i, tcx.data_layout.pointer_size())
     }
 
     /// Try to convert this ScalarInt to the raw underlying bits.
@@ -328,7 +328,7 @@ impl ScalarInt {
 
     #[inline]
     pub fn to_target_usize(&self, tcx: TyCtxt<'_>) -> u64 {
-        self.to_uint(tcx.data_layout.pointer_size).try_into().unwrap()
+        self.to_uint(tcx.data_layout.pointer_size()).try_into().unwrap()
     }
 
     #[inline]
@@ -402,7 +402,7 @@ impl ScalarInt {
 
     #[inline]
     pub fn to_target_isize(&self, tcx: TyCtxt<'_>) -> i64 {
-        self.to_int(tcx.data_layout.pointer_size).try_into().unwrap()
+        self.to_int(tcx.data_layout.pointer_size()).try_into().unwrap()
     }
 
     #[inline]
diff --git a/compiler/rustc_middle/src/ty/layout.rs b/compiler/rustc_middle/src/ty/layout.rs
index 09379d9d805..809717513c7 100644
--- a/compiler/rustc_middle/src/ty/layout.rs
+++ b/compiler/rustc_middle/src/ty/layout.rs
@@ -1067,7 +1067,7 @@ where
                 if let Some(variant) = data_variant {
                     // FIXME(erikdesjardins): handle non-default addrspace ptr sizes
                     // (requires passing in the expected address space from the caller)
-                    let ptr_end = offset + Primitive::Pointer(AddressSpace::DATA).size(cx);
+                    let ptr_end = offset + Primitive::Pointer(AddressSpace::ZERO).size(cx);
                     for i in 0..variant.fields.count() {
                         let field_start = variant.fields.offset(i);
                         if field_start <= offset {
diff --git a/compiler/rustc_middle/src/ty/print/pretty.rs b/compiler/rustc_middle/src/ty/print/pretty.rs
index 1dba4a7b040..7c48a4b0885 100644
--- a/compiler/rustc_middle/src/ty/print/pretty.rs
+++ b/compiler/rustc_middle/src/ty/print/pretty.rs
@@ -1773,6 +1773,7 @@ pub trait PrettyPrinter<'tcx>: Printer<'tcx> + fmt::Write {
                         }
                         Some(GlobalAlloc::Function { .. }) => p!("<function>"),
                         Some(GlobalAlloc::VTable(..)) => p!("<vtable>"),
+                        Some(GlobalAlloc::TypeId { .. }) => p!("<typeid>"),
                         None => p!("<dangling pointer>"),
                     }
                     return Ok(());
@@ -1842,7 +1843,7 @@ pub trait PrettyPrinter<'tcx>: Printer<'tcx> + fmt::Write {
             }
             // Pointer types
             ty::Ref(..) | ty::RawPtr(_, _) | ty::FnPtr(..) => {
-                let data = int.to_bits(self.tcx().data_layout.pointer_size);
+                let data = int.to_bits(self.tcx().data_layout.pointer_size());
                 self.typed_value(
                     |this| {
                         write!(this, "0x{data:x}")?;
diff --git a/compiler/rustc_middle/src/ty/util.rs b/compiler/rustc_middle/src/ty/util.rs
index 69b8be3d9cb..fd80d85f198 100644
--- a/compiler/rustc_middle/src/ty/util.rs
+++ b/compiler/rustc_middle/src/ty/util.rs
@@ -1052,9 +1052,11 @@ impl<'tcx> TypeFolder<TyCtxt<'tcx>> for FreeAliasTypeExpander<'tcx> {
         }
 
         self.depth += 1;
-        ensure_sufficient_stack(|| {
+        let ty = ensure_sufficient_stack(|| {
             self.tcx.type_of(alias.def_id).instantiate(self.tcx, alias.args).fold_with(self)
-        })
+        });
+        self.depth -= 1;
+        ty
     }
 
     fn fold_const(&mut self, ct: ty::Const<'tcx>) -> ty::Const<'tcx> {
diff --git a/compiler/rustc_middle/src/ty/vtable.rs b/compiler/rustc_middle/src/ty/vtable.rs
index 74b6a840a2e..6fc19c82342 100644
--- a/compiler/rustc_middle/src/ty/vtable.rs
+++ b/compiler/rustc_middle/src/ty/vtable.rs
@@ -106,8 +106,8 @@ pub(super) fn vtable_allocation_provider<'tcx>(
     let size = layout.size.bytes();
     let align = layout.align.abi.bytes();
 
-    let ptr_size = tcx.data_layout.pointer_size;
-    let ptr_align = tcx.data_layout.pointer_align.abi;
+    let ptr_size = tcx.data_layout.pointer_size();
+    let ptr_align = tcx.data_layout.pointer_align().abi;
 
     let vtable_size = ptr_size * u64::try_from(vtable_entries.len()).unwrap();
     let mut vtable = Allocation::new(vtable_size, ptr_align, AllocInit::Uninit, ());
diff --git a/compiler/rustc_mir_build/src/builder/scope.rs b/compiler/rustc_mir_build/src/builder/scope.rs
index 405d47c7c79..12a56d7c5ea 100644
--- a/compiler/rustc_mir_build/src/builder/scope.rs
+++ b/compiler/rustc_mir_build/src/builder/scope.rs
@@ -927,6 +927,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
             scrut_span: rustc_span::Span::default(),
             refutable: true,
             known_valid_scrutinee: true,
+            internal_state: Default::default(),
         };
 
         let valtree = match self.eval_unevaluated_mir_constant_to_valtree(constant) {
@@ -936,7 +937,9 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
 
                 valtree
             }
-            Err(ErrorHandled::Reported(..)) => return self.cfg.start_new_block().unit(),
+            Err(ErrorHandled::Reported(..)) => {
+                return block.unit();
+            }
             Err(ErrorHandled::TooGeneric(_)) => {
                 self.tcx.dcx().emit_fatal(ConstContinueBadConst { span: constant.span });
             }
diff --git a/compiler/rustc_mir_build/src/thir/constant.rs b/compiler/rustc_mir_build/src/thir/constant.rs
index 8e218a380e9..52e6f2d3e1a 100644
--- a/compiler/rustc_mir_build/src/thir/constant.rs
+++ b/compiler/rustc_mir_build/src/thir/constant.rs
@@ -20,7 +20,7 @@ pub(crate) fn lit_to_const<'tcx>(
 
     let trunc = |n, width: ty::UintTy| {
         let width = width
-            .normalize(tcx.data_layout.pointer_size.bits().try_into().unwrap())
+            .normalize(tcx.data_layout.pointer_size().bits().try_into().unwrap())
             .bit_width()
             .unwrap();
         let width = Size::from_bits(width);
diff --git a/compiler/rustc_mir_build/src/thir/pattern/check_match.rs b/compiler/rustc_mir_build/src/thir/pattern/check_match.rs
index b7b160c738d..a49bfc1b8f4 100644
--- a/compiler/rustc_mir_build/src/thir/pattern/check_match.rs
+++ b/compiler/rustc_mir_build/src/thir/pattern/check_match.rs
@@ -406,6 +406,7 @@ impl<'p, 'tcx> MatchVisitor<'p, 'tcx> {
             scrut_span,
             refutable,
             known_valid_scrutinee,
+            internal_state: Default::default(),
         }
     }
 
diff --git a/compiler/rustc_mir_dataflow/src/drop_flag_effects.rs b/compiler/rustc_mir_dataflow/src/drop_flag_effects.rs
index 496a342cf4c..c9c7fddae5a 100644
--- a/compiler/rustc_mir_dataflow/src/drop_flag_effects.rs
+++ b/compiler/rustc_mir_dataflow/src/drop_flag_effects.rs
@@ -1,5 +1,6 @@
 use rustc_abi::VariantIdx;
 use rustc_middle::mir::{self, Body, Location, Terminator, TerminatorKind};
+use smallvec::SmallVec;
 use tracing::debug;
 
 use super::move_paths::{InitKind, LookupResult, MoveData, MovePathIndex};
@@ -155,15 +156,28 @@ where
     }
 }
 
-/// Calls `handle_inactive_variant` for each descendant move path of `enum_place` that contains a
-/// `Downcast` to a variant besides the `active_variant`.
-///
-/// NOTE: If there are no move paths corresponding to an inactive variant,
-/// `handle_inactive_variant` will not be called for that variant.
+/// Indicates which variants are inactive at a `SwitchInt` edge by listing their `VariantIdx`s or
+/// specifying the single active variant's `VariantIdx`.
+pub(crate) enum InactiveVariants {
+    Inactives(SmallVec<[VariantIdx; 4]>),
+    Active(VariantIdx),
+}
+
+impl InactiveVariants {
+    fn contains(&self, variant_idx: VariantIdx) -> bool {
+        match self {
+            InactiveVariants::Inactives(inactives) => inactives.contains(&variant_idx),
+            InactiveVariants::Active(active) => variant_idx != *active,
+        }
+    }
+}
+
+/// Calls `handle_inactive_variant` for each child move path of `enum_place` corresponding to an
+/// inactive variant at a particular `SwitchInt` edge.
 pub(crate) fn on_all_inactive_variants<'tcx>(
     move_data: &MoveData<'tcx>,
     enum_place: mir::Place<'tcx>,
-    active_variant: VariantIdx,
+    inactive_variants: &InactiveVariants,
     mut handle_inactive_variant: impl FnMut(MovePathIndex),
 ) {
     let LookupResult::Exact(enum_mpi) = move_data.rev_lookup.find(enum_place.as_ref()) else {
@@ -182,7 +196,7 @@ pub(crate) fn on_all_inactive_variants<'tcx>(
             unreachable!();
         };
 
-        if variant_idx != active_variant {
+        if inactive_variants.contains(variant_idx) {
             on_all_children_bits(move_data, variant_mpi, |mpi| handle_inactive_variant(mpi));
         }
     }
diff --git a/compiler/rustc_mir_dataflow/src/framework/direction.rs b/compiler/rustc_mir_dataflow/src/framework/direction.rs
index e955e38ad10..cb647476db8 100644
--- a/compiler/rustc_mir_dataflow/src/framework/direction.rs
+++ b/compiler/rustc_mir_dataflow/src/framework/direction.rs
@@ -112,12 +112,13 @@ impl Direction for Backward {
                     propagate(pred, &tmp);
                 }
 
-                mir::TerminatorKind::SwitchInt { targets: _, ref discr } => {
+                mir::TerminatorKind::SwitchInt { ref targets, ref discr } => {
                     if let Some(mut data) = analysis.get_switch_int_data(block, discr) {
                         let mut tmp = analysis.bottom_value(body);
                         for &value in &body.basic_blocks.switch_sources()[&(block, pred)] {
                             tmp.clone_from(exit_state);
-                            analysis.apply_switch_int_edge_effect(&mut data, &mut tmp, value);
+                            analysis
+                                .apply_switch_int_edge_effect(&mut data, &mut tmp, value, targets);
                             propagate(pred, &tmp);
                         }
                     } else {
@@ -290,20 +291,20 @@ impl Direction for Forward {
                     for (value, target) in targets.iter() {
                         tmp.clone_from(exit_state);
                         let value = SwitchTargetValue::Normal(value);
-                        analysis.apply_switch_int_edge_effect(&mut data, &mut tmp, value);
+                        analysis.apply_switch_int_edge_effect(&mut data, &mut tmp, value, targets);
                         propagate(target, &tmp);
                     }
 
                     // Once we get to the final, "otherwise" branch, there is no need to preserve
                     // `exit_state`, so pass it directly to `apply_switch_int_edge_effect` to save
                     // a clone of the dataflow state.
-                    let otherwise = targets.otherwise();
                     analysis.apply_switch_int_edge_effect(
                         &mut data,
                         exit_state,
                         SwitchTargetValue::Otherwise,
+                        targets,
                     );
-                    propagate(otherwise, exit_state);
+                    propagate(targets.otherwise(), exit_state);
                 } else {
                     for target in targets.all_targets() {
                         propagate(*target, exit_state);
diff --git a/compiler/rustc_mir_dataflow/src/framework/mod.rs b/compiler/rustc_mir_dataflow/src/framework/mod.rs
index 9cadec100b5..b6a56036019 100644
--- a/compiler/rustc_mir_dataflow/src/framework/mod.rs
+++ b/compiler/rustc_mir_dataflow/src/framework/mod.rs
@@ -224,6 +224,7 @@ pub trait Analysis<'tcx> {
         _data: &mut Self::SwitchIntData,
         _state: &mut Self::Domain,
         _value: SwitchTargetValue,
+        _targets: &mir::SwitchTargets,
     ) {
         unreachable!();
     }
diff --git a/compiler/rustc_mir_dataflow/src/impls/initialized.rs b/compiler/rustc_mir_dataflow/src/impls/initialized.rs
index 18165b0b9bd..085757f0fb6 100644
--- a/compiler/rustc_mir_dataflow/src/impls/initialized.rs
+++ b/compiler/rustc_mir_dataflow/src/impls/initialized.rs
@@ -9,9 +9,10 @@ use rustc_middle::mir::{
 };
 use rustc_middle::ty::util::Discr;
 use rustc_middle::ty::{self, TyCtxt};
+use smallvec::SmallVec;
 use tracing::{debug, instrument};
 
-use crate::drop_flag_effects::DropFlagState;
+use crate::drop_flag_effects::{DropFlagState, InactiveVariants};
 use crate::move_paths::{HasMoveData, InitIndex, InitKind, LookupResult, MoveData, MovePathIndex};
 use crate::{
     Analysis, GenKill, MaybeReachable, drop_flag_effects, drop_flag_effects_for_function_entry,
@@ -26,6 +27,12 @@ pub struct MaybePlacesSwitchIntData<'tcx> {
 }
 
 impl<'tcx> MaybePlacesSwitchIntData<'tcx> {
+    /// Creates a `SmallVec` mapping each target in `targets` to its `VariantIdx`.
+    fn variants(&mut self, targets: &mir::SwitchTargets) -> SmallVec<[VariantIdx; 4]> {
+        self.index = 0;
+        targets.all_values().iter().map(|value| self.next_discr(value.get())).collect()
+    }
+
     // The discriminant order in the `SwitchInt` targets should match the order yielded by
     // `AdtDef::discriminants`. We rely on this to match each discriminant in the targets to its
     // corresponding variant in linear time.
@@ -131,12 +138,26 @@ pub struct MaybeInitializedPlaces<'a, 'tcx> {
     tcx: TyCtxt<'tcx>,
     body: &'a Body<'tcx>,
     move_data: &'a MoveData<'tcx>,
+    exclude_inactive_in_otherwise: bool,
     skip_unreachable_unwind: bool,
 }
 
 impl<'a, 'tcx> MaybeInitializedPlaces<'a, 'tcx> {
     pub fn new(tcx: TyCtxt<'tcx>, body: &'a Body<'tcx>, move_data: &'a MoveData<'tcx>) -> Self {
-        MaybeInitializedPlaces { tcx, body, move_data, skip_unreachable_unwind: false }
+        MaybeInitializedPlaces {
+            tcx,
+            body,
+            move_data,
+            exclude_inactive_in_otherwise: false,
+            skip_unreachable_unwind: false,
+        }
+    }
+
+    /// Ensures definitely inactive variants are excluded from the set of initialized places for
+    /// blocks reached through an `otherwise` edge.
+    pub fn exclude_inactive_in_otherwise(mut self) -> Self {
+        self.exclude_inactive_in_otherwise = true;
+        self
     }
 
     pub fn skipping_unreachable_unwind(mut self) -> Self {
@@ -208,6 +229,7 @@ pub struct MaybeUninitializedPlaces<'a, 'tcx> {
     move_data: &'a MoveData<'tcx>,
 
     mark_inactive_variants_as_uninit: bool,
+    include_inactive_in_otherwise: bool,
     skip_unreachable_unwind: DenseBitSet<mir::BasicBlock>,
 }
 
@@ -218,6 +240,7 @@ impl<'a, 'tcx> MaybeUninitializedPlaces<'a, 'tcx> {
             body,
             move_data,
             mark_inactive_variants_as_uninit: false,
+            include_inactive_in_otherwise: false,
             skip_unreachable_unwind: DenseBitSet::new_empty(body.basic_blocks.len()),
         }
     }
@@ -232,6 +255,13 @@ impl<'a, 'tcx> MaybeUninitializedPlaces<'a, 'tcx> {
         self
     }
 
+    /// Ensures definitely inactive variants are included in the set of uninitialized places for
+    /// blocks reached through an `otherwise` edge.
+    pub fn include_inactive_in_otherwise(mut self) -> Self {
+        self.include_inactive_in_otherwise = true;
+        self
+    }
+
     pub fn skipping_unreachable_unwind(
         mut self,
         unreachable_unwind: DenseBitSet<mir::BasicBlock>,
@@ -431,17 +461,24 @@ impl<'tcx> Analysis<'tcx> for MaybeInitializedPlaces<'_, 'tcx> {
         data: &mut Self::SwitchIntData,
         state: &mut Self::Domain,
         value: SwitchTargetValue,
+        targets: &mir::SwitchTargets,
     ) {
-        if let SwitchTargetValue::Normal(value) = value {
-            // Kill all move paths that correspond to variants we know to be inactive along this
-            // particular outgoing edge of a `SwitchInt`.
-            drop_flag_effects::on_all_inactive_variants(
-                self.move_data,
-                data.enum_place,
-                data.next_discr(value),
-                |mpi| state.kill(mpi),
-            );
-        }
+        let inactive_variants = match value {
+            SwitchTargetValue::Normal(value) => InactiveVariants::Active(data.next_discr(value)),
+            SwitchTargetValue::Otherwise if self.exclude_inactive_in_otherwise => {
+                InactiveVariants::Inactives(data.variants(targets))
+            }
+            _ => return,
+        };
+
+        // Kill all move paths that correspond to variants we know to be inactive along this
+        // particular outgoing edge of a `SwitchInt`.
+        drop_flag_effects::on_all_inactive_variants(
+            self.move_data,
+            data.enum_place,
+            &inactive_variants,
+            |mpi| state.kill(mpi),
+        );
     }
 }
 
@@ -544,17 +581,24 @@ impl<'tcx> Analysis<'tcx> for MaybeUninitializedPlaces<'_, 'tcx> {
         data: &mut Self::SwitchIntData,
         state: &mut Self::Domain,
         value: SwitchTargetValue,
+        targets: &mir::SwitchTargets,
     ) {
-        if let SwitchTargetValue::Normal(value) = value {
-            // Mark all move paths that correspond to variants other than this one as maybe
-            // uninitialized (in reality, they are *definitely* uninitialized).
-            drop_flag_effects::on_all_inactive_variants(
-                self.move_data,
-                data.enum_place,
-                data.next_discr(value),
-                |mpi| state.gen_(mpi),
-            );
-        }
+        let inactive_variants = match value {
+            SwitchTargetValue::Normal(value) => InactiveVariants::Active(data.next_discr(value)),
+            SwitchTargetValue::Otherwise if self.include_inactive_in_otherwise => {
+                InactiveVariants::Inactives(data.variants(targets))
+            }
+            _ => return,
+        };
+
+        // Mark all move paths corresponding to the inactive variants along this edge as maybe
+        // uninitialized (in reality, they are *definitely* uninitialized).
+        drop_flag_effects::on_all_inactive_variants(
+            self.move_data,
+            data.enum_place,
+            &inactive_variants,
+            |mpi| state.gen_(mpi),
+        );
     }
 }
 
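For context on the new `exclude_inactive_in_otherwise`/`include_inactive_in_otherwise` switches: along a `SwitchTargetValue::Normal(v)` edge only the variant with discriminant `v` can be active, while along the `otherwise` edge every variant that has its own explicit target is known inactive, because its discriminant would have taken that edge instead. A minimal standalone sketch of that reasoning, using plain integers rather than rustc's `SwitchTargets`/`VariantIdx` types (the helper name is illustrative only):

```rust
// Standalone sketch, not rustc code: along the `otherwise` edge of a switch on
// an enum discriminant, every variant whose discriminant has its own explicit
// target is known to be inactive there.
fn inactive_on_otherwise(all_discrs: &[u128], explicit_targets: &[u128]) -> Vec<u128> {
    all_discrs.iter().copied().filter(|d| explicit_targets.contains(d)).collect()
}

fn main() {
    // Discriminants 0, 1, 2 with explicit targets for 0 and 2: the `otherwise`
    // edge is only reachable with discriminant 1, so 0 and 2 are inactive there.
    assert_eq!(inactive_on_otherwise(&[0, 1, 2], &[0, 2]), vec![0, 2]);
}
```
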
diff --git a/compiler/rustc_mir_transform/src/elaborate_drops.rs b/compiler/rustc_mir_transform/src/elaborate_drops.rs
index 42c8cb0b906..b4fa2be1d00 100644
--- a/compiler/rustc_mir_transform/src/elaborate_drops.rs
+++ b/compiler/rustc_mir_transform/src/elaborate_drops.rs
@@ -62,12 +62,14 @@ impl<'tcx> crate::MirPass<'tcx> for ElaborateDrops {
             let env = MoveDataTypingEnv { move_data, typing_env };
 
             let mut inits = MaybeInitializedPlaces::new(tcx, body, &env.move_data)
+                .exclude_inactive_in_otherwise()
                 .skipping_unreachable_unwind()
                 .iterate_to_fixpoint(tcx, body, Some("elaborate_drops"))
                 .into_results_cursor(body);
             let dead_unwinds = compute_dead_unwinds(body, &mut inits);
 
             let uninits = MaybeUninitializedPlaces::new(tcx, body, &env.move_data)
+                .include_inactive_in_otherwise()
                 .mark_inactive_variants_as_uninit()
                 .skipping_unreachable_unwind(dead_unwinds)
                 .iterate_to_fixpoint(tcx, body, Some("elaborate_drops"))
diff --git a/compiler/rustc_mir_transform/src/remove_uninit_drops.rs b/compiler/rustc_mir_transform/src/remove_uninit_drops.rs
index 9044a88295c..25d6fa1bab9 100644
--- a/compiler/rustc_mir_transform/src/remove_uninit_drops.rs
+++ b/compiler/rustc_mir_transform/src/remove_uninit_drops.rs
@@ -22,6 +22,7 @@ impl<'tcx> crate::MirPass<'tcx> for RemoveUninitDrops {
         let move_data = MoveData::gather_moves(body, tcx, |ty| ty.needs_drop(tcx, typing_env));
 
         let mut maybe_inits = MaybeInitializedPlaces::new(tcx, body, &move_data)
+            .exclude_inactive_in_otherwise()
             .iterate_to_fixpoint(tcx, body, Some("remove_uninit_drops"))
             .into_results_cursor(body);
 
diff --git a/compiler/rustc_monomorphize/src/collector.rs b/compiler/rustc_monomorphize/src/collector.rs
index 131064f9832..91c8e64ce9a 100644
--- a/compiler/rustc_monomorphize/src/collector.rs
+++ b/compiler/rustc_monomorphize/src/collector.rs
@@ -1219,6 +1219,7 @@ fn collect_alloc<'tcx>(tcx: TyCtxt<'tcx>, alloc_id: AllocId, output: &mut MonoIt
             ));
             collect_alloc(tcx, alloc_id, output)
         }
+        GlobalAlloc::TypeId { .. } => {}
     }
 }
 
diff --git a/compiler/rustc_parse/src/lib.rs b/compiler/rustc_parse/src/lib.rs
index 8ea535599c9..2050c5f9608 100644
--- a/compiler/rustc_parse/src/lib.rs
+++ b/compiler/rustc_parse/src/lib.rs
@@ -1,7 +1,6 @@
 //! The main parser interface.
 
 // tidy-alphabetical-start
-#![allow(internal_features)]
 #![allow(rustc::diagnostic_outside_of_impl)]
 #![allow(rustc::untranslatable_diagnostic)]
 #![feature(assert_matches)]
diff --git a/compiler/rustc_parse/src/parser/item.rs b/compiler/rustc_parse/src/parser/item.rs
index 9ed7124a11c..d6cc98d505c 100644
--- a/compiler/rustc_parse/src/parser/item.rs
+++ b/compiler/rustc_parse/src/parser/item.rs
@@ -2206,7 +2206,7 @@ impl<'a> Parser<'a> {
 
             if self.look_ahead(1, |t| *t == token::Bang) && self.look_ahead(2, |t| t.is_ident()) {
                 return IsMacroRulesItem::Yes { has_bang: true };
-            } else if self.look_ahead(1, |t| (t.is_ident())) {
+            } else if self.look_ahead(1, |t| t.is_ident()) {
                 // macro_rules foo
                 self.dcx().emit_err(errors::MacroRulesMissingBang {
                     span: macro_rules_span,
diff --git a/compiler/rustc_parse/src/parser/mod.rs b/compiler/rustc_parse/src/parser/mod.rs
index cfc0399b0ca..2787be46f33 100644
--- a/compiler/rustc_parse/src/parser/mod.rs
+++ b/compiler/rustc_parse/src/parser/mod.rs
@@ -1293,8 +1293,10 @@ impl<'a> Parser<'a> {
         let kind = if pat {
             let guar = self
                 .dcx()
-                .struct_span_err(blk_span, "`inline_const_pat` has been removed")
-                .with_help("use a named `const`-item or an `if`-guard instead")
+                .struct_span_err(blk_span, "const blocks cannot be used as patterns")
+                .with_help(
+                    "use a named `const`-item or an `if`-guard (`x if x == const { ... }`) instead",
+                )
                 .emit();
             ExprKind::Err(guar)
         } else {
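
The reworded diagnostic above suggests two replacements for a const block in pattern position. A small illustration of both suggested forms in ordinary stable Rust (not a rustc test, just the shapes the help text points at):

```rust
// Instead of writing a const block as a pattern, use a named `const` item or
// an `if`-guard with an inline const expression.
fn classify(x: u32) -> &'static str {
    const ANSWER: u32 = 6 * 7;
    match x {
        // Named `const` item used as a pattern.
        ANSWER => "answer",
        // Equivalent `if`-guard form: `y if y == const { 6 * 7 } => "answer",`
        _ => "other",
    }
}

fn main() {
    assert_eq!(classify(42), "answer");
    assert_eq!(classify(7), "other");
}
```
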
diff --git a/compiler/rustc_parse/src/validate_attr.rs b/compiler/rustc_parse/src/validate_attr.rs
index 67b68e77d2b..11424ec3724 100644
--- a/compiler/rustc_parse/src/validate_attr.rs
+++ b/compiler/rustc_parse/src/validate_attr.rs
@@ -271,6 +271,10 @@ pub fn check_builtin_meta_item(
         if matches!(
             name,
             sym::inline
+                | sym::export_stable
+                | sym::ffi_const
+                | sym::ffi_pure
+                | sym::rustc_std_internal_symbol
                 | sym::may_dangle
                 | sym::rustc_as_ptr
                 | sym::rustc_pub_transparent
diff --git a/compiler/rustc_passes/src/check_attr.rs b/compiler/rustc_passes/src/check_attr.rs
index 9e4e78c1db6..0aa6a2b41cf 100644
--- a/compiler/rustc_passes/src/check_attr.rs
+++ b/compiler/rustc_passes/src/check_attr.rs
@@ -204,10 +204,20 @@ impl<'tcx> CheckAttrVisitor<'tcx> {
                     AttributeKind::RustcLayoutScalarValidRangeStart(_num, attr_span)
                     | AttributeKind::RustcLayoutScalarValidRangeEnd(_num, attr_span),
                 ) => self.check_rustc_layout_scalar_valid_range(*attr_span, span, target),
+                Attribute::Parsed(AttributeKind::ExportStable) => {
+                    // handled in `check_export`
+                }
+                &Attribute::Parsed(AttributeKind::FfiConst(attr_span)) => {
+                    self.check_ffi_const(attr_span, target)
+                }
+                &Attribute::Parsed(AttributeKind::FfiPure(attr_span)) => {
+                    self.check_ffi_pure(attr_span, attrs, target)
+                }
                 Attribute::Parsed(
                     AttributeKind::BodyStability { .. }
                     | AttributeKind::ConstStabilityIndirect
-                    | AttributeKind::MacroTransparency(_),
+                    | AttributeKind::MacroTransparency(_)
+                    | AttributeKind::Dummy,
                 ) => { /* do nothing  */ }
                 Attribute::Parsed(AttributeKind::AsPtr(attr_span)) => {
                     self.check_applied_to_fn_or_method(hir_id, *attr_span, span, target)
@@ -233,6 +243,9 @@ impl<'tcx> CheckAttrVisitor<'tcx> {
                 &Attribute::Parsed(AttributeKind::PassByValue(attr_span)) => {
                     self.check_pass_by_value(attr_span, span, target)
                 }
+                &Attribute::Parsed(AttributeKind::StdInternalSymbol(attr_span)) => {
+                    self.check_rustc_std_internal_symbol(attr_span, span, target)
+                }
                 Attribute::Unparsed(attr_item) => {
                     style = Some(attr_item.style);
                     match attr.path().as_slice() {
@@ -258,9 +271,6 @@ impl<'tcx> CheckAttrVisitor<'tcx> {
                         ),
                         [sym::no_link, ..] => self.check_no_link(hir_id, attr, span, target),
                         [sym::debugger_visualizer, ..] => self.check_debugger_visualizer(attr, target),
-                        [sym::rustc_std_internal_symbol, ..] => {
-                            self.check_rustc_std_internal_symbol(attr, span, target)
-                        }
                         [sym::rustc_no_implicit_autorefs, ..] => {
                             self.check_applied_to_fn_or_method(hir_id, attr.span(), span, target)
                         }
@@ -300,8 +310,6 @@ impl<'tcx> CheckAttrVisitor<'tcx> {
                         [sym::rustc_has_incoherent_inherent_impls, ..] => {
                             self.check_has_incoherent_inherent_impls(attr, span, target)
                         }
-                        [sym::ffi_pure, ..] => self.check_ffi_pure(attr.span(), attrs, target),
-                        [sym::ffi_const, ..] => self.check_ffi_const(attr.span(), target),
                         [sym::link_ordinal, ..] => self.check_link_ordinal(attr, span, target),
                         [sym::link, ..] => self.check_link(hir_id, attr, span, target),
                         [sym::macro_use, ..] | [sym::macro_escape, ..] => {
@@ -346,7 +354,6 @@ impl<'tcx> CheckAttrVisitor<'tcx> {
                             | sym::cfg_attr
                             | sym::cfg_trace
                             | sym::cfg_attr_trace
-                            | sym::export_stable // handled in `check_export`
                             // need to be fixed
                             | sym::cfi_encoding // FIXME(cfi_encoding)
                             | sym::pointee // FIXME(derive_coerce_pointee)
@@ -1507,7 +1514,7 @@ impl<'tcx> CheckAttrVisitor<'tcx> {
             self.dcx().emit_err(errors::FfiPureInvalidTarget { attr_span });
             return;
         }
-        if attrs.iter().any(|a| a.has_name(sym::ffi_const)) {
+        if find_attr!(attrs, AttributeKind::FfiConst(_)) {
             // `#[ffi_const]` functions cannot be `#[ffi_pure]`
             self.dcx().emit_err(errors::BothFfiConstAndPure { attr_span });
         }
@@ -2214,13 +2221,11 @@ impl<'tcx> CheckAttrVisitor<'tcx> {
         }
     }
 
-    fn check_rustc_std_internal_symbol(&self, attr: &Attribute, span: Span, target: Target) {
+    fn check_rustc_std_internal_symbol(&self, attr_span: Span, span: Span, target: Target) {
         match target {
             Target::Fn | Target::Static | Target::ForeignFn | Target::ForeignStatic => {}
             _ => {
-                self.tcx
-                    .dcx()
-                    .emit_err(errors::RustcStdInternalSymbol { attr_span: attr.span(), span });
+                self.tcx.dcx().emit_err(errors::RustcStdInternalSymbol { attr_span, span });
             }
         }
     }
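
The `export_stable`/`ffi_const`/`ffi_pure`/`rustc_std_internal_symbol` checks above now key off pre-parsed `AttributeKind` values instead of matching attribute names. A simplified, self-contained model of what that buys (the enum and helper below are stand-ins, not rustc's real `AttributeKind` or `find_attr!`):

```rust
// Stand-in model, not rustc's real types: once attributes are parsed into an
// enum, a rule such as "`#[ffi_const]` functions cannot also be `#[ffi_pure]`"
// becomes a match over parsed kinds rather than a symbol-name comparison.
#[derive(Debug, PartialEq)]
enum AttributeKind {
    FfiConst,
    FfiPure,
}

fn has_ffi_const(attrs: &[AttributeKind]) -> bool {
    attrs.iter().any(|a| matches!(a, AttributeKind::FfiConst))
}

fn main() {
    let attrs = [AttributeKind::FfiPure, AttributeKind::FfiConst];
    if attrs.contains(&AttributeKind::FfiPure) && has_ffi_const(&attrs) {
        println!("error: `#[ffi_const]` function cannot be `#[ffi_pure]`");
    }
}
```
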
diff --git a/compiler/rustc_passes/src/check_export.rs b/compiler/rustc_passes/src/check_export.rs
index f8f489d7d06..b1f4584c2a8 100644
--- a/compiler/rustc_passes/src/check_export.rs
+++ b/compiler/rustc_passes/src/check_export.rs
@@ -2,6 +2,7 @@ use std::iter;
 use std::ops::ControlFlow;
 
 use rustc_abi::ExternAbi;
+use rustc_attr_data_structures::{AttributeKind, find_attr};
 use rustc_data_structures::fx::{FxIndexMap, FxIndexSet};
 use rustc_hir as hir;
 use rustc_hir::def::DefKind;
@@ -14,7 +15,7 @@ use rustc_middle::ty::{
     self, Ty, TyCtxt, TypeSuperVisitable, TypeVisitable, TypeVisitor, Visibility,
 };
 use rustc_session::config::CrateType;
-use rustc_span::{Span, sym};
+use rustc_span::Span;
 
 use crate::errors::UnexportableItem;
 
@@ -44,7 +45,7 @@ impl<'tcx> ExportableItemCollector<'tcx> {
     }
 
     fn item_is_exportable(&self, def_id: LocalDefId) -> bool {
-        let has_attr = self.tcx.has_attr(def_id, sym::export_stable);
+        let has_attr = find_attr!(self.tcx.get_all_attrs(def_id), AttributeKind::ExportStable);
         if !self.in_exportable_mod && !has_attr {
             return false;
         }
@@ -80,7 +81,7 @@ impl<'tcx> ExportableItemCollector<'tcx> {
     fn walk_item_with_mod(&mut self, item: &'tcx hir::Item<'tcx>) {
         let def_id = item.hir_id().owner.def_id;
         let old_exportable_mod = self.in_exportable_mod;
-        if self.tcx.get_attr(def_id, sym::export_stable).is_some() {
+        if find_attr!(self.tcx.get_all_attrs(def_id), AttributeKind::ExportStable) {
             self.in_exportable_mod = true;
         }
         let old_seen_exportable_in_mod = std::mem::replace(&mut self.seen_exportable_in_mod, false);
diff --git a/compiler/rustc_passes/src/reachable.rs b/compiler/rustc_passes/src/reachable.rs
index 7e15267a953..b49e8118fe3 100644
--- a/compiler/rustc_passes/src/reachable.rs
+++ b/compiler/rustc_passes/src/reachable.rs
@@ -325,6 +325,7 @@ impl<'tcx> ReachableContext<'tcx> {
                         self.visit(args);
                     }
                 }
+                GlobalAlloc::TypeId { ty, .. } => self.visit(ty),
                 GlobalAlloc::Memory(alloc) => self.propagate_from_alloc(alloc),
             }
         }
diff --git a/compiler/rustc_pattern_analysis/src/checks.rs b/compiler/rustc_pattern_analysis/src/checks.rs
new file mode 100644
index 00000000000..88ccaa1e0e5
--- /dev/null
+++ b/compiler/rustc_pattern_analysis/src/checks.rs
@@ -0,0 +1,50 @@
+//! Contains checks that must be run to validate matches before performing usefulness analysis.
+
+use crate::constructor::Constructor::*;
+use crate::pat_column::PatternColumn;
+use crate::{MatchArm, PatCx};
+
+/// Validate that deref patterns and normal constructors aren't used to match on the same place.
+pub(crate) fn detect_mixed_deref_pat_ctors<'p, Cx: PatCx>(
+    cx: &Cx,
+    arms: &[MatchArm<'p, Cx>],
+) -> Result<(), Cx::Error> {
+    let pat_column = PatternColumn::new(arms);
+    detect_mixed_deref_pat_ctors_inner(cx, &pat_column)
+}
+
+fn detect_mixed_deref_pat_ctors_inner<'p, Cx: PatCx>(
+    cx: &Cx,
+    column: &PatternColumn<'p, Cx>,
+) -> Result<(), Cx::Error> {
+    let Some(ty) = column.head_ty() else {
+        return Ok(());
+    };
+
+    // Check for a mix of deref patterns and normal constructors.
+    let mut deref_pat = None;
+    let mut normal_pat = None;
+    for pat in column.iter() {
+        match pat.ctor() {
+            // The analysis can handle mixing deref patterns with wildcards and opaque patterns.
+            Wildcard | Opaque(_) => {}
+            DerefPattern(_) => deref_pat = Some(pat),
+            // Nothing else can be compared to deref patterns in `Constructor::is_covered_by`.
+            _ => normal_pat = Some(pat),
+        }
+    }
+    if let Some(deref_pat) = deref_pat
+        && let Some(normal_pat) = normal_pat
+    {
+        return Err(cx.report_mixed_deref_pat_ctors(deref_pat, normal_pat));
+    }
+
+    // Specialize and recurse into the patterns' fields.
+    let set = column.analyze_ctors(cx, &ty)?;
+    for ctor in set.present {
+        for specialized_column in column.specialize(cx, &ty, &ctor).iter() {
+            detect_mixed_deref_pat_ctors_inner(cx, specialized_column)?;
+        }
+    }
+    Ok(())
+}
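
The new `checks.rs` walks one pattern column at a time, rejects any column that mixes a deref pattern with another constructor, and then recurses into the specialized subcolumns. A simplified, self-contained model of the per-column rule (the `Ctor` enum is a stand-in for `Constructor`; recursion into fields is omitted):

```rust
// Simplified model of the columnwise rule: wildcards and opaque patterns may
// sit in the same column as a deref pattern, but any other constructor is an
// error, since `is_covered_by` cannot compare it to a deref pattern.
enum Ctor {
    Wildcard,
    Opaque,
    Deref,
    Other,
}

fn detect_mixed(column: &[Ctor]) -> Result<(), &'static str> {
    let mut saw_deref = false;
    let mut saw_other = false;
    for ctor in column {
        match ctor {
            Ctor::Wildcard | Ctor::Opaque => {}
            Ctor::Deref => saw_deref = true,
            Ctor::Other => saw_other = true,
        }
    }
    if saw_deref && saw_other {
        Err("mix of deref patterns and normal constructors in the same column")
    } else {
        Ok(())
    }
}

fn main() {
    assert!(detect_mixed(&[Ctor::Deref, Ctor::Wildcard, Ctor::Opaque]).is_ok());
    assert!(detect_mixed(&[Ctor::Deref, Ctor::Other]).is_err());
}
```
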
diff --git a/compiler/rustc_pattern_analysis/src/lib.rs b/compiler/rustc_pattern_analysis/src/lib.rs
index 2b85d7b26ce..66df35f9ee4 100644
--- a/compiler/rustc_pattern_analysis/src/lib.rs
+++ b/compiler/rustc_pattern_analysis/src/lib.rs
@@ -8,6 +8,7 @@
 #![allow(unused_crate_dependencies)]
 // tidy-alphabetical-end
 
+pub(crate) mod checks;
 pub mod constructor;
 #[cfg(feature = "rustc")]
 pub mod errors;
@@ -107,6 +108,20 @@ pub trait PatCx: Sized + fmt::Debug {
         _gapped_with: &[&DeconstructedPat<Self>],
     ) {
     }
+
+    /// Check if we may need to perform additional deref-pattern-specific validation.
+    fn match_may_contain_deref_pats(&self) -> bool {
+        true
+    }
+
+    /// The current implementation of deref patterns requires that they can't match on the same
+    /// place as a normal constructor. Since this isn't caught by type-checking, we check it in the
+    /// `PatCx` before running the analysis. This reports an error if the check fails.
+    fn report_mixed_deref_pat_ctors(
+        &self,
+        deref_pat: &DeconstructedPat<Self>,
+        normal_pat: &DeconstructedPat<Self>,
+    ) -> Self::Error;
 }
 
 /// The arm of a match expression.
diff --git a/compiler/rustc_pattern_analysis/src/rustc.rs b/compiler/rustc_pattern_analysis/src/rustc.rs
index e53cebc59ba..ee72b676b38 100644
--- a/compiler/rustc_pattern_analysis/src/rustc.rs
+++ b/compiler/rustc_pattern_analysis/src/rustc.rs
@@ -1,3 +1,4 @@
+use std::cell::Cell;
 use std::fmt;
 use std::iter::once;
 
@@ -99,6 +100,16 @@ pub struct RustcPatCtxt<'p, 'tcx: 'p> {
     /// Whether the data at the scrutinee is known to be valid. This is false if the scrutinee comes
     /// from a union field, a pointer deref, or a reference deref (pending opsem decisions).
     pub known_valid_scrutinee: bool,
+    pub internal_state: RustcPatCtxtState,
+}
+
+/// Private fields of [`RustcPatCtxt`], separated out to permit record initialization syntax.
+#[derive(Clone, Default)]
+pub struct RustcPatCtxtState {
+    /// Has a deref pattern been lowered? This is initialized to `false` and is updated by
+    /// [`RustcPatCtxt::lower_pat`] in order to avoid performing deref-pattern-specific validation
+    /// for everything containing patterns.
+    has_lowered_deref_pat: Cell<bool>,
 }
 
 impl<'p, 'tcx: 'p> fmt::Debug for RustcPatCtxt<'p, 'tcx> {
@@ -474,6 +485,7 @@ impl<'p, 'tcx: 'p> RustcPatCtxt<'p, 'tcx> {
                 fields = vec![self.lower_pat(subpattern).at_index(0)];
                 arity = 1;
                 ctor = DerefPattern(cx.reveal_opaque_ty(subpattern.ty));
+                self.internal_state.has_lowered_deref_pat.set(true);
             }
             PatKind::Leaf { subpatterns } | PatKind::Variant { subpatterns, .. } => {
                 match ty.kind() {
@@ -1027,6 +1039,25 @@ impl<'p, 'tcx: 'p> PatCx for RustcPatCtxt<'p, 'tcx> {
             );
         }
     }
+
+    fn match_may_contain_deref_pats(&self) -> bool {
+        self.internal_state.has_lowered_deref_pat.get()
+    }
+
+    fn report_mixed_deref_pat_ctors(
+        &self,
+        deref_pat: &crate::pat::DeconstructedPat<Self>,
+        normal_pat: &crate::pat::DeconstructedPat<Self>,
+    ) -> Self::Error {
+        let deref_pattern_label = deref_pat.data().span;
+        let normal_constructor_label = normal_pat.data().span;
+        self.tcx.dcx().emit_err(errors::MixedDerefPatternConstructors {
+            spans: vec![deref_pattern_label, normal_constructor_label],
+            smart_pointer_ty: deref_pat.ty().inner(),
+            deref_pattern_label,
+            normal_constructor_label,
+        })
+    }
 }
 
 /// Recursively expand this pattern into its subpatterns. Only useful for or-patterns.
@@ -1055,13 +1086,6 @@ pub fn analyze_match<'p, 'tcx>(
 ) -> Result<UsefulnessReport<'p, 'tcx>, ErrorGuaranteed> {
     let scrut_ty = tycx.reveal_opaque_ty(scrut_ty);
 
-    // The analysis doesn't support deref patterns mixed with normal constructors; error if present.
-    // FIXME(deref_patterns): This only needs to run when a deref pattern was found during lowering.
-    if tycx.tcx.features().deref_patterns() {
-        let pat_column = PatternColumn::new(arms);
-        detect_mixed_deref_pat_ctors(tycx, &pat_column)?;
-    }
-
     let scrut_validity = PlaceValidity::from_bool(tycx.known_valid_scrutinee);
     let report = compute_match_usefulness(
         tycx,
@@ -1080,48 +1104,3 @@ pub fn analyze_match<'p, 'tcx>(
 
     Ok(report)
 }
-
-// FIXME(deref_patterns): Currently it's the responsibility of the frontend (rustc or rust-analyzer)
-// to ensure that deref patterns don't appear in the same column as normal constructors. Deref
-// patterns aren't currently implemented in rust-analyzer, but should they be, the columnwise check
-// here could be made generic and shared between frontends.
-fn detect_mixed_deref_pat_ctors<'p, 'tcx>(
-    cx: &RustcPatCtxt<'p, 'tcx>,
-    column: &PatternColumn<'p, RustcPatCtxt<'p, 'tcx>>,
-) -> Result<(), ErrorGuaranteed> {
-    let Some(&ty) = column.head_ty() else {
-        return Ok(());
-    };
-
-    // Check for a mix of deref patterns and normal constructors.
-    let mut normal_ctor_span = None;
-    let mut deref_pat_span = None;
-    for pat in column.iter() {
-        match pat.ctor() {
-            // The analysis can handle mixing deref patterns with wildcards and opaque patterns.
-            Wildcard | Opaque(_) => {}
-            DerefPattern(_) => deref_pat_span = Some(pat.data().span),
-            // Nothing else can be compared to deref patterns in `Constructor::is_covered_by`.
-            _ => normal_ctor_span = Some(pat.data().span),
-        }
-    }
-    if let Some(normal_constructor_label) = normal_ctor_span
-        && let Some(deref_pattern_label) = deref_pat_span
-    {
-        return Err(cx.tcx.dcx().emit_err(errors::MixedDerefPatternConstructors {
-            spans: vec![deref_pattern_label, normal_constructor_label],
-            smart_pointer_ty: ty.inner(),
-            deref_pattern_label,
-            normal_constructor_label,
-        }));
-    }
-
-    // Specialize and recurse into the patterns' fields.
-    let set = column.analyze_ctors(cx, &ty)?;
-    for ctor in set.present {
-        for specialized_column in column.specialize(cx, &ty, &ctor).iter() {
-            detect_mixed_deref_pat_ctors(cx, specialized_column)?;
-        }
-    }
-    Ok(())
-}
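
The `has_lowered_deref_pat` flag added above is what makes the relocated check cheap: pattern lowering flips a `Cell<bool>` the first time it sees a deref pattern, and `match_may_contain_deref_pats` lets usefulness checking skip the columnwise validation for every other match. A minimal sketch of that gating pattern, with assumed, simplified types:

```rust
// Minimal sketch (simplified, assumed types): lowering records whether a deref
// pattern was seen, and the later validation pass is skipped when it was not.
use std::cell::Cell;

#[derive(Default)]
struct LoweringState {
    has_lowered_deref_pat: Cell<bool>,
}

impl LoweringState {
    fn lower_pattern(&self, is_deref_pattern: bool) {
        if is_deref_pattern {
            self.has_lowered_deref_pat.set(true);
        }
        // ... lower the rest of the pattern ...
    }

    fn needs_deref_pattern_checks(&self) -> bool {
        self.has_lowered_deref_pat.get()
    }
}

fn main() {
    let state = LoweringState::default();
    state.lower_pattern(false);
    assert!(!state.needs_deref_pattern_checks());
    state.lower_pattern(true);
    assert!(state.needs_deref_pattern_checks());
}
```
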
diff --git a/compiler/rustc_pattern_analysis/src/usefulness.rs b/compiler/rustc_pattern_analysis/src/usefulness.rs
index c348cd508f9..b1c646e9884 100644
--- a/compiler/rustc_pattern_analysis/src/usefulness.rs
+++ b/compiler/rustc_pattern_analysis/src/usefulness.rs
@@ -720,7 +720,7 @@ use tracing::{debug, instrument};
 use self::PlaceValidity::*;
 use crate::constructor::{Constructor, ConstructorSet, IntRange};
 use crate::pat::{DeconstructedPat, PatId, PatOrWild, WitnessPat};
-use crate::{MatchArm, PatCx, PrivateUninhabitedField};
+use crate::{MatchArm, PatCx, PrivateUninhabitedField, checks};
 #[cfg(not(feature = "rustc"))]
 pub fn ensure_sufficient_stack<R>(f: impl FnOnce() -> R) -> R {
     f()
@@ -1836,6 +1836,11 @@ pub fn compute_match_usefulness<'p, Cx: PatCx>(
     scrut_validity: PlaceValidity,
     complexity_limit: usize,
 ) -> Result<UsefulnessReport<'p, Cx>, Cx::Error> {
+    // The analysis doesn't support deref patterns mixed with normal constructors; error if present.
+    if tycx.match_may_contain_deref_pats() {
+        checks::detect_mixed_deref_pat_ctors(tycx, arms)?;
+    }
+
     let mut cx = UsefulnessCtxt {
         tycx,
         branch_usefulness: FxHashMap::default(),
diff --git a/compiler/rustc_pattern_analysis/tests/common/mod.rs b/compiler/rustc_pattern_analysis/tests/common/mod.rs
index 8980b644f59..0b939ef7816 100644
--- a/compiler/rustc_pattern_analysis/tests/common/mod.rs
+++ b/compiler/rustc_pattern_analysis/tests/common/mod.rs
@@ -1,6 +1,7 @@
 use rustc_pattern_analysis::constructor::{
     Constructor, ConstructorSet, IntRange, MaybeInfiniteInt, RangeEnd, VariantVisibility,
 };
+use rustc_pattern_analysis::pat::DeconstructedPat;
 use rustc_pattern_analysis::usefulness::{PlaceValidity, UsefulnessReport};
 use rustc_pattern_analysis::{MatchArm, PatCx, PrivateUninhabitedField};
 
@@ -184,6 +185,14 @@ impl PatCx for Cx {
     fn complexity_exceeded(&self) -> Result<(), Self::Error> {
         Err(())
     }
+
+    fn report_mixed_deref_pat_ctors(
+        &self,
+        _deref_pat: &DeconstructedPat<Self>,
+        _normal_pat: &DeconstructedPat<Self>,
+    ) -> Self::Error {
+        panic!("`rustc_pattern_analysis::tests` currently doesn't test deref pattern errors")
+    }
 }
 
 /// Construct a single pattern; see `pats!()`.
diff --git a/compiler/rustc_query_impl/src/lib.rs b/compiler/rustc_query_impl/src/lib.rs
index b7d8af2c995..306d4dbc4b4 100644
--- a/compiler/rustc_query_impl/src/lib.rs
+++ b/compiler/rustc_query_impl/src/lib.rs
@@ -2,7 +2,6 @@
 
 // tidy-alphabetical-start
 #![allow(internal_features)]
-#![allow(unused_parens)]
 #![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
 #![doc(rust_logo)]
 #![feature(min_specialization)]
diff --git a/compiler/rustc_resolve/src/build_reduced_graph.rs b/compiler/rustc_resolve/src/build_reduced_graph.rs
index f775cac149e..ef122deba4e 100644
--- a/compiler/rustc_resolve/src/build_reduced_graph.rs
+++ b/compiler/rustc_resolve/src/build_reduced_graph.rs
@@ -162,28 +162,30 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
         }
     }
 
-    pub(crate) fn get_macro(&mut self, res: Res) -> Option<&MacroData> {
+    pub(crate) fn get_macro(&self, res: Res) -> Option<&'ra MacroData> {
         match res {
             Res::Def(DefKind::Macro(..), def_id) => Some(self.get_macro_by_def_id(def_id)),
-            Res::NonMacroAttr(_) => Some(&self.non_macro_attr),
+            Res::NonMacroAttr(_) => Some(self.non_macro_attr),
             _ => None,
         }
     }
 
-    pub(crate) fn get_macro_by_def_id(&mut self, def_id: DefId) -> &MacroData {
-        if self.macro_map.contains_key(&def_id) {
-            return &self.macro_map[&def_id];
-        }
-
-        let loaded_macro = self.cstore().load_macro_untracked(def_id, self.tcx);
-        let macro_data = match loaded_macro {
-            LoadedMacro::MacroDef { def, ident, attrs, span, edition } => {
-                self.compile_macro(&def, ident, &attrs, span, ast::DUMMY_NODE_ID, edition)
-            }
-            LoadedMacro::ProcMacro(ext) => MacroData::new(Arc::new(ext)),
-        };
+    pub(crate) fn get_macro_by_def_id(&self, def_id: DefId) -> &'ra MacroData {
+        // Local macros are always compiled.
+        match def_id.as_local() {
+            Some(local_def_id) => self.local_macro_map[&local_def_id],
+            None => *self.extern_macro_map.borrow_mut().entry(def_id).or_insert_with(|| {
+                let loaded_macro = self.cstore().load_macro_untracked(def_id, self.tcx);
+                let macro_data = match loaded_macro {
+                    LoadedMacro::MacroDef { def, ident, attrs, span, edition } => {
+                        self.compile_macro(&def, ident, &attrs, span, ast::DUMMY_NODE_ID, edition)
+                    }
+                    LoadedMacro::ProcMacro(ext) => MacroData::new(Arc::new(ext)),
+                };
 
-        self.macro_map.entry(def_id).or_insert(macro_data)
+                self.arenas.alloc_macro(macro_data)
+            }),
+        }
     }
 
     pub(crate) fn build_reduced_graph(
@@ -1203,7 +1205,7 @@ impl<'a, 'ra, 'tcx> BuildReducedGraphVisitor<'a, 'ra, 'tcx> {
     fn insert_unused_macro(&mut self, ident: Ident, def_id: LocalDefId, node_id: NodeId) {
         if !ident.as_str().starts_with('_') {
             self.r.unused_macros.insert(def_id, (node_id, ident));
-            let nrules = self.r.macro_map[&def_id.to_def_id()].nrules;
+            let nrules = self.r.local_macro_map[&def_id].nrules;
             self.r.unused_macro_rules.insert(node_id, DenseBitSet::new_filled(nrules));
         }
     }
@@ -1222,7 +1224,7 @@ impl<'a, 'ra, 'tcx> BuildReducedGraphVisitor<'a, 'ra, 'tcx> {
                     Some((macro_kind, ident, span)) => {
                         let res = Res::Def(DefKind::Macro(macro_kind), def_id.to_def_id());
                         let macro_data = MacroData::new(self.r.dummy_ext(macro_kind));
-                        self.r.macro_map.insert(def_id.to_def_id(), macro_data);
+                        self.r.new_local_macro(def_id, macro_data);
                         self.r.proc_macro_stubs.insert(def_id);
                         (res, ident, span, false)
                     }
diff --git a/compiler/rustc_resolve/src/def_collector.rs b/compiler/rustc_resolve/src/def_collector.rs
index 1e345b11c14..e2caf632dd2 100644
--- a/compiler/rustc_resolve/src/def_collector.rs
+++ b/compiler/rustc_resolve/src/def_collector.rs
@@ -165,7 +165,7 @@ impl<'a, 'ra, 'tcx> visit::Visitor<'a> for DefCollector<'a, 'ra, 'tcx> {
             self.create_def(i.id, i.kind.ident().map(|ident| ident.name), def_kind, i.span);
 
         if let Some(macro_data) = opt_macro_data {
-            self.resolver.macro_map.insert(def_id.to_def_id(), macro_data);
+            self.resolver.new_local_macro(def_id, macro_data);
         }
 
         self.with_parent(def_id, |this| {
diff --git a/compiler/rustc_resolve/src/diagnostics.rs b/compiler/rustc_resolve/src/diagnostics.rs
index c99bc747fd2..c4ff5770e76 100644
--- a/compiler/rustc_resolve/src/diagnostics.rs
+++ b/compiler/rustc_resolve/src/diagnostics.rs
@@ -1669,9 +1669,14 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
         let mut all_attrs: UnordMap<Symbol, Vec<_>> = UnordMap::default();
         // We're collecting these in a hashmap, and handle ordering the output further down.
         #[allow(rustc::potential_query_instability)]
-        for (def_id, data) in &self.macro_map {
+        for (def_id, data) in self
+            .local_macro_map
+            .iter()
+            .map(|(local_id, data)| (local_id.to_def_id(), data))
+            .chain(self.extern_macro_map.borrow().iter().map(|(id, d)| (*id, d)))
+        {
             for helper_attr in &data.ext.helper_attrs {
-                let item_name = self.tcx.item_name(*def_id);
+                let item_name = self.tcx.item_name(def_id);
                 all_attrs.entry(*helper_attr).or_default().push(item_name);
                 if helper_attr == &ident.name {
                     derives.push(item_name);
diff --git a/compiler/rustc_resolve/src/ident.rs b/compiler/rustc_resolve/src/ident.rs
index 68fbe48ebcb..558cbef0014 100644
--- a/compiler/rustc_resolve/src/ident.rs
+++ b/compiler/rustc_resolve/src/ident.rs
@@ -13,7 +13,7 @@ use rustc_span::{Ident, Span, kw, sym};
 use tracing::{debug, instrument};
 
 use crate::errors::{ParamKindInEnumDiscriminant, ParamKindInNonTrivialAnonConst};
-use crate::imports::Import;
+use crate::imports::{Import, NameResolution};
 use crate::late::{ConstantHasGenerics, NoConstantGenericsReason, PathSource, Rib, RibKind};
 use crate::macros::{MacroRulesScope, sub_namespace_match};
 use crate::{
@@ -37,7 +37,7 @@ impl From<UsePrelude> for bool {
     }
 }
 
-#[derive(Debug, PartialEq)]
+#[derive(Debug, PartialEq, Clone, Copy)]
 enum Shadowing {
     Restricted,
     Unrestricted,
@@ -879,53 +879,15 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
             .into_iter()
             .find_map(|binding| if binding == ignore_binding { None } else { binding });
 
-        if let Some(Finalize { path_span, report_private, used, root_span, .. }) = finalize {
-            let Some(binding) = binding else {
-                return Err((Determined, Weak::No));
-            };
-
-            if !self.is_accessible_from(binding.vis, parent_scope.module) {
-                if report_private {
-                    self.privacy_errors.push(PrivacyError {
-                        ident,
-                        binding,
-                        dedup_span: path_span,
-                        outermost_res: None,
-                        parent_scope: *parent_scope,
-                        single_nested: path_span != root_span,
-                    });
-                } else {
-                    return Err((Determined, Weak::No));
-                }
-            }
-
-            // Forbid expanded shadowing to avoid time travel.
-            if let Some(shadowed_glob) = resolution.shadowed_glob
-                && shadowing == Shadowing::Restricted
-                && binding.expansion != LocalExpnId::ROOT
-                && binding.res() != shadowed_glob.res()
-            {
-                self.ambiguity_errors.push(AmbiguityError {
-                    kind: AmbiguityKind::GlobVsExpanded,
-                    ident,
-                    b1: binding,
-                    b2: shadowed_glob,
-                    warning: false,
-                    misc1: AmbiguityErrorMisc::None,
-                    misc2: AmbiguityErrorMisc::None,
-                });
-            }
-
-            if shadowing == Shadowing::Unrestricted
-                && binding.expansion != LocalExpnId::ROOT
-                && let NameBindingKind::Import { import, .. } = binding.kind
-                && matches!(import.kind, ImportKind::MacroExport)
-            {
-                self.macro_expanded_macro_export_errors.insert((path_span, binding.span));
-            }
-
-            self.record_use(ident, binding, used);
-            return Ok(binding);
+        if let Some(finalize) = finalize {
+            return self.finalize_module_binding(
+                ident,
+                binding,
+                resolution.shadowed_glob,
+                parent_scope,
+                finalize,
+                shadowing,
+            );
         }
 
         let check_usable = |this: &mut Self, binding: NameBinding<'ra>| {
@@ -944,75 +906,15 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
 
         // Check if one of single imports can still define the name,
         // if it can then our result is not determined and can be invalidated.
-        for single_import in &resolution.single_imports {
-            if ignore_import == Some(*single_import) {
-                // This branch handles a cycle in single imports.
-                //
-                // For example:
-                // ```
-                // use a::b;
-                // use b as a;
-                // ```
-                // 1. Record `use a::b` as the `ignore_import` and attempt to locate `a` in the
-                //    current module.
-                // 2. Encounter the import `use b as a`, which is a `single_import` for `a`,
-                //    and try to find `b` in the current module.
-                // 3. Re-encounter the `use a::b` import since it's a `single_import` of `b`.
-                //    This leads to entering this branch.
-                continue;
-            }
-            if !self.is_accessible_from(single_import.vis, parent_scope.module) {
-                continue;
-            }
-            if let Some(ignored) = ignore_binding
-                && let NameBindingKind::Import { import, .. } = ignored.kind
-                && import == *single_import
-            {
-                // Ignore not just the binding itself, but if it has a shadowed_glob,
-                // ignore that, too, because this loop is supposed to only process
-                // named imports.
-                continue;
-            }
-
-            let Some(module) = single_import.imported_module.get() else {
-                return Err((Undetermined, Weak::No));
-            };
-            let ImportKind::Single { source, target, target_bindings, .. } = &single_import.kind
-            else {
-                unreachable!();
-            };
-            if source != target {
-                // This branch allows the binding to be defined or updated later if the target name
-                // can hide the source.
-                if target_bindings.iter().all(|binding| binding.get().is_none()) {
-                    // None of the target bindings are available, so we can't determine
-                    // if this binding is correct or not.
-                    // See more details in #124840
-                    return Err((Undetermined, Weak::No));
-                } else if target_bindings[ns].get().is_none() && binding.is_some() {
-                    // `binding.is_some()` avoids the condition where the binding
-                    // truly doesn't exist in this namespace and should return `Err(Determined)`.
-                    return Err((Undetermined, Weak::No));
-                }
-            }
-
-            match self.resolve_ident_in_module(
-                module,
-                *source,
-                ns,
-                &single_import.parent_scope,
-                None,
-                ignore_binding,
-                ignore_import,
-            ) {
-                Err((Determined, _)) => continue,
-                Ok(binding)
-                    if !self.is_accessible_from(binding.vis, single_import.parent_scope.module) =>
-                {
-                    continue;
-                }
-                Ok(_) | Err((Undetermined, _)) => return Err((Undetermined, Weak::No)),
-            }
+        if self.single_import_can_define_name(
+            &resolution,
+            binding,
+            ns,
+            ignore_import,
+            ignore_binding,
+            parent_scope,
+        ) {
+            return Err((Undetermined, Weak::No));
         }
 
         // So we have a resolution that's from a glob import. This resolution is determined
@@ -1101,6 +1003,129 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
         Err((Determined, Weak::No))
     }
 
+    fn finalize_module_binding(
+        &mut self,
+        ident: Ident,
+        binding: Option<NameBinding<'ra>>,
+        shadowed_glob: Option<NameBinding<'ra>>,
+        parent_scope: &ParentScope<'ra>,
+        finalize: Finalize,
+        shadowing: Shadowing,
+    ) -> Result<NameBinding<'ra>, (Determinacy, Weak)> {
+        let Finalize { path_span, report_private, used, root_span, .. } = finalize;
+
+        let Some(binding) = binding else {
+            return Err((Determined, Weak::No));
+        };
+
+        if !self.is_accessible_from(binding.vis, parent_scope.module) {
+            if report_private {
+                self.privacy_errors.push(PrivacyError {
+                    ident,
+                    binding,
+                    dedup_span: path_span,
+                    outermost_res: None,
+                    parent_scope: *parent_scope,
+                    single_nested: path_span != root_span,
+                });
+            } else {
+                return Err((Determined, Weak::No));
+            }
+        }
+
+        // Forbid expanded shadowing to avoid time travel.
+        if let Some(shadowed_glob) = shadowed_glob
+            && shadowing == Shadowing::Restricted
+            && binding.expansion != LocalExpnId::ROOT
+            && binding.res() != shadowed_glob.res()
+        {
+            self.ambiguity_errors.push(AmbiguityError {
+                kind: AmbiguityKind::GlobVsExpanded,
+                ident,
+                b1: binding,
+                b2: shadowed_glob,
+                warning: false,
+                misc1: AmbiguityErrorMisc::None,
+                misc2: AmbiguityErrorMisc::None,
+            });
+        }
+
+        if shadowing == Shadowing::Unrestricted
+            && binding.expansion != LocalExpnId::ROOT
+            && let NameBindingKind::Import { import, .. } = binding.kind
+            && matches!(import.kind, ImportKind::MacroExport)
+        {
+            self.macro_expanded_macro_export_errors.insert((path_span, binding.span));
+        }
+
+        self.record_use(ident, binding, used);
+        return Ok(binding);
+    }
+
+    // Checks if a single import can define the `Ident` corresponding to `binding`.
+    // This is used to check whether we can definitively accept a glob as a resolution.
+    fn single_import_can_define_name(
+        &mut self,
+        resolution: &NameResolution<'ra>,
+        binding: Option<NameBinding<'ra>>,
+        ns: Namespace,
+        ignore_import: Option<Import<'ra>>,
+        ignore_binding: Option<NameBinding<'ra>>,
+        parent_scope: &ParentScope<'ra>,
+    ) -> bool {
+        for single_import in &resolution.single_imports {
+            if ignore_import == Some(*single_import) {
+                continue;
+            }
+            if !self.is_accessible_from(single_import.vis, parent_scope.module) {
+                continue;
+            }
+            if let Some(ignored) = ignore_binding
+                && let NameBindingKind::Import { import, .. } = ignored.kind
+                && import == *single_import
+            {
+                continue;
+            }
+
+            let Some(module) = single_import.imported_module.get() else {
+                return true;
+            };
+            let ImportKind::Single { source, target, target_bindings, .. } = &single_import.kind
+            else {
+                unreachable!();
+            };
+            if source != target {
+                if target_bindings.iter().all(|binding| binding.get().is_none()) {
+                    return true;
+                } else if target_bindings[ns].get().is_none() && binding.is_some() {
+                    return true;
+                }
+            }
+
+            match self.resolve_ident_in_module(
+                module,
+                *source,
+                ns,
+                &single_import.parent_scope,
+                None,
+                ignore_binding,
+                ignore_import,
+            ) {
+                Err((Determined, _)) => continue,
+                Ok(binding)
+                    if !self.is_accessible_from(binding.vis, single_import.parent_scope.module) =>
+                {
+                    continue;
+                }
+                Ok(_) | Err((Undetermined, _)) => {
+                    return true;
+                }
+            }
+        }
+
+        false
+    }
+
     /// Validate a local resolution (from ribs).
     #[instrument(level = "debug", skip(self, all_ribs))]
     fn validate_res_from_ribs(
diff --git a/compiler/rustc_resolve/src/late/diagnostics.rs b/compiler/rustc_resolve/src/late/diagnostics.rs
index fa04c8bc604..8114021510e 100644
--- a/compiler/rustc_resolve/src/late/diagnostics.rs
+++ b/compiler/rustc_resolve/src/late/diagnostics.rs
@@ -328,8 +328,7 @@ impl<'ast, 'ra, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> {
                 let module_did = mod_prefix.as_ref().and_then(Res::mod_def_id);
 
                 let mod_prefix =
-                    mod_prefix.map_or_else(String::new, |res| (format!("{} ", res.descr())));
-
+                    mod_prefix.map_or_else(String::new, |res| format!("{} ", res.descr()));
                 (mod_prefix, format!("`{}`", Segment::names_to_string(mod_path)), module_did, None)
             };
 
@@ -1183,15 +1182,23 @@ impl<'ast, 'ra, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> {
                 _ => "`self` value is a keyword only available in methods with a `self` parameter",
             },
         );
+
+        // Using `let self` is wrong even outside an associated method and even in a macro
+        // expansion, so return early if we are resolving `self` in a pattern; see issue #143134.
+        if matches!(source, PathSource::Pat) {
+            return true;
+        }
+
         let is_assoc_fn = self.self_type_is_available();
         let self_from_macro = "a `self` parameter, but a macro invocation can only \
                                access identifiers it receives from parameters";
-        if let Some((fn_kind, span)) = &self.diag_metadata.current_function {
+        if let Some((fn_kind, fn_span)) = &self.diag_metadata.current_function {
             // The current function has a `self` parameter, but we were unable to resolve
             // a reference to `self`. This can only happen if the `self` identifier we
-            // are resolving came from a different hygiene context.
+            // are resolving came from a different hygiene context or a variable binding.
+            // But the variable-binding case already returned early above.
             if fn_kind.decl().inputs.get(0).is_some_and(|p| p.is_self()) {
-                err.span_label(*span, format!("this function has {self_from_macro}"));
+                err.span_label(*fn_span, format!("this function has {self_from_macro}"));
             } else {
                 let doesnt = if is_assoc_fn {
                     let (span, sugg) = fn_kind
@@ -1204,7 +1211,7 @@ impl<'ast, 'ra, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> {
                             // This avoids placing the suggestion into the visibility specifier.
                             let span = fn_kind
                                 .ident()
-                                .map_or(*span, |ident| span.with_lo(ident.span.hi()));
+                                .map_or(*fn_span, |ident| fn_span.with_lo(ident.span.hi()));
                             (
                                 self.r
                                     .tcx
diff --git a/compiler/rustc_resolve/src/lib.rs b/compiler/rustc_resolve/src/lib.rs
index f8ca20c568f..6e4c5ef1909 100644
--- a/compiler/rustc_resolve/src/lib.rs
+++ b/compiler/rustc_resolve/src/lib.rs
@@ -1128,10 +1128,12 @@ pub struct Resolver<'ra, 'tcx> {
     builtin_macros: FxHashMap<Symbol, SyntaxExtensionKind>,
     registered_tools: &'tcx RegisteredTools,
     macro_use_prelude: FxIndexMap<Symbol, NameBinding<'ra>>,
-    macro_map: FxHashMap<DefId, MacroData>,
+    local_macro_map: FxHashMap<LocalDefId, &'ra MacroData>,
+    /// Lazily populated cache of macros loaded from external crates.
+    extern_macro_map: RefCell<FxHashMap<DefId, &'ra MacroData>>,
     dummy_ext_bang: Arc<SyntaxExtension>,
     dummy_ext_derive: Arc<SyntaxExtension>,
-    non_macro_attr: MacroData,
+    non_macro_attr: &'ra MacroData,
     local_macro_def_scopes: FxHashMap<LocalDefId, Module<'ra>>,
     ast_transform_scopes: FxHashMap<LocalExpnId, Module<'ra>>,
     unused_macros: FxIndexMap<LocalDefId, (NodeId, Ident)>,
@@ -1241,6 +1243,7 @@ pub struct ResolverArenas<'ra> {
     imports: TypedArena<ImportData<'ra>>,
     name_resolutions: TypedArena<RefCell<NameResolution<'ra>>>,
     ast_paths: TypedArena<ast::Path>,
+    macros: TypedArena<MacroData>,
     dropless: DroplessArena,
 }
 
@@ -1287,7 +1290,7 @@ impl<'ra> ResolverArenas<'ra> {
         self.name_resolutions.alloc(Default::default())
     }
     fn alloc_macro_rules_scope(&'ra self, scope: MacroRulesScope<'ra>) -> MacroRulesScopeRef<'ra> {
-        Interned::new_unchecked(self.dropless.alloc(Cell::new(scope)))
+        self.dropless.alloc(Cell::new(scope))
     }
     fn alloc_macro_rules_binding(
         &'ra self,
@@ -1298,6 +1301,9 @@ impl<'ra> ResolverArenas<'ra> {
     fn alloc_ast_paths(&'ra self, paths: &[ast::Path]) -> &'ra [ast::Path] {
         self.ast_paths.alloc_from_iter(paths.iter().cloned())
     }
+    fn alloc_macro(&'ra self, macro_data: MacroData) -> &'ra MacroData {
+        self.macros.alloc(macro_data)
+    }
     fn alloc_pattern_spans(&'ra self, spans: impl Iterator<Item = Span>) -> &'ra [Span] {
         self.dropless.alloc_from_iter(spans)
     }
@@ -1540,10 +1546,12 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
             builtin_macros: Default::default(),
             registered_tools,
             macro_use_prelude: Default::default(),
-            macro_map: FxHashMap::default(),
+            local_macro_map: Default::default(),
+            extern_macro_map: Default::default(),
             dummy_ext_bang: Arc::new(SyntaxExtension::dummy_bang(edition)),
             dummy_ext_derive: Arc::new(SyntaxExtension::dummy_derive(edition)),
-            non_macro_attr: MacroData::new(Arc::new(SyntaxExtension::non_macro_attr(edition))),
+            non_macro_attr: arenas
+                .alloc_macro(MacroData::new(Arc::new(SyntaxExtension::non_macro_attr(edition)))),
             invocation_parent_scopes: Default::default(),
             output_macro_rules_scopes: Default::default(),
             macro_rules_scopes: Default::default(),
@@ -1616,6 +1624,12 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
         )
     }
 
+    fn new_local_macro(&mut self, def_id: LocalDefId, macro_data: MacroData) -> &'ra MacroData {
+        let mac = self.arenas.alloc_macro(macro_data);
+        self.local_macro_map.insert(def_id, mac);
+        mac
+    }
+
     fn next_node_id(&mut self) -> NodeId {
         let start = self.next_node_id;
         let next = start.as_u32().checked_add(1).expect("input too large; ran out of NodeIds");
@@ -1734,7 +1748,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
         f(self, MacroNS);
     }
 
-    fn is_builtin_macro(&mut self, res: Res) -> bool {
+    fn is_builtin_macro(&self, res: Res) -> bool {
         self.get_macro(res).is_some_and(|macro_data| macro_data.ext.builtin_name.is_some())
     }
 
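The `macro_map` split above leans on a familiar pattern: the resolver arena owns each `MacroData`, a `RefCell`ed map caches one `&'ra MacroData` per external `DefId`, and lookups therefore only need `&self`. A self-contained sketch of that pattern, assuming the third-party `typed-arena` crate as a stand-in for rustc's `TypedArena` (all names here are illustrative):

```rust
// Sketch of the "arena owns it, a RefCell'd map caches it" pattern behind
// `extern_macro_map`; `typed_arena::Arena` stands in for rustc's `TypedArena`.
use std::cell::RefCell;
use std::collections::HashMap;

use typed_arena::Arena; // assumed dependency: the `typed-arena` crate

struct MacroData {
    name: String,
}

struct MacroCache<'a> {
    arena: &'a Arena<MacroData>,
    loaded: RefCell<HashMap<u32, &'a MacroData>>,
}

impl<'a> MacroCache<'a> {
    // `&self` is enough: interior mutability handles the cache, and the arena
    // keeps every `MacroData` alive for `'a`.
    fn get_or_load(&self, def_id: u32) -> &'a MacroData {
        *self.loaded.borrow_mut().entry(def_id).or_insert_with(|| {
            // The expensive "load from the external crate" step runs at most once per id.
            &*self.arena.alloc(MacroData { name: format!("macro_{def_id}") })
        })
    }
}

fn main() {
    let arena = Arena::new();
    let cache = MacroCache { arena: &arena, loaded: RefCell::new(HashMap::new()) };
    let first = cache.get_or_load(7);
    let again = cache.get_or_load(7);
    assert!(std::ptr::eq(first, again)); // the cached arena allocation is reused
    println!("{}", first.name);
}
```
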
diff --git a/compiler/rustc_resolve/src/macros.rs b/compiler/rustc_resolve/src/macros.rs
index acbefe53422..89bbe8dad98 100644
--- a/compiler/rustc_resolve/src/macros.rs
+++ b/compiler/rustc_resolve/src/macros.rs
@@ -9,7 +9,6 @@ use rustc_ast::expand::StrippedCfgItem;
 use rustc_ast::{self as ast, Crate, NodeId, attr};
 use rustc_ast_pretty::pprust;
 use rustc_attr_data_structures::StabilityLevel;
-use rustc_data_structures::intern::Interned;
 use rustc_errors::{Applicability, DiagCtxtHandle, StashKey};
 use rustc_expand::base::{
     Annotatable, DeriveResolution, Indeterminate, ResolverExpand, SyntaxExtension,
@@ -80,7 +79,7 @@ pub(crate) enum MacroRulesScope<'ra> {
 /// This helps to avoid uncontrollable growth of `macro_rules!` scope chains,
 /// which usually grow linearly with the number of macro invocations
 /// in a module (including derives) and hurt performance.
-pub(crate) type MacroRulesScopeRef<'ra> = Interned<'ra, Cell<MacroRulesScope<'ra>>>;
+pub(crate) type MacroRulesScopeRef<'ra> = &'ra Cell<MacroRulesScope<'ra>>;
 
 /// Macro namespace is separated into two sub-namespaces, one for bang macros and
 /// one for attribute-like macros (attributes, derives).
@@ -354,8 +353,8 @@ impl<'ra, 'tcx> ResolverExpand for Resolver<'ra, 'tcx> {
             if unused_arms.is_empty() {
                 continue;
             }
-            let def_id = self.local_def_id(node_id).to_def_id();
-            let m = &self.macro_map[&def_id];
+            let def_id = self.local_def_id(node_id);
+            let m = &self.local_macro_map[&def_id];
             let SyntaxExtensionKind::LegacyBang(ref ext) = m.ext.kind else {
                 continue;
             };
@@ -1132,7 +1131,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
         }
     }
 
-    pub(crate) fn check_reserved_macro_name(&mut self, ident: Ident, res: Res) {
+    pub(crate) fn check_reserved_macro_name(&self, ident: Ident, res: Res) {
         // Reserve some names that are not quite covered by the general check
         // performed on `Resolver::builtin_attrs`.
         if ident.name == sym::cfg || ident.name == sym::cfg_attr {
@@ -1148,7 +1147,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
     ///
     /// Possibly replace its expander to a pre-defined one for built-in macros.
     pub(crate) fn compile_macro(
-        &mut self,
+        &self,
         macro_def: &ast::MacroDef,
         ident: Ident,
         attrs: &[rustc_hir::Attribute],
diff --git a/compiler/rustc_session/src/config.rs b/compiler/rustc_session/src/config.rs
index 4627c2978fc..a91e2140fd4 100644
--- a/compiler/rustc_session/src/config.rs
+++ b/compiler/rustc_session/src/config.rs
@@ -370,12 +370,34 @@ impl LinkSelfContained {
     }
 
     /// To help checking CLI usage while some of the values are unstable: returns whether one of the
-    /// components was set individually. This would also require the `-Zunstable-options` flag, to
-    /// be allowed.
-    fn are_unstable_variants_set(&self) -> bool {
-        let any_component_set =
-            !self.enabled_components.is_empty() || !self.disabled_components.is_empty();
-        self.explicitly_set.is_none() && any_component_set
+    /// unstable components was set individually, for the given `TargetTuple`. This would also
+    /// require the `-Zunstable-options` flag to be allowed.
+    fn check_unstable_variants(&self, target_tuple: &TargetTuple) -> Result<(), String> {
+        if self.explicitly_set.is_some() {
+            return Ok(());
+        }
+
+        // `-C link-self-contained=-linker` is only stable on x64 linux.
+        let has_minus_linker = self.disabled_components.is_linker_enabled();
+        if has_minus_linker && target_tuple.tuple() != "x86_64-unknown-linux-gnu" {
+            return Err(format!(
+                "`-C link-self-contained=-linker` is unstable on the `{target_tuple}` \
+                    target. The `-Z unstable-options` flag must also be passed to use it on this target",
+            ));
+        }
+
+        // Any `+linker` or other component used is unstable, and that's an error.
+        let unstable_enabled = self.enabled_components;
+        let unstable_disabled = self.disabled_components - LinkSelfContainedComponents::LINKER;
+        if !unstable_enabled.union(unstable_disabled).is_empty() {
+            return Err(String::from(
+                "only `-C link-self-contained` values `y`/`yes`/`on`/`n`/`no`/`off`/`-linker` \
+                are stable, the `-Z unstable-options` flag must also be passed to use \
+                the unstable values",
+            ));
+        }
+
+        Ok(())
     }
 
     /// Returns whether the self-contained linker component was enabled on the CLI, using the
@@ -402,7 +424,7 @@ impl LinkSelfContained {
     }
 }
 
-/// The different values that `-Z linker-features` can take on the CLI: a list of individually
+/// The different values that `-C linker-features` can take on the CLI: a list of individually
 /// enabled or disabled features used during linking.
 ///
 /// There is no need to enable or disable them in bulk. Each feature is fine-grained, and can be
@@ -442,6 +464,39 @@ impl LinkerFeaturesCli {
             _ => None,
         }
     }
+
+    /// When *not* using `-Z unstable-options` on the CLI, ensures that only stable linker
+    /// features are used for the given `TargetTuple`. Returns `Ok` if no unstable variants are
+    /// used. The caller should first ensure that unstable options are disallowed, e.g. that
+    /// `nightly_options::is_unstable_enabled()` returns false.
+    pub(crate) fn check_unstable_variants(&self, target_tuple: &TargetTuple) -> Result<(), String> {
+        // `-C linker-features=-lld` is only stable on x64 linux.
+        let has_minus_lld = self.disabled.is_lld_enabled();
+        if has_minus_lld && target_tuple.tuple() != "x86_64-unknown-linux-gnu" {
+            return Err(format!(
+                "`-C linker-features=-lld` is unstable on the `{target_tuple}` \
+                    target. The `-Z unstable-options` flag must also be passed to use it on this target",
+            ));
+        }
+
+        // Any `+lld` or non-lld feature used is unstable, and that's an error.
+        let unstable_enabled = self.enabled;
+        let unstable_disabled = self.disabled - LinkerFeatures::LLD;
+        if !unstable_enabled.union(unstable_disabled).is_empty() {
+            let unstable_features: Vec<_> = unstable_enabled
+                .iter()
+                .map(|f| format!("+{}", f.as_str().unwrap()))
+                .chain(unstable_disabled.iter().map(|f| format!("-{}", f.as_str().unwrap())))
+                .collect();
+            return Err(format!(
+                "`-C linker-features={}` is unstable, and also requires the \
+                `-Z unstable-options` flag to be used",
+                unstable_features.join(","),
+            ));
+        }
+
+        Ok(())
+    }
 }
 
 /// Used with `-Z assert-incr-state`.
@@ -2638,26 +2693,21 @@ pub fn build_session_options(early_dcx: &mut EarlyDiagCtxt, matches: &getopts::M
         }
     }
 
-    if !nightly_options::is_unstable_enabled(matches)
-        && cg.force_frame_pointers == FramePointer::NonLeaf
-    {
+    let unstable_options_enabled = nightly_options::is_unstable_enabled(matches);
+    if !unstable_options_enabled && cg.force_frame_pointers == FramePointer::NonLeaf {
         early_dcx.early_fatal(
             "`-Cforce-frame-pointers=non-leaf` or `always` also requires `-Zunstable-options` \
                 and a nightly compiler",
         )
     }
 
-    // For testing purposes, until we have more feedback about these options: ensure `-Z
-    // unstable-options` is required when using the unstable `-C link-self-contained` and `-C
-    // linker-flavor` options.
-    if !nightly_options::is_unstable_enabled(matches) {
-        let uses_unstable_self_contained_option =
-            cg.link_self_contained.are_unstable_variants_set();
-        if uses_unstable_self_contained_option {
-            early_dcx.early_fatal(
-                "only `-C link-self-contained` values `y`/`yes`/`on`/`n`/`no`/`off` are stable, \
-                the `-Z unstable-options` flag must also be passed to use the unstable values",
-            );
+    let target_triple = parse_target_triple(early_dcx, matches);
+
+    // Ensure `-Z unstable-options` is required when using the unstable `-C link-self-contained` and
+    // `-C linker-flavor` options.
+    if !unstable_options_enabled {
+        if let Err(error) = cg.link_self_contained.check_unstable_variants(&target_triple) {
+            early_dcx.early_fatal(error);
         }
 
         if let Some(flavor) = cg.linker_flavor {
@@ -2697,7 +2747,6 @@ pub fn build_session_options(early_dcx: &mut EarlyDiagCtxt, matches: &getopts::M
 
     let cg = cg;
 
-    let target_triple = parse_target_triple(early_dcx, matches);
     let opt_level = parse_opt_level(early_dcx, matches, &cg);
     // The `-g` and `-C debuginfo` flags specify the same setting, so we want to be able
     // to use them interchangeably. See the note above (regarding `-O` and `-C opt-level`)
@@ -2706,6 +2755,12 @@ pub fn build_session_options(early_dcx: &mut EarlyDiagCtxt, matches: &getopts::M
     let debuginfo = select_debuginfo(matches, &cg);
     let debuginfo_compression = unstable_opts.debuginfo_compression;
 
+    if !unstable_options_enabled {
+        if let Err(error) = cg.linker_features.check_unstable_variants(&target_triple) {
+            early_dcx.early_fatal(error);
+        }
+    }
+
     let crate_name = matches.opt_str("crate-name");
     let unstable_features = UnstableFeatures::from_environment(crate_name.as_deref());
     // Parse any `-l` flags, which link to native libraries.
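
As an aside on the gating introduced above: the following is a small, self-contained sketch of the rule the two `check_unstable_variants` helpers enforce, written with a hand-rolled stand-in for the real bitflags types (`LinkerFeaturesCli`, `LinkerFeatures`), so it is illustrative only. Without `-Z unstable-options`, the plain on/off values of `-C link-self-contained` plus `-C linker-features=-lld` (the latter only on `x86_64-unknown-linux-gnu`) are accepted; everything else is rejected.

```rust
// Illustrative stand-in for the set arithmetic in `check_unstable_variants` above;
// not the actual rustc types.
#[derive(Clone, Copy)]
struct Features(u8);

impl Features {
    const EMPTY: Features = Features(0);
    const CC: Features = Features(1 << 0);
    const LLD: Features = Features(1 << 1);

    fn minus(self, other: Features) -> Features {
        Features(self.0 & !other.0)
    }
    fn contains(self, other: Features) -> bool {
        self.0 & other.0 == other.0
    }
    fn is_empty(self) -> bool {
        self.0 == 0
    }
}

/// Without `-Z unstable-options`, only `-lld` is allowed, and only on x64 Linux.
fn check_unstable_variants(
    enabled: Features,
    disabled: Features,
    target: &str,
) -> Result<(), String> {
    if disabled.contains(Features::LLD) && target != "x86_64-unknown-linux-gnu" {
        return Err(format!("`-C linker-features=-lld` is unstable on the `{target}` target"));
    }
    // Any `+lld` or non-lld feature requires `-Z unstable-options`.
    if !enabled.is_empty() || !disabled.minus(Features::LLD).is_empty() {
        return Err(String::from("unstable `-C linker-features` values need `-Z unstable-options`"));
    }
    Ok(())
}

fn main() {
    // `-C linker-features=-lld` on x64 Linux: stable.
    assert!(check_unstable_variants(Features::EMPTY, Features::LLD, "x86_64-unknown-linux-gnu").is_ok());
    // The same flag on another target: still unstable.
    assert!(check_unstable_variants(Features::EMPTY, Features::LLD, "aarch64-apple-darwin").is_err());
    // `+lld` or `-cc` anywhere: unstable.
    assert!(check_unstable_variants(Features::LLD, Features::EMPTY, "x86_64-unknown-linux-gnu").is_err());
    assert!(check_unstable_variants(Features::EMPTY, Features::CC, "x86_64-unknown-linux-gnu").is_err());
}
```
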
diff --git a/compiler/rustc_session/src/config/cfg.rs b/compiler/rustc_session/src/config/cfg.rs
index cbfe9e0da6a..62891eb4f26 100644
--- a/compiler/rustc_session/src/config/cfg.rs
+++ b/compiler/rustc_session/src/config/cfg.rs
@@ -278,7 +278,7 @@ pub(crate) fn default_configuration(sess: &Session) -> Cfg {
             };
             insert_atomic(sym::integer(i), align);
             if sess.target.pointer_width as u64 == i {
-                insert_atomic(sym::ptr, layout.pointer_align.abi);
+                insert_atomic(sym::ptr, layout.pointer_align().abi);
             }
         }
     }
diff --git a/compiler/rustc_session/src/options.rs b/compiler/rustc_session/src/options.rs
index ecd82c0cc01..626262c8442 100644
--- a/compiler/rustc_session/src/options.rs
+++ b/compiler/rustc_session/src/options.rs
@@ -2015,6 +2015,8 @@ options! {
         on a C toolchain or linker installed in the system"),
     linker: Option<PathBuf> = (None, parse_opt_pathbuf, [UNTRACKED],
         "system linker to link outputs with"),
+    linker_features: LinkerFeaturesCli = (LinkerFeaturesCli::default(), parse_linker_features, [UNTRACKED],
+        "a comma-separated list of linker features to enable (+) or disable (-): `lld`"),
     linker_flavor: Option<LinkerFlavorCli> = (None, parse_linker_flavor, [UNTRACKED],
         "linker flavor"),
     linker_plugin_lto: LinkerPluginLto = (LinkerPluginLto::Disabled,
@@ -2307,8 +2309,6 @@ options! {
         "link native libraries in the linker invocation (default: yes)"),
     link_only: bool = (false, parse_bool, [TRACKED],
         "link the `.rlink` file generated by `-Z no-link` (default: no)"),
-    linker_features: LinkerFeaturesCli = (LinkerFeaturesCli::default(), parse_linker_features, [UNTRACKED],
-        "a comma-separated list of linker features to enable (+) or disable (-): `lld`"),
     lint_llvm_ir: bool = (false, parse_bool, [TRACKED],
         "lint LLVM IR (default: no)"),
     lint_mir: bool = (false, parse_bool, [UNTRACKED],
diff --git a/compiler/rustc_smir/src/alloc.rs b/compiler/rustc_smir/src/alloc.rs
index cad643f3709..fd3cf24edb5 100644
--- a/compiler/rustc_smir/src/alloc.rs
+++ b/compiler/rustc_smir/src/alloc.rs
@@ -47,15 +47,12 @@ pub fn try_new_slice<'tcx, B: Bridge>(
     let scalar_ptr = Scalar::from_pointer(ptr, &cx.tcx);
     let scalar_meta: Scalar = Scalar::from_target_usize(meta, &cx.tcx);
     let mut allocation = Allocation::new(layout.size, layout.align.abi, AllocInit::Uninit, ());
+    let ptr_size = cx.tcx.data_layout.pointer_size();
     allocation
-        .write_scalar(&cx.tcx, alloc_range(Size::ZERO, cx.tcx.data_layout.pointer_size), scalar_ptr)
+        .write_scalar(&cx.tcx, alloc_range(Size::ZERO, ptr_size), scalar_ptr)
         .map_err(|e| B::Error::from_internal(e))?;
     allocation
-        .write_scalar(
-            &cx.tcx,
-            alloc_range(cx.tcx.data_layout.pointer_size, scalar_meta.size()),
-            scalar_meta,
-        )
+        .write_scalar(&cx.tcx, alloc_range(ptr_size, scalar_meta.size()), scalar_meta)
         .map_err(|e| B::Error::from_internal(e))?;
 
     Ok(allocation)
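
As a quick illustration of what `try_new_slice` writes (ordinary Rust on the host, not the rustc_smir API; it assumes the host pointer width stands in for the target's): a slice allocation is a fat pointer, i.e. the data pointer at offset 0 followed by the `usize` length at offset `pointer_size`.

```rust
fn main() {
    // Stand-in for `tcx.data_layout.pointer_size()`, using the host's pointer width.
    let ptr_size = std::mem::size_of::<usize>();

    let slice: &[u8] = &[1, 2, 3];
    // A `&[u8]` is exactly two pointer-sized words: the data pointer and the length.
    assert_eq!(std::mem::size_of_val(&slice), 2 * ptr_size);

    // The two scalars written above land at ranges [0, ptr_size) and [ptr_size, ptr_size + meta size).
    let data_ptr = slice.as_ptr();
    let len = slice.len();
    println!("ptr = {data_ptr:p}, len = {len}, word size = {ptr_size}");
}
```
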
diff --git a/compiler/rustc_smir/src/context/impls.rs b/compiler/rustc_smir/src/context/impls.rs
index 9faadabe489..fdefad2821b 100644
--- a/compiler/rustc_smir/src/context/impls.rs
+++ b/compiler/rustc_smir/src/context/impls.rs
@@ -112,7 +112,7 @@ impl<'tcx, B: Bridge> SmirCtxt<'tcx, B> {
     }
 
     pub fn target_pointer_size(&self) -> usize {
-        self.tcx.data_layout.pointer_size.bits().try_into().unwrap()
+        self.tcx.data_layout.pointer_size().bits().try_into().unwrap()
     }
 
     pub fn entry_fn(&self) -> Option<DefId> {
diff --git a/compiler/rustc_span/src/symbol.rs b/compiler/rustc_span/src/symbol.rs
index 09f01d8704e..4df91cc3429 100644
--- a/compiler/rustc_span/src/symbol.rs
+++ b/compiler/rustc_span/src/symbol.rs
@@ -2194,6 +2194,7 @@ symbols! {
         type_changing_struct_update,
         type_const,
         type_id,
+        type_id_eq,
         type_ir,
         type_ir_infer_ctxt_like,
         type_ir_inherent,
diff --git a/compiler/rustc_target/src/callconv/loongarch.rs b/compiler/rustc_target/src/callconv/loongarch.rs
index c779720f97b..27b41cc09ed 100644
--- a/compiler/rustc_target/src/callconv/loongarch.rs
+++ b/compiler/rustc_target/src/callconv/loongarch.rs
@@ -334,7 +334,7 @@ where
     Ty: TyAbiInterface<'a, C> + Copy,
     C: HasDataLayout + HasTargetSpec,
 {
-    let xlen = cx.data_layout().pointer_size.bits();
+    let xlen = cx.data_layout().pointer_size().bits();
     let flen = match &cx.target_spec().llvm_abiname[..] {
         "ilp32f" | "lp64f" => 32,
         "ilp32d" | "lp64d" => 64,
@@ -369,7 +369,7 @@ where
     Ty: TyAbiInterface<'a, C> + Copy,
     C: HasDataLayout + HasTargetSpec,
 {
-    let grlen = cx.data_layout().pointer_size.bits();
+    let grlen = cx.data_layout().pointer_size().bits();
 
     for arg in fn_abi.args.iter_mut() {
         if arg.is_ignore() {
diff --git a/compiler/rustc_target/src/callconv/mips.rs b/compiler/rustc_target/src/callconv/mips.rs
index 6162267a0d0..48a01da865b 100644
--- a/compiler/rustc_target/src/callconv/mips.rs
+++ b/compiler/rustc_target/src/callconv/mips.rs
@@ -10,7 +10,7 @@ where
         ret.extend_integer_width_to(32);
     } else {
         ret.make_indirect();
-        *offset += cx.data_layout().pointer_size;
+        *offset += cx.data_layout().pointer_size();
     }
 }
 
diff --git a/compiler/rustc_target/src/callconv/mod.rs b/compiler/rustc_target/src/callconv/mod.rs
index 71cc2a45563..ab3271220eb 100644
--- a/compiler/rustc_target/src/callconv/mod.rs
+++ b/compiler/rustc_target/src/callconv/mod.rs
@@ -733,7 +733,7 @@ impl<'a, Ty> FnAbi<'a, Ty> {
             }
 
             if arg_idx.is_none()
-                && arg.layout.size > Primitive::Pointer(AddressSpace::DATA).size(cx) * 2
+                && arg.layout.size > Primitive::Pointer(AddressSpace::ZERO).size(cx) * 2
                 && !matches!(arg.layout.backend_repr, BackendRepr::SimdVector { .. })
             {
                 // Return values larger than 2 registers using a return area
@@ -792,7 +792,7 @@ impl<'a, Ty> FnAbi<'a, Ty> {
 
                     let size = arg.layout.size;
                     if arg.layout.is_sized()
-                        && size <= Primitive::Pointer(AddressSpace::DATA).size(cx)
+                        && size <= Primitive::Pointer(AddressSpace::ZERO).size(cx)
                     {
                         // We want to pass small aggregates as immediates, but using
                         // an LLVM aggregate type for this leads to bad optimizations,
diff --git a/compiler/rustc_target/src/callconv/riscv.rs b/compiler/rustc_target/src/callconv/riscv.rs
index 6a2038f9381..a06f54d60e7 100644
--- a/compiler/rustc_target/src/callconv/riscv.rs
+++ b/compiler/rustc_target/src/callconv/riscv.rs
@@ -418,7 +418,7 @@ where
         "ilp32d" | "lp64d" => 64,
         _ => 0,
     };
-    let xlen = cx.data_layout().pointer_size.bits();
+    let xlen = cx.data_layout().pointer_size().bits();
 
     let mut avail_gprs = 8;
     let mut avail_fprs = 8;
@@ -448,7 +448,7 @@ where
     Ty: TyAbiInterface<'a, C> + Copy,
     C: HasDataLayout + HasTargetSpec,
 {
-    let xlen = cx.data_layout().pointer_size.bits();
+    let xlen = cx.data_layout().pointer_size().bits();
 
     for arg in fn_abi.args.iter_mut() {
         if arg.is_ignore() {
diff --git a/compiler/rustc_target/src/callconv/sparc.rs b/compiler/rustc_target/src/callconv/sparc.rs
index 6162267a0d0..48a01da865b 100644
--- a/compiler/rustc_target/src/callconv/sparc.rs
+++ b/compiler/rustc_target/src/callconv/sparc.rs
@@ -10,7 +10,7 @@ where
         ret.extend_integer_width_to(32);
     } else {
         ret.make_indirect();
-        *offset += cx.data_layout().pointer_size;
+        *offset += cx.data_layout().pointer_size();
     }
 }
 
diff --git a/compiler/rustc_target/src/callconv/x86.rs b/compiler/rustc_target/src/callconv/x86.rs
index bdf116ff303..918b71c80c4 100644
--- a/compiler/rustc_target/src/callconv/x86.rs
+++ b/compiler/rustc_target/src/callconv/x86.rs
@@ -219,7 +219,7 @@ where
                 // SSE ABI. We prefer this over integer registers as float scalars need to be in SSE
                 // registers for float operations, so that's the best place to pass them around.
                 fn_abi.ret.cast_to(Reg { kind: RegKind::Vector, size: fn_abi.ret.layout.size });
-            } else if fn_abi.ret.layout.size <= Primitive::Pointer(AddressSpace::DATA).size(cx) {
+            } else if fn_abi.ret.layout.size <= Primitive::Pointer(AddressSpace::ZERO).size(cx) {
                 // Same size or smaller than pointer, return in an integer register.
                 fn_abi.ret.cast_to(Reg { kind: RegKind::Integer, size: fn_abi.ret.layout.size });
             } else {
diff --git a/compiler/rustc_target/src/spec/mod.rs b/compiler/rustc_target/src/spec/mod.rs
index 7a49f004072..4bc0d88a910 100644
--- a/compiler/rustc_target/src/spec/mod.rs
+++ b/compiler/rustc_target/src/spec/mod.rs
@@ -725,7 +725,7 @@ impl ToJson for LinkSelfContainedComponents {
 }
 
 bitflags::bitflags! {
-    /// The `-Z linker-features` components that can individually be enabled or disabled.
+    /// The `-C linker-features` components that can individually be enabled or disabled.
     ///
     /// They are feature flags intended to be a more flexible mechanism than linker flavors, and
     /// also to prevent a combinatorial explosion of flavors whenever a new linker feature is
@@ -756,7 +756,7 @@ bitflags::bitflags! {
 rustc_data_structures::external_bitflags_debug! { LinkerFeatures }
 
 impl LinkerFeatures {
-    /// Parses a single `-Z linker-features` well-known feature, not a set of flags.
+    /// Parses a single `-C linker-features` well-known feature, not a set of flags.
     pub fn from_str(s: &str) -> Option<LinkerFeatures> {
         Some(match s {
             "cc" => LinkerFeatures::CC,
@@ -765,6 +765,17 @@ impl LinkerFeatures {
         })
     }
 
+    /// Returns the linker feature name, as it would be passed on the CLI.
+    ///
+    /// Returns `None` if the bitflags don't represent a single feature but a mix of multiple flags.
+    pub fn as_str(self) -> Option<&'static str> {
+        Some(match self {
+            LinkerFeatures::CC => "cc",
+            LinkerFeatures::LLD => "lld",
+            _ => return None,
+        })
+    }
+
     /// Returns whether the `lld` linker feature is enabled.
     pub fn is_lld_enabled(self) -> bool {
         self.contains(LinkerFeatures::LLD)
@@ -2198,7 +2209,10 @@ pub struct TargetMetadata {
 
 impl Target {
     pub fn parse_data_layout(&self) -> Result<TargetDataLayout, TargetDataLayoutErrors<'_>> {
-        let mut dl = TargetDataLayout::parse_from_llvm_datalayout_string(&self.data_layout)?;
+        let mut dl = TargetDataLayout::parse_from_llvm_datalayout_string(
+            &self.data_layout,
+            self.options.default_address_space,
+        )?;
 
         // Perform consistency checks against the Target information.
         if dl.endian != self.endian {
@@ -2209,9 +2223,10 @@ impl Target {
         }
 
         let target_pointer_width: u64 = self.pointer_width.into();
-        if dl.pointer_size.bits() != target_pointer_width {
+        let dl_pointer_size: u64 = dl.pointer_size().bits();
+        if dl_pointer_size != target_pointer_width {
             return Err(TargetDataLayoutErrors::InconsistentTargetPointerWidth {
-                pointer_size: dl.pointer_size.bits(),
+                pointer_size: dl_pointer_size,
                 target: self.pointer_width,
             });
         }
@@ -2650,6 +2665,11 @@ pub struct TargetOptions {
     /// Whether the target supports XRay instrumentation.
     pub supports_xray: bool,
 
+    /// The default address space for this target. When using LLVM as a backend, most targets simply
+    /// use LLVM's default address space (0). Some other targets, such as CHERI targets, use a
+    /// custom default address space (in this specific case, `200`).
+    pub default_address_space: rustc_abi::AddressSpace,
+
     /// Whether the targets supports -Z small-data-threshold
     small_data_threshold_support: SmallDataThresholdSupport,
 }
@@ -2878,6 +2898,7 @@ impl Default for TargetOptions {
             entry_name: "main".into(),
             entry_abi: CanonAbi::C,
             supports_xray: false,
+            default_address_space: rustc_abi::AddressSpace::ZERO,
             small_data_threshold_support: SmallDataThresholdSupport::DefaultForArch,
         }
     }
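
A minimal sketch of the new `default_address_space` knob using stand-in types (not the real `rustc_abi::AddressSpace` or `TargetOptions`): most targets keep address space 0, while a CHERI-like target would override it; the `200` below is simply the value mentioned in the doc comment above.

```rust
// Stand-in types for illustration only.
#[derive(Clone, Copy, PartialEq, Debug)]
struct AddressSpace(u32);

impl AddressSpace {
    const ZERO: AddressSpace = AddressSpace(0);
}

struct TargetOptions {
    default_address_space: AddressSpace,
}

impl Default for TargetOptions {
    fn default() -> Self {
        TargetOptions { default_address_space: AddressSpace::ZERO }
    }
}

fn main() {
    // The default mirrors the new field's initializer above.
    assert_eq!(TargetOptions::default().default_address_space, AddressSpace::ZERO);

    // A hypothetical CHERI-like target overriding the default.
    let cheri_like = TargetOptions { default_address_space: AddressSpace(200) };
    assert_eq!(cheri_like.default_address_space, AddressSpace(200));
}
```
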
diff --git a/compiler/rustc_trait_selection/src/error_reporting/traits/ambiguity.rs b/compiler/rustc_trait_selection/src/error_reporting/traits/ambiguity.rs
index 39f115ce0cd..a8ba1baf6b9 100644
--- a/compiler/rustc_trait_selection/src/error_reporting/traits/ambiguity.rs
+++ b/compiler/rustc_trait_selection/src/error_reporting/traits/ambiguity.rs
@@ -4,7 +4,7 @@ use rustc_errors::{Applicability, Diag, E0283, E0284, E0790, MultiSpan, struct_s
 use rustc_hir as hir;
 use rustc_hir::LangItem;
 use rustc_hir::def::{DefKind, Res};
-use rustc_hir::def_id::DefId;
+use rustc_hir::def_id::{CRATE_DEF_ID, DefId};
 use rustc_hir::intravisit::Visitor as _;
 use rustc_infer::infer::{BoundRegionConversionTime, InferCtxt};
 use rustc_infer::traits::util::elaborate;
@@ -128,19 +128,26 @@ pub fn compute_applicable_impls_for_diagnostics<'tcx>(
         },
     );
 
-    let predicates =
-        tcx.predicates_of(obligation.cause.body_id.to_def_id()).instantiate_identity(tcx);
-    for (pred, span) in elaborate(tcx, predicates.into_iter()) {
-        let kind = pred.kind();
-        if let ty::ClauseKind::Trait(trait_pred) = kind.skip_binder()
-            && param_env_candidate_may_apply(kind.rebind(trait_pred))
-        {
-            if kind.rebind(trait_pred.trait_ref)
-                == ty::Binder::dummy(ty::TraitRef::identity(tcx, trait_pred.def_id()))
+    // If our `body_id` has been set (and isn't just from a dummy obligation cause),
+    // then try to look for a param-env clause that would apply. The way we compute
+    // this is somewhat manual, since we need the spans, so we elaborate this directly
+    // from `predicates_of` rather than actually looking at the param-env which
+    // otherwise would be more appropriate.
+    let body_id = obligation.cause.body_id;
+    if body_id != CRATE_DEF_ID {
+        let predicates = tcx.predicates_of(body_id.to_def_id()).instantiate_identity(tcx);
+        for (pred, span) in elaborate(tcx, predicates.into_iter()) {
+            let kind = pred.kind();
+            if let ty::ClauseKind::Trait(trait_pred) = kind.skip_binder()
+                && param_env_candidate_may_apply(kind.rebind(trait_pred))
             {
-                ambiguities.push(CandidateSource::ParamEnv(tcx.def_span(trait_pred.def_id())))
-            } else {
-                ambiguities.push(CandidateSource::ParamEnv(span))
+                if kind.rebind(trait_pred.trait_ref)
+                    == ty::Binder::dummy(ty::TraitRef::identity(tcx, trait_pred.def_id()))
+                {
+                    ambiguities.push(CandidateSource::ParamEnv(tcx.def_span(trait_pred.def_id())))
+                } else {
+                    ambiguities.push(CandidateSource::ParamEnv(span))
+                }
             }
         }
     }
diff --git a/compiler/rustc_trait_selection/src/error_reporting/traits/fulfillment_errors.rs b/compiler/rustc_trait_selection/src/error_reporting/traits/fulfillment_errors.rs
index 28d572b303a..cd3e6c4bc54 100644
--- a/compiler/rustc_trait_selection/src/error_reporting/traits/fulfillment_errors.rs
+++ b/compiler/rustc_trait_selection/src/error_reporting/traits/fulfillment_errors.rs
@@ -1933,7 +1933,7 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> {
                     StringPart::highlighted("multiple different versions".to_string()),
                     StringPart::normal(" of crate `".to_string()),
                     StringPart::highlighted(format!("{crate_name}")),
-                    StringPart::normal("` in the dependency graph\n".to_string()),
+                    StringPart::normal("` in the dependency graph".to_string()),
                 ],
             );
             if points_at_type {
diff --git a/compiler/rustc_trait_selection/src/error_reporting/traits/suggestions.rs b/compiler/rustc_trait_selection/src/error_reporting/traits/suggestions.rs
index 362052e9fdb..8c9eb41568f 100644
--- a/compiler/rustc_trait_selection/src/error_reporting/traits/suggestions.rs
+++ b/compiler/rustc_trait_selection/src/error_reporting/traits/suggestions.rs
@@ -1187,6 +1187,13 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> {
         has_custom_message: bool,
     ) -> bool {
         let span = obligation.cause.span;
+        let param_env = obligation.param_env;
+
+        let mk_result = |trait_pred_and_new_ty| {
+            let obligation =
+                self.mk_trait_obligation_with_new_self_ty(param_env, trait_pred_and_new_ty);
+            self.predicate_must_hold_modulo_regions(&obligation)
+        };
 
         let code = match obligation.cause.code() {
             ObligationCauseCode::FunctionArg { parent_code, .. } => parent_code,
@@ -1195,6 +1202,76 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> {
             c @ ObligationCauseCode::WhereClauseInExpr(_, _, hir_id, _)
                 if self.tcx.hir_span(*hir_id).lo() == span.lo() =>
             {
+                // `hir_id` corresponds to the HIR node that introduced a `where`-clause obligation.
+                // If that obligation comes from a type in an associated method call, we need
+                // special handling here.
+                if let hir::Node::Expr(expr) = self.tcx.parent_hir_node(*hir_id)
+                    && let hir::ExprKind::Call(base, _) = expr.kind
+                    && let hir::ExprKind::Path(hir::QPath::TypeRelative(ty, segment)) = base.kind
+                    && let hir::Node::Expr(outer) = self.tcx.parent_hir_node(expr.hir_id)
+                    && let hir::ExprKind::AddrOf(hir::BorrowKind::Ref, mtbl, _) = outer.kind
+                    && ty.span == span
+                {
+                    // We've encountered something like `&str::from("")`, where the intended code
+                    // was likely `<&str>::from("")`. The former is interpreted as "call method
+                    // `from` on `str` and borrow the result", while the latter means "call method
+                    // `from` on `&str`".
+
+                    let trait_pred_and_imm_ref = poly_trait_pred.map_bound(|p| {
+                        (p, Ty::new_imm_ref(self.tcx, self.tcx.lifetimes.re_static, p.self_ty()))
+                    });
+                    let trait_pred_and_mut_ref = poly_trait_pred.map_bound(|p| {
+                        (p, Ty::new_mut_ref(self.tcx, self.tcx.lifetimes.re_static, p.self_ty()))
+                    });
+
+                    let imm_ref_self_ty_satisfies_pred = mk_result(trait_pred_and_imm_ref);
+                    let mut_ref_self_ty_satisfies_pred = mk_result(trait_pred_and_mut_ref);
+                    let sugg_msg = |pre: &str| {
+                        format!(
+                            "you likely meant to call the associated function `{FN}` for type \
+                             `&{pre}{TY}`, but the code as written calls associated function `{FN}` on \
+                             type `{TY}`",
+                            FN = segment.ident,
+                            TY = poly_trait_pred.self_ty(),
+                        )
+                    };
+                    match (imm_ref_self_ty_satisfies_pred, mut_ref_self_ty_satisfies_pred, mtbl) {
+                        (true, _, hir::Mutability::Not) | (_, true, hir::Mutability::Mut) => {
+                            err.multipart_suggestion_verbose(
+                                sugg_msg(mtbl.prefix_str()),
+                                vec![
+                                    (outer.span.shrink_to_lo(), "<".to_string()),
+                                    (span.shrink_to_hi(), ">".to_string()),
+                                ],
+                                Applicability::MachineApplicable,
+                            );
+                        }
+                        (true, _, hir::Mutability::Mut) => {
+                            // There's an associated function found on the immutable borrow of the
+                            // type, but the code as written borrows mutably.
+                            err.multipart_suggestion_verbose(
+                                sugg_msg("mut "),
+                                vec![
+                                    (outer.span.shrink_to_lo().until(span), "<&".to_string()),
+                                    (span.shrink_to_hi(), ">".to_string()),
+                                ],
+                                Applicability::MachineApplicable,
+                            );
+                        }
+                        (_, true, hir::Mutability::Not) => {
+                            err.multipart_suggestion_verbose(
+                                sugg_msg(""),
+                                vec![
+                                    (outer.span.shrink_to_lo().until(span), "<&mut ".to_string()),
+                                    (span.shrink_to_hi(), ">".to_string()),
+                                ],
+                                Applicability::MachineApplicable,
+                            );
+                        }
+                        _ => {}
+                    }
+                    // If we didn't return early here, we would instead suggest `&&str::from("")`.
+                    return false;
+                }
                 c
             }
             c if matches!(
@@ -1220,8 +1297,6 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> {
             never_suggest_borrow.push(def_id);
         }
 
-        let param_env = obligation.param_env;
-
         // Try to apply the original trait bound by borrowing.
         let mut try_borrowing = |old_pred: ty::PolyTraitPredicate<'tcx>,
                                  blacklist: &[DefId]|
@@ -1243,15 +1318,10 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> {
                 )
             });
 
-            let mk_result = |trait_pred_and_new_ty| {
-                let obligation =
-                    self.mk_trait_obligation_with_new_self_ty(param_env, trait_pred_and_new_ty);
-                self.predicate_must_hold_modulo_regions(&obligation)
-            };
             let imm_ref_self_ty_satisfies_pred = mk_result(trait_pred_and_imm_ref);
             let mut_ref_self_ty_satisfies_pred = mk_result(trait_pred_and_mut_ref);
 
-            let (ref_inner_ty_satisfies_pred, ref_inner_ty_mut) =
+            let (ref_inner_ty_satisfies_pred, ref_inner_ty_is_mut) =
                 if let ObligationCauseCode::WhereClauseInExpr(..) = obligation.cause.code()
                     && let ty::Ref(_, ty, mutability) = old_pred.self_ty().skip_binder().kind()
                 {
@@ -1263,117 +1333,139 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> {
                     (false, false)
                 };
 
-            if imm_ref_self_ty_satisfies_pred
-                || mut_ref_self_ty_satisfies_pred
-                || ref_inner_ty_satisfies_pred
-            {
-                if let Ok(snippet) = self.tcx.sess.source_map().span_to_snippet(span) {
-                    // We don't want a borrowing suggestion on the fields in structs,
-                    // ```
-                    // struct Foo {
-                    //  the_foos: Vec<Foo>
-                    // }
-                    // ```
-                    if !matches!(
-                        span.ctxt().outer_expn_data().kind,
-                        ExpnKind::Root | ExpnKind::Desugaring(DesugaringKind::ForLoop)
-                    ) {
-                        return false;
-                    }
-                    if snippet.starts_with('&') {
-                        // This is already a literal borrow and the obligation is failing
-                        // somewhere else in the obligation chain. Do not suggest non-sense.
-                        return false;
-                    }
-                    // We have a very specific type of error, where just borrowing this argument
-                    // might solve the problem. In cases like this, the important part is the
-                    // original type obligation, not the last one that failed, which is arbitrary.
-                    // Because of this, we modify the error to refer to the original obligation and
-                    // return early in the caller.
-
-                    let msg = format!(
-                        "the trait bound `{}` is not satisfied",
-                        self.tcx.short_string(old_pred, err.long_ty_path()),
-                    );
-                    let self_ty_str =
-                        self.tcx.short_string(old_pred.self_ty().skip_binder(), err.long_ty_path());
-                    if has_custom_message {
-                        err.note(msg);
-                    } else {
-                        err.messages = vec![(rustc_errors::DiagMessage::from(msg), Style::NoStyle)];
-                    }
-                    err.span_label(
-                        span,
-                        format!(
-                            "the trait `{}` is not implemented for `{self_ty_str}`",
-                            old_pred.print_modifiers_and_trait_path()
-                        ),
-                    );
+            let is_immut = imm_ref_self_ty_satisfies_pred
+                || (ref_inner_ty_satisfies_pred && !ref_inner_ty_is_mut);
+            let is_mut = mut_ref_self_ty_satisfies_pred || ref_inner_ty_is_mut;
+            if !is_immut && !is_mut {
+                return false;
+            }
+            let Ok(_snippet) = self.tcx.sess.source_map().span_to_snippet(span) else {
+                return false;
+            };
+            // We don't want a borrowing suggestion on the fields in structs
+            // ```
+            // #[derive(Clone)]
+            // struct Foo {
+            //     the_foos: Vec<Foo>
+            // }
+            // ```
+            if !matches!(
+                span.ctxt().outer_expn_data().kind,
+                ExpnKind::Root | ExpnKind::Desugaring(DesugaringKind::ForLoop)
+            ) {
+                return false;
+            }
+            // We have a very specific type of error, where just borrowing this argument
+            // might solve the problem. In cases like this, the important part is the
+            // original type obligation, not the last one that failed, which is arbitrary.
+            // Because of this, we modify the error to refer to the original obligation and
+            // return early in the caller.
 
-                    if imm_ref_self_ty_satisfies_pred && mut_ref_self_ty_satisfies_pred {
-                        err.span_suggestions(
-                            span.shrink_to_lo(),
-                            "consider borrowing here",
-                            ["&".to_string(), "&mut ".to_string()],
-                            Applicability::MaybeIncorrect,
-                        );
-                    } else {
-                        let is_mut = mut_ref_self_ty_satisfies_pred || ref_inner_ty_mut;
-                        let sugg_prefix = format!("&{}", if is_mut { "mut " } else { "" });
-                        let sugg_msg = format!(
-                            "consider{} borrowing here",
-                            if is_mut { " mutably" } else { "" }
-                        );
+            let mut label = || {
+                let msg = format!(
+                    "the trait bound `{}` is not satisfied",
+                    self.tcx.short_string(old_pred, err.long_ty_path()),
+                );
+                let self_ty_str =
+                    self.tcx.short_string(old_pred.self_ty().skip_binder(), err.long_ty_path());
+                if has_custom_message {
+                    err.note(msg);
+                } else {
+                    err.messages = vec![(rustc_errors::DiagMessage::from(msg), Style::NoStyle)];
+                }
+                err.span_label(
+                    span,
+                    format!(
+                        "the trait `{}` is not implemented for `{self_ty_str}`",
+                        old_pred.print_modifiers_and_trait_path()
+                    ),
+                );
+            };
 
-                        // Issue #109436, we need to add parentheses properly for method calls
-                        // for example, `foo.into()` should be `(&foo).into()`
-                        if let Some(_) =
-                            self.tcx.sess.source_map().span_look_ahead(span, ".", Some(50))
-                        {
-                            err.multipart_suggestion_verbose(
-                                sugg_msg,
-                                vec![
-                                    (span.shrink_to_lo(), format!("({sugg_prefix}")),
-                                    (span.shrink_to_hi(), ")".to_string()),
-                                ],
-                                Applicability::MaybeIncorrect,
-                            );
-                            return true;
-                        }
+            let mut sugg_prefixes = vec![];
+            if is_immut {
+                sugg_prefixes.push("&");
+            }
+            if is_mut {
+                sugg_prefixes.push("&mut ");
+            }
+            let sugg_msg = format!(
+                "consider{} borrowing here",
+                if is_mut && !is_immut { " mutably" } else { "" },
+            );
 
-                        // Issue #104961, we need to add parentheses properly for compound expressions
-                        // for example, `x.starts_with("hi".to_string() + "you")`
-                        // should be `x.starts_with(&("hi".to_string() + "you"))`
-                        let Some(body) = self.tcx.hir_maybe_body_owned_by(obligation.cause.body_id)
-                        else {
-                            return false;
-                        };
-                        let mut expr_finder = FindExprBySpan::new(span, self.tcx);
-                        expr_finder.visit_expr(body.value);
-                        let Some(expr) = expr_finder.result else {
-                            return false;
-                        };
-                        let needs_parens = expr_needs_parens(expr);
+            // Issue #104961, we need to add parentheses properly for compound expressions
+            // for example, `x.starts_with("hi".to_string() + "you")`
+            // should be `x.starts_with(&("hi".to_string() + "you"))`
+            let Some(body) = self.tcx.hir_maybe_body_owned_by(obligation.cause.body_id) else {
+                return false;
+            };
+            let mut expr_finder = FindExprBySpan::new(span, self.tcx);
+            expr_finder.visit_expr(body.value);
 
-                        let span = if needs_parens { span } else { span.shrink_to_lo() };
-                        let suggestions = if !needs_parens {
-                            vec![(span.shrink_to_lo(), sugg_prefix)]
-                        } else {
+            if let Some(ty) = expr_finder.ty_result {
+                if let hir::Node::Expr(expr) = self.tcx.parent_hir_node(ty.hir_id)
+                    && let hir::ExprKind::Path(hir::QPath::TypeRelative(_, _)) = expr.kind
+                    && ty.span == span
+                {
+                    // We've encountered something like `str::from("")`, where the intended code
+                    // was likely `<&str>::from("")`. #143393.
+                    label();
+                    err.multipart_suggestions(
+                        sugg_msg,
+                        sugg_prefixes.into_iter().map(|sugg_prefix| {
                             vec![
-                                (span.shrink_to_lo(), format!("{sugg_prefix}(")),
-                                (span.shrink_to_hi(), ")".to_string()),
+                                (span.shrink_to_lo(), format!("<{sugg_prefix}")),
+                                (span.shrink_to_hi(), ">".to_string()),
                             ]
-                        };
-                        err.multipart_suggestion_verbose(
-                            sugg_msg,
-                            suggestions,
-                            Applicability::MaybeIncorrect,
-                        );
-                    }
+                        }),
+                        Applicability::MaybeIncorrect,
+                    );
                     return true;
                 }
+                return false;
             }
-            return false;
+            let Some(expr) = expr_finder.result else {
+                return false;
+            };
+            if let hir::ExprKind::AddrOf(_, _, _) = expr.kind {
+                return false;
+            }
+            let needs_parens_post = expr_needs_parens(expr);
+            let needs_parens_pre = match self.tcx.parent_hir_node(expr.hir_id) {
+                Node::Expr(e)
+                    if let hir::ExprKind::MethodCall(_, base, _, _) = e.kind
+                        && base.hir_id == expr.hir_id =>
+                {
+                    true
+                }
+                _ => false,
+            };
+
+            label();
+            let suggestions = sugg_prefixes.into_iter().map(|sugg_prefix| {
+                match (needs_parens_pre, needs_parens_post) {
+                    (false, false) => vec![(span.shrink_to_lo(), sugg_prefix.to_string())],
+                    // We have something like `foo.bar()`, where we want to borrow foo, so we need
+                    // to suggest `(&mut foo).bar()`.
+                    (false, true) => vec![
+                        (span.shrink_to_lo(), format!("{sugg_prefix}(")),
+                        (span.shrink_to_hi(), ")".to_string()),
+                    ],
+                    // Issue #109436, we need to add parentheses properly for method calls
+                    // for example, `foo.into()` should be `(&foo).into()`
+                    (true, false) => vec![
+                        (span.shrink_to_lo(), format!("({sugg_prefix}")),
+                        (span.shrink_to_hi(), ")".to_string()),
+                    ],
+                    (true, true) => vec![
+                        (span.shrink_to_lo(), format!("({sugg_prefix}(")),
+                        (span.shrink_to_hi(), "))".to_string()),
+                    ],
+                }
+            });
+            err.multipart_suggestions(sugg_msg, suggestions, Applicability::MaybeIncorrect);
+            return true;
         };
 
         if let ObligationCauseCode::ImplDerived(cause) = &*code {
@@ -1511,12 +1603,15 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> {
         'outer: loop {
             while let hir::ExprKind::AddrOf(_, _, borrowed) = expr.kind {
                 count += 1;
-                let span = if expr.span.eq_ctxt(borrowed.span) {
-                    expr.span.until(borrowed.span)
-                } else {
-                    expr.span.with_hi(expr.span.lo() + BytePos(1))
-                };
+                let span =
+                    if let Some(borrowed_span) = borrowed.span.find_ancestor_inside(expr.span) {
+                        expr.span.until(borrowed_span)
+                    } else {
+                        break 'outer;
+                    };
 
+                // Double check that the span we extracted actually corresponds to a borrow,
+                // rather than some macro garbage.
                 match self.tcx.sess.source_map().span_to_snippet(span) {
                     Ok(snippet) if snippet.starts_with("&") => {}
                     _ => break 'outer,
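
For context on the two `TypeRelative` special cases added above: they target calls like `&str::from("...")` or `str::from("...")`, which resolve the associated function on `str` itself and then fail the trait bound, when the intent was almost certainly to call it on `&str`. A user-level before/after in ordinary stable Rust (the failing form is kept commented out so the snippet compiles):

```rust
fn main() {
    // What the new diagnostic recognizes (does not compile, hence commented out):
    // let s = &str::from("hello");     // parsed as `&(str::from("hello"))`
    // let s = &mut str::from("hello"); // same shape with a mutable borrow

    // The shape the suggestion rewrites it into: qualify the receiver type instead.
    let s = <&str>::from("hello"); // resolves via the identity `impl<T> From<T> for T`
    assert_eq!(s, "hello");
}
```
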
diff --git a/compiler/rustc_trait_selection/src/traits/effects.rs b/compiler/rustc_trait_selection/src/traits/effects.rs
index 176d308de91..d694a092853 100644
--- a/compiler/rustc_trait_selection/src/traits/effects.rs
+++ b/compiler/rustc_trait_selection/src/traits/effects.rs
@@ -1,7 +1,8 @@
 use rustc_hir::{self as hir, LangItem};
 use rustc_infer::infer::{BoundRegionConversionTime, DefineOpaqueTypes};
 use rustc_infer::traits::{
-    ImplDerivedHostCause, ImplSource, Obligation, ObligationCauseCode, PredicateObligation,
+    ImplDerivedHostCause, ImplSource, Obligation, ObligationCause, ObligationCauseCode,
+    PredicateObligation,
 };
 use rustc_middle::span_bug;
 use rustc_middle::traits::query::NoSolution;
@@ -303,6 +304,9 @@ fn evaluate_host_effect_from_builtin_impls<'tcx>(
 ) -> Result<ThinVec<PredicateObligation<'tcx>>, EvaluationFailure> {
     match selcx.tcx().as_lang_item(obligation.predicate.def_id()) {
         Some(LangItem::Destruct) => evaluate_host_effect_for_destruct_goal(selcx, obligation),
+        Some(LangItem::Fn | LangItem::FnMut | LangItem::FnOnce) => {
+            evaluate_host_effect_for_fn_goal(selcx, obligation)
+        }
         _ => Err(EvaluationFailure::NoSolution),
     }
 }
@@ -398,6 +402,51 @@ fn evaluate_host_effect_for_destruct_goal<'tcx>(
         .collect())
 }
 
+// NOTE: Keep this in sync with `extract_fn_def_from_const_callable` in the new solver.
+fn evaluate_host_effect_for_fn_goal<'tcx>(
+    selcx: &mut SelectionContext<'_, 'tcx>,
+    obligation: &HostEffectObligation<'tcx>,
+) -> Result<ThinVec<PredicateObligation<'tcx>>, EvaluationFailure> {
+    let tcx = selcx.tcx();
+    let self_ty = obligation.predicate.self_ty();
+
+    let (def, args) = match *self_ty.kind() {
+        ty::FnDef(def, args) => (def, args),
+
+        // We may support function pointers at some point in the future
+        ty::FnPtr(..) => return Err(EvaluationFailure::NoSolution),
+
+        // Closures could implement `[const] Fn`,
+        // but they don't really need to right now.
+        ty::Closure(..) | ty::CoroutineClosure(_, _) => {
+            return Err(EvaluationFailure::NoSolution);
+        }
+
+        // Everything else needs explicit impls or cannot have an impl
+        _ => return Err(EvaluationFailure::NoSolution),
+    };
+
+    match tcx.constness(def) {
+        hir::Constness::Const => Ok(tcx
+            .const_conditions(def)
+            .instantiate(tcx, args)
+            .into_iter()
+            .map(|(c, span)| {
+                let code = ObligationCauseCode::WhereClause(def, span);
+                let cause =
+                    ObligationCause::new(obligation.cause.span, obligation.cause.body_id, code);
+                Obligation::new(
+                    tcx,
+                    cause,
+                    obligation.param_env,
+                    c.to_host_effect_clause(tcx, obligation.predicate.constness),
+                )
+            })
+            .collect()),
+        hir::Constness::NotConst => Err(EvaluationFailure::NoSolution),
+    }
+}
+
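
The following standalone sketch (plain enums, not rustc's `ty` or selection machinery) mirrors the decision `evaluate_host_effect_for_fn_goal` makes: only a `FnDef` of an actually-`const` function can discharge a `[const] Fn*` host-effect goal, with its const conditions becoming the nested obligations; function pointers, closures, and everything else currently return "no solution".

```rust
// Stand-in for the shape of the match above; the strings model nested obligations.
enum SelfTy {
    FnDef { is_const: bool, const_conditions: Vec<&'static str> },
    FnPtr,
    Closure,
    Other,
}

fn evaluate_fn_goal(self_ty: SelfTy) -> Result<Vec<&'static str>, &'static str> {
    match self_ty {
        // A `const fn`: succeed, and register its const conditions as new obligations.
        SelfTy::FnDef { is_const: true, const_conditions } => Ok(const_conditions),
        // A non-const fn can never satisfy a `[const] Fn*` goal.
        SelfTy::FnDef { is_const: false, .. } => Err("NoSolution"),
        // Function pointers may be supported later; closures could implement `[const] Fn`
        // but don't need to yet; everything else needs explicit impls.
        SelfTy::FnPtr | SelfTy::Closure | SelfTy::Other => Err("NoSolution"),
    }
}

fn main() {
    let goal = SelfTy::FnDef { is_const: true, const_conditions: vec!["T: [const] Clone"] };
    assert_eq!(evaluate_fn_goal(goal), Ok(vec!["T: [const] Clone"]));
    assert!(evaluate_fn_goal(SelfTy::FnPtr).is_err());
}
```
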
 fn evaluate_host_effect_from_selection_candidate<'tcx>(
     selcx: &mut SelectionContext<'_, 'tcx>,
     obligation: &HostEffectObligation<'tcx>,
diff --git a/compiler/rustc_trait_selection/src/traits/select/candidate_assembly.rs b/compiler/rustc_trait_selection/src/traits/select/candidate_assembly.rs
index 81ce58a93fa..cc188a280aa 100644
--- a/compiler/rustc_trait_selection/src/traits/select/candidate_assembly.rs
+++ b/compiler/rustc_trait_selection/src/traits/select/candidate_assembly.rs
@@ -19,7 +19,7 @@ use rustc_middle::{bug, span_bug};
 use tracing::{debug, instrument, trace};
 
 use super::SelectionCandidate::*;
-use super::{BuiltinImplConditions, SelectionCandidateSet, SelectionContext, TraitObligationStack};
+use super::{SelectionCandidateSet, SelectionContext, TraitObligationStack};
 use crate::traits::util;
 
 impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
@@ -75,27 +75,29 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
                     self.assemble_candidates_from_impls(obligation, &mut candidates);
 
                     // For other types, we'll use the builtin rules.
-                    let copy_conditions = self.copy_clone_conditions(obligation);
-                    self.assemble_builtin_bound_candidates(copy_conditions, &mut candidates);
+                    self.assemble_builtin_copy_clone_candidate(
+                        obligation.predicate.self_ty().skip_binder(),
+                        &mut candidates,
+                    );
                 }
                 Some(LangItem::DiscriminantKind) => {
                     // `DiscriminantKind` is automatically implemented for every type.
-                    candidates.vec.push(BuiltinCandidate { has_nested: false });
+                    candidates.vec.push(BuiltinCandidate);
                 }
                 Some(LangItem::PointeeTrait) => {
                     // `Pointee` is automatically implemented for every type.
-                    candidates.vec.push(BuiltinCandidate { has_nested: false });
+                    candidates.vec.push(BuiltinCandidate);
                 }
                 Some(LangItem::Sized) => {
                     self.assemble_builtin_sized_candidate(
-                        obligation,
+                        obligation.predicate.self_ty().skip_binder(),
                         &mut candidates,
                         SizedTraitKind::Sized,
                     );
                 }
                 Some(LangItem::MetaSized) => {
                     self.assemble_builtin_sized_candidate(
-                        obligation,
+                        obligation.predicate.self_ty().skip_binder(),
                         &mut candidates,
                         SizedTraitKind::MetaSized,
                     );
@@ -357,15 +359,8 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
         obligation: &PolyTraitObligation<'tcx>,
         candidates: &mut SelectionCandidateSet<'tcx>,
     ) {
-        let self_ty = obligation.self_ty().skip_binder();
-        // gen constructs get lowered to a special kind of coroutine that
-        // should directly `impl FusedIterator`.
-        if let ty::Coroutine(did, ..) = self_ty.kind()
-            && self.tcx().coroutine_is_gen(*did)
-        {
-            debug!(?self_ty, ?obligation, "assemble_fused_iterator_candidates",);
-
-            candidates.vec.push(BuiltinCandidate { has_nested: false });
+        if self.coroutine_is_gen(obligation.self_ty().skip_binder()) {
+            candidates.vec.push(BuiltinCandidate);
         }
     }
 
@@ -810,7 +805,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
                         hir::Movability::Movable => {
                             // Movable coroutines are always `Unpin`, so add an
                             // unconditional builtin candidate.
-                            candidates.vec.push(BuiltinCandidate { has_nested: false });
+                            candidates.vec.push(BuiltinCandidate);
                         }
                     }
                 }
@@ -1113,45 +1108,164 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
         }
     }
 
-    /// Assembles the `Sized` and `MetaSized` traits which are built-in to the language itself.
+    /// Assembles `Copy` and `Clone` candidates for built-in types with no libcore-defined
+    /// `Copy` or `Clone` impls.
     #[instrument(level = "debug", skip(self, candidates))]
-    fn assemble_builtin_sized_candidate(
+    fn assemble_builtin_copy_clone_candidate(
         &mut self,
-        obligation: &PolyTraitObligation<'tcx>,
+        self_ty: Ty<'tcx>,
         candidates: &mut SelectionCandidateSet<'tcx>,
-        sizedness: SizedTraitKind,
     ) {
-        match self.sizedness_conditions(obligation, sizedness) {
-            BuiltinImplConditions::Where(nested) => {
-                candidates
-                    .vec
-                    .push(SizedCandidate { has_nested: !nested.skip_binder().is_empty() });
+        match *self_ty.kind() {
+            // These impls are built-in because we cannot express sufficiently
+            // generic impls in libcore.
+            ty::FnDef(..)
+            | ty::FnPtr(..)
+            | ty::Error(_)
+            | ty::Tuple(..)
+            | ty::CoroutineWitness(..)
+            | ty::Pat(..) => {
+                candidates.vec.push(BuiltinCandidate);
+            }
+
+            // Implementations provided in libcore.
+            ty::Uint(_)
+            | ty::Int(_)
+            | ty::Infer(ty::IntVar(_) | ty::FloatVar(_))
+            | ty::Bool
+            | ty::Float(_)
+            | ty::Char
+            | ty::RawPtr(..)
+            | ty::Never
+            | ty::Ref(_, _, hir::Mutability::Not)
+            | ty::Array(..) => {}
+
+            // FIXME(unsafe_binder): Should we conditionally
+            // (i.e. universally) implement copy/clone?
+            ty::UnsafeBinder(_) => {}
+
+            // Not `Sized`, which is a supertrait of `Copy`/`Clone`.
+            ty::Dynamic(..) | ty::Str | ty::Slice(..) | ty::Foreign(..) => {}
+
+            // Not `Copy` or `Clone` by design.
+            ty::Ref(_, _, hir::Mutability::Mut) => {}
+
+            ty::Coroutine(coroutine_def_id, args) => {
+                match self.tcx().coroutine_movability(coroutine_def_id) {
+                    hir::Movability::Static => {}
+                    hir::Movability::Movable => {
+                        if self.tcx().features().coroutine_clone() {
+                            let resolved_upvars =
+                                self.infcx.shallow_resolve(args.as_coroutine().tupled_upvars_ty());
+                            let resolved_witness =
+                                self.infcx.shallow_resolve(args.as_coroutine().witness());
+                            if resolved_upvars.is_ty_var() || resolved_witness.is_ty_var() {
+                                // Not yet resolved.
+                                candidates.ambiguous = true;
+                            } else {
+                                candidates.vec.push(BuiltinCandidate);
+                            }
+                        }
+                    }
+                }
+            }
+
+            ty::Closure(_, args) => {
+                let resolved_upvars =
+                    self.infcx.shallow_resolve(args.as_closure().tupled_upvars_ty());
+                if resolved_upvars.is_ty_var() {
+                    // Not yet resolved.
+                    candidates.ambiguous = true;
+                } else {
+                    candidates.vec.push(BuiltinCandidate);
+                }
+            }
+
+            ty::CoroutineClosure(_, args) => {
+                let resolved_upvars =
+                    self.infcx.shallow_resolve(args.as_coroutine_closure().tupled_upvars_ty());
+                if resolved_upvars.is_ty_var() {
+                    // Not yet resolved.
+                    candidates.ambiguous = true;
+                } else {
+                    candidates.vec.push(BuiltinCandidate);
+                }
             }
-            BuiltinImplConditions::None => {}
-            BuiltinImplConditions::Ambiguous => {
+
+            // Fallback to whatever user-defined impls or param-env clauses exist in this case.
+            ty::Adt(..) | ty::Alias(..) | ty::Param(..) | ty::Placeholder(..) => {}
+
+            ty::Infer(ty::TyVar(_)) => {
                 candidates.ambiguous = true;
             }
+
+            // Only appears when assembling higher-ranked `for<T> T: Clone`.
+            ty::Bound(..) => {}
+
+            ty::Infer(ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_)) => {
+                bug!("asked to assemble builtin bounds of unexpected type: {:?}", self_ty);
+            }
         }
     }
 
-    /// Assembles the trait which are built-in to the language itself:
-    /// e.g. `Copy` and `Clone`.
+    /// Assembles the `Sized` and `MetaSized` traits which are built-in to the language itself.
     #[instrument(level = "debug", skip(self, candidates))]
-    fn assemble_builtin_bound_candidates(
+    fn assemble_builtin_sized_candidate(
         &mut self,
-        conditions: BuiltinImplConditions<'tcx>,
+        self_ty: Ty<'tcx>,
         candidates: &mut SelectionCandidateSet<'tcx>,
+        sizedness: SizedTraitKind,
     ) {
-        match conditions {
-            BuiltinImplConditions::Where(nested) => {
-                candidates
-                    .vec
-                    .push(BuiltinCandidate { has_nested: !nested.skip_binder().is_empty() });
+        match *self_ty.kind() {
+            // Always sized.
+            ty::Infer(ty::IntVar(_) | ty::FloatVar(_))
+            | ty::Uint(_)
+            | ty::Int(_)
+            | ty::Bool
+            | ty::Float(_)
+            | ty::FnDef(..)
+            | ty::FnPtr(..)
+            | ty::RawPtr(..)
+            | ty::Char
+            | ty::Ref(..)
+            | ty::Coroutine(..)
+            | ty::CoroutineWitness(..)
+            | ty::Array(..)
+            | ty::Closure(..)
+            | ty::CoroutineClosure(..)
+            | ty::Never
+            | ty::Error(_) => {
+                candidates.vec.push(SizedCandidate);
+            }
+
+            // Conditionally `Sized`.
+            ty::Tuple(..) | ty::Pat(..) | ty::Adt(..) | ty::UnsafeBinder(_) => {
+                candidates.vec.push(SizedCandidate);
             }
-            BuiltinImplConditions::None => {}
-            BuiltinImplConditions::Ambiguous => {
+
+            // `MetaSized` but not `Sized`.
+            ty::Str | ty::Slice(_) | ty::Dynamic(..) => match sizedness {
+                SizedTraitKind::Sized => {}
+                SizedTraitKind::MetaSized => {
+                    candidates.vec.push(SizedCandidate);
+                }
+            },
+
+            // Not `MetaSized` or `Sized`.
+            ty::Foreign(..) => {}
+
+            ty::Alias(..) | ty::Param(_) | ty::Placeholder(..) => {}
+
+            ty::Infer(ty::TyVar(_)) => {
                 candidates.ambiguous = true;
             }
+
+            // Only appears when assembling higher-ranked `for<T> T: Sized`.
+            ty::Bound(..) => {}
+
+            ty::Infer(ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_)) => {
+                bug!("asked to assemble builtin bounds of unexpected type: {:?}", self_ty);
+            }
         }
     }
 
@@ -1160,7 +1274,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
         _obligation: &PolyTraitObligation<'tcx>,
         candidates: &mut SelectionCandidateSet<'tcx>,
     ) {
-        candidates.vec.push(BuiltinCandidate { has_nested: false });
+        candidates.vec.push(BuiltinCandidate);
     }
 
     fn assemble_candidate_for_tuple(
@@ -1171,7 +1285,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
         let self_ty = self.infcx.shallow_resolve(obligation.self_ty().skip_binder());
         match self_ty.kind() {
             ty::Tuple(_) => {
-                candidates.vec.push(BuiltinCandidate { has_nested: false });
+                candidates.vec.push(BuiltinCandidate);
             }
             ty::Infer(ty::TyVar(_)) => {
                 candidates.ambiguous = true;
@@ -1215,7 +1329,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
         let self_ty = self.infcx.resolve_vars_if_possible(obligation.self_ty());
 
         match self_ty.skip_binder().kind() {
-            ty::FnPtr(..) => candidates.vec.push(BuiltinCandidate { has_nested: false }),
+            ty::FnPtr(..) => candidates.vec.push(BuiltinCandidate),
             ty::Bool
             | ty::Char
             | ty::Int(_)
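
At the user level, the candidates that the refactored `assemble_builtin_copy_clone_candidate` supplies are exactly the `Copy`/`Clone` impls libcore cannot express generically: tuples of arbitrary arity, function items and pointers, and closures whose captures are all `Copy`/`Clone`. A quick illustration in ordinary stable Rust (nothing here depends on compiler internals):

```rust
fn main() {
    // Function items: Copy/Clone come from the built-in candidate.
    fn add_one(x: i32) -> i32 { x + 1 }
    let f = add_one;
    let g = f; // copies the zero-sized function item
    assert_eq!((f(1), g(1)), (2, 2));

    // Tuples: no libcore impl can cover every arity, so the compiler supplies one.
    let t = (1u8, 2.5f64, 'x');
    let u = t;
    assert_eq!(t, u);

    // Closures: Copy/Clone hold when every capture is Copy/Clone.
    let offset = 10;
    let add_offset = move |x: i32| x + offset;
    let add_offset_copy = add_offset;
    assert_eq!((add_offset(1), add_offset_copy(1)), (11, 11));
}
```
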
diff --git a/compiler/rustc_trait_selection/src/traits/select/confirmation.rs b/compiler/rustc_trait_selection/src/traits/select/confirmation.rs
index a6b77583fdc..ee8cef20279 100644
--- a/compiler/rustc_trait_selection/src/traits/select/confirmation.rs
+++ b/compiler/rustc_trait_selection/src/traits/select/confirmation.rs
@@ -21,7 +21,7 @@ use thin_vec::thin_vec;
 use tracing::{debug, instrument};
 
 use super::SelectionCandidate::{self, *};
-use super::{BuiltinImplConditions, PredicateObligations, SelectionContext};
+use super::{PredicateObligations, SelectionContext};
 use crate::traits::normalize::{normalize_with_depth, normalize_with_depth_to};
 use crate::traits::util::{self, closure_trait_ref_and_return_type};
 use crate::traits::{
@@ -37,13 +37,13 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
         candidate: SelectionCandidate<'tcx>,
     ) -> Result<Selection<'tcx>, SelectionError<'tcx>> {
         Ok(match candidate {
-            SizedCandidate { has_nested } => {
-                let data = self.confirm_builtin_candidate(obligation, has_nested);
+            SizedCandidate => {
+                let data = self.confirm_builtin_candidate(obligation);
                 ImplSource::Builtin(BuiltinImplSource::Misc, data)
             }
 
-            BuiltinCandidate { has_nested } => {
-                let data = self.confirm_builtin_candidate(obligation, has_nested);
+            BuiltinCandidate => {
+                let data = self.confirm_builtin_candidate(obligation);
                 ImplSource::Builtin(BuiltinImplSource::Misc, data)
             }
 
@@ -249,50 +249,53 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
         }
     }
 
+    #[instrument(level = "debug", skip(self), ret)]
     fn confirm_builtin_candidate(
         &mut self,
         obligation: &PolyTraitObligation<'tcx>,
-        has_nested: bool,
     ) -> PredicateObligations<'tcx> {
-        debug!(?obligation, ?has_nested, "confirm_builtin_candidate");
-
+        debug!(?obligation, "confirm_builtin_candidate");
         let tcx = self.tcx();
-        let obligations = if has_nested {
-            let trait_def = obligation.predicate.def_id();
-            let conditions = match tcx.as_lang_item(trait_def) {
-                Some(LangItem::Sized) => {
-                    self.sizedness_conditions(obligation, SizedTraitKind::Sized)
-                }
-                Some(LangItem::MetaSized) => {
-                    self.sizedness_conditions(obligation, SizedTraitKind::MetaSized)
-                }
-                Some(LangItem::PointeeSized) => {
-                    bug!("`PointeeSized` is removing during lowering");
+        let trait_def = obligation.predicate.def_id();
+        let self_ty = self.infcx.shallow_resolve(
+            self.infcx.enter_forall_and_leak_universe(obligation.predicate.self_ty()),
+        );
+        let types = match tcx.as_lang_item(trait_def) {
+            Some(LangItem::Sized) => self.sizedness_conditions(self_ty, SizedTraitKind::Sized),
+            Some(LangItem::MetaSized) => {
+                self.sizedness_conditions(self_ty, SizedTraitKind::MetaSized)
+            }
+            Some(LangItem::PointeeSized) => {
+                bug!("`PointeeSized` is removed during lowering");
+            }
+            Some(LangItem::Copy | LangItem::Clone) => self.copy_clone_conditions(self_ty),
+            Some(LangItem::FusedIterator) => {
+                if self.coroutine_is_gen(self_ty) {
+                    ty::Binder::dummy(vec![])
+                } else {
+                    unreachable!("tried to assemble `FusedIterator` for non-gen coroutine");
                 }
-                Some(LangItem::Copy | LangItem::Clone) => self.copy_clone_conditions(obligation),
-                Some(LangItem::FusedIterator) => self.fused_iterator_conditions(obligation),
-                other => bug!("unexpected builtin trait {trait_def:?} ({other:?})"),
-            };
-            let BuiltinImplConditions::Where(types) = conditions else {
-                bug!("obligation {:?} had matched a builtin impl but now doesn't", obligation);
-            };
-            let types = self.infcx.enter_forall_and_leak_universe(types);
-
-            let cause = obligation.derived_cause(ObligationCauseCode::BuiltinDerived);
-            self.collect_predicates_for_types(
-                obligation.param_env,
-                cause,
-                obligation.recursion_depth + 1,
-                trait_def,
-                types,
-            )
-        } else {
-            PredicateObligations::new()
+            }
+            Some(
+                LangItem::Destruct
+                | LangItem::DiscriminantKind
+                | LangItem::FnPtrTrait
+                | LangItem::PointeeTrait
+                | LangItem::Tuple
+                | LangItem::Unpin,
+            ) => ty::Binder::dummy(vec![]),
+            other => bug!("unexpected builtin trait {trait_def:?} ({other:?})"),
         };
+        let types = self.infcx.enter_forall_and_leak_universe(types);
 
-        debug!(?obligations);
-
-        obligations
+        let cause = obligation.derived_cause(ObligationCauseCode::BuiltinDerived);
+        self.collect_predicates_for_types(
+            obligation.param_env,
+            cause,
+            obligation.recursion_depth + 1,
+            trait_def,
+            types,
+        )
     }
 
     #[instrument(level = "debug", skip(self))]
@@ -406,6 +409,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
 
             let self_ty =
                 obligation.predicate.self_ty().map_bound(|ty| self.infcx.shallow_resolve(ty));
+            let self_ty = self.infcx.enter_forall_and_leak_universe(self_ty);
 
             let types = self.constituent_types_for_ty(self_ty)?;
             let types = self.infcx.enter_forall_and_leak_universe(types);
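With `has_nested` gone, `confirm_builtin_candidate` always goes through `collect_predicates_for_types`: builtin traits whose impls have no nested requirements (`Destruct`, `DiscriminantKind`, `FnPtrTrait`, `PointeeTrait`, `Tuple`, `Unpin`) now return an empty binder of constituent types, which simply yields no obligations. A minimal standalone sketch of that collapse, using toy types rather than rustc's API:

fn collect_obligations(constituent_tys: Vec<&str>) -> Vec<String> {
    // One nested obligation per constituent type; the empty list (the former
    // `has_nested: false` case) produces no obligations at all.
    constituent_tys.into_iter().map(|ty| format!("{ty}: Trait")).collect()
}

fn main() {
    assert!(collect_obligations(vec![]).is_empty()); // e.g. `u32: Unpin`
    assert_eq!(collect_obligations(vec!["A", "B"]).len(), 2); // e.g. `(A, B): Copy`
}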
diff --git a/compiler/rustc_trait_selection/src/traits/select/mod.rs b/compiler/rustc_trait_selection/src/traits/select/mod.rs
index af3641c22ed..2e65750db25 100644
--- a/compiler/rustc_trait_selection/src/traits/select/mod.rs
+++ b/compiler/rustc_trait_selection/src/traits/select/mod.rs
@@ -188,18 +188,6 @@ struct EvaluatedCandidate<'tcx> {
     evaluation: EvaluationResult,
 }
 
-/// When does the builtin impl for `T: Trait` apply?
-#[derive(Debug)]
-enum BuiltinImplConditions<'tcx> {
-    /// The impl is conditional on `T1, T2, ...: Trait`.
-    Where(ty::Binder<'tcx, Vec<Ty<'tcx>>>),
-    /// There is no built-in impl. There may be some other
-    /// candidate (a where-clause or user-defined impl).
-    None,
-    /// It is unknown whether there is an impl.
-    Ambiguous,
-}
-
 impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
     pub fn new(infcx: &'cx InferCtxt<'tcx>) -> SelectionContext<'cx, 'tcx> {
         SelectionContext {
@@ -1834,7 +1822,7 @@ impl<'tcx> SelectionContext<'_, 'tcx> {
 
         // We prefer `Sized` candidates over everything.
         let mut sized_candidates =
-            candidates.iter().filter(|c| matches!(c.candidate, SizedCandidate { has_nested: _ }));
+            candidates.iter().filter(|c| matches!(c.candidate, SizedCandidate));
         if let Some(sized_candidate) = sized_candidates.next() {
             // There should only ever be a single sized candidate
             // as they would otherwise overlap.
@@ -1986,8 +1974,8 @@ impl<'tcx> SelectionContext<'_, 'tcx> {
             // Don't use impl candidates which overlap with other candidates.
             // This should pretty much only ever happen with malformed impls.
             if candidates.iter().all(|c| match c.candidate {
-                SizedCandidate { has_nested: _ }
-                | BuiltinCandidate { has_nested: _ }
+                SizedCandidate
+                | BuiltinCandidate
                 | TransmutabilityCandidate
                 | AutoImplCandidate
                 | ClosureCandidate { .. }
@@ -2104,14 +2092,9 @@ impl<'tcx> SelectionContext<'_, 'tcx> {
 impl<'tcx> SelectionContext<'_, 'tcx> {
     fn sizedness_conditions(
         &mut self,
-        obligation: &PolyTraitObligation<'tcx>,
+        self_ty: Ty<'tcx>,
         sizedness: SizedTraitKind,
-    ) -> BuiltinImplConditions<'tcx> {
-        use self::BuiltinImplConditions::{Ambiguous, None, Where};
-
-        // NOTE: binder moved to (*)
-        let self_ty = self.infcx.shallow_resolve(obligation.predicate.skip_binder().self_ty());
-
+    ) -> ty::Binder<'tcx, Vec<Ty<'tcx>>> {
         match self_ty.kind() {
             ty::Infer(ty::IntVar(_) | ty::FloatVar(_))
             | ty::Uint(_)
@@ -2129,59 +2112,44 @@ impl<'tcx> SelectionContext<'_, 'tcx> {
             | ty::Closure(..)
             | ty::CoroutineClosure(..)
             | ty::Never
-            | ty::Error(_) => {
-                // safe for everything
-                Where(ty::Binder::dummy(Vec::new()))
-            }
+            | ty::Error(_) => ty::Binder::dummy(vec![]),
 
             ty::Str | ty::Slice(_) | ty::Dynamic(..) => match sizedness {
-                SizedTraitKind::Sized => None,
-                SizedTraitKind::MetaSized => Where(ty::Binder::dummy(Vec::new())),
+                SizedTraitKind::Sized => unreachable!("tried to assemble `Sized` for unsized type"),
+                SizedTraitKind::MetaSized => ty::Binder::dummy(vec![]),
             },
 
-            ty::Foreign(..) => None,
+            ty::Foreign(..) => unreachable!("tried to assemble `Sized` for unsized type"),
 
-            ty::Tuple(tys) => Where(
-                obligation.predicate.rebind(tys.last().map_or_else(Vec::new, |&last| vec![last])),
-            ),
+            ty::Tuple(tys) => {
+                ty::Binder::dummy(tys.last().map_or_else(Vec::new, |&last| vec![last]))
+            }
 
-            ty::Pat(ty, _) => Where(obligation.predicate.rebind(vec![*ty])),
+            ty::Pat(ty, _) => ty::Binder::dummy(vec![*ty]),
 
             ty::Adt(def, args) => {
                 if let Some(crit) = def.sizedness_constraint(self.tcx(), sizedness) {
-                    // (*) binder moved here
-                    Where(obligation.predicate.rebind(vec![crit.instantiate(self.tcx(), args)]))
+                    ty::Binder::dummy(vec![crit.instantiate(self.tcx(), args)])
                 } else {
-                    Where(ty::Binder::dummy(Vec::new()))
+                    ty::Binder::dummy(vec![])
                 }
             }
 
-            // FIXME(unsafe_binders): This binder needs to be squashed
-            ty::UnsafeBinder(binder_ty) => Where(binder_ty.map_bound(|ty| vec![ty])),
-
-            ty::Alias(..) | ty::Param(_) | ty::Placeholder(..) => None,
-            ty::Infer(ty::TyVar(_)) => Ambiguous,
-
-            // We can make this an ICE if/once we actually instantiate the trait obligation eagerly.
-            ty::Bound(..) => None,
+            ty::UnsafeBinder(binder_ty) => binder_ty.map_bound(|ty| vec![ty]),
 
-            ty::Infer(ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_)) => {
-                bug!("asked to assemble builtin bounds of unexpected type: {:?}", self_ty);
+            ty::Alias(..)
+            | ty::Param(_)
+            | ty::Placeholder(..)
+            | ty::Infer(ty::TyVar(_) | ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_))
+            | ty::Bound(..) => {
+                bug!("asked to assemble `Sized` of unexpected type: {:?}", self_ty);
             }
         }
     }
 
-    fn copy_clone_conditions(
-        &mut self,
-        obligation: &PolyTraitObligation<'tcx>,
-    ) -> BuiltinImplConditions<'tcx> {
-        // NOTE: binder moved to (*)
-        let self_ty = self.infcx.shallow_resolve(obligation.predicate.skip_binder().self_ty());
-
-        use self::BuiltinImplConditions::{Ambiguous, None, Where};
-
+    fn copy_clone_conditions(&mut self, self_ty: Ty<'tcx>) -> ty::Binder<'tcx, Vec<Ty<'tcx>>> {
         match *self_ty.kind() {
-            ty::FnDef(..) | ty::FnPtr(..) | ty::Error(_) => Where(ty::Binder::dummy(Vec::new())),
+            ty::FnDef(..) | ty::FnPtr(..) | ty::Error(_) => ty::Binder::dummy(vec![]),
 
             ty::Uint(_)
             | ty::Int(_)
@@ -2193,127 +2161,78 @@ impl<'tcx> SelectionContext<'_, 'tcx> {
             | ty::Never
             | ty::Ref(_, _, hir::Mutability::Not)
             | ty::Array(..) => {
-                // Implementations provided in libcore
-                None
+                unreachable!("tried to assemble `Copy`/`Clone` for type with libcore-provided impl")
             }
 
             // FIXME(unsafe_binder): Should we conditionally
             // (i.e. universally) implement copy/clone?
-            ty::UnsafeBinder(_) => None,
-
-            ty::Dynamic(..)
-            | ty::Str
-            | ty::Slice(..)
-            | ty::Foreign(..)
-            | ty::Ref(_, _, hir::Mutability::Mut) => None,
+            ty::UnsafeBinder(_) => unreachable!("tried to assemble `Copy`/`Clone` for unsafe binder"),
 
             ty::Tuple(tys) => {
                 // (*) binder moved here
-                Where(obligation.predicate.rebind(tys.iter().collect()))
+                ty::Binder::dummy(tys.iter().collect())
             }
 
             ty::Pat(ty, _) => {
                 // (*) binder moved here
-                Where(obligation.predicate.rebind(vec![ty]))
+                ty::Binder::dummy(vec![ty])
             }
 
             ty::Coroutine(coroutine_def_id, args) => {
                 match self.tcx().coroutine_movability(coroutine_def_id) {
-                    hir::Movability::Static => None,
+                    hir::Movability::Static => {
+                        unreachable!("tried to assemble `Copy`/`Clone` for static coroutine")
+                    }
                     hir::Movability::Movable => {
                         if self.tcx().features().coroutine_clone() {
-                            let resolved_upvars =
-                                self.infcx.shallow_resolve(args.as_coroutine().tupled_upvars_ty());
-                            let resolved_witness =
-                                self.infcx.shallow_resolve(args.as_coroutine().witness());
-                            if resolved_upvars.is_ty_var() || resolved_witness.is_ty_var() {
-                                // Not yet resolved.
-                                Ambiguous
-                            } else {
-                                let all = args
-                                    .as_coroutine()
+                            ty::Binder::dummy(
+                                args.as_coroutine()
                                     .upvar_tys()
                                     .iter()
                                     .chain([args.as_coroutine().witness()])
-                                    .collect::<Vec<_>>();
-                                Where(obligation.predicate.rebind(all))
-                            }
+                                    .collect::<Vec<_>>(),
+                            )
                         } else {
-                            None
+                            unreachable!(
+                                "tried to assemble `Copy`/`Clone` for coroutine without the `coroutine_clone` feature"
+                            )
                         }
                     }
                 }
             }
 
-            ty::CoroutineWitness(def_id, args) => {
-                let hidden_types = rebind_coroutine_witness_types(
-                    self.infcx.tcx,
-                    def_id,
-                    args,
-                    obligation.predicate.bound_vars(),
-                );
-                Where(hidden_types)
-            }
+            ty::CoroutineWitness(def_id, args) => self
+                .infcx
+                .tcx
+                .coroutine_hidden_types(def_id)
+                .instantiate(self.infcx.tcx, args)
+                .map_bound(|witness| witness.types.to_vec()),
 
-            ty::Closure(_, args) => {
-                // (*) binder moved here
-                let ty = self.infcx.shallow_resolve(args.as_closure().tupled_upvars_ty());
-                if let ty::Infer(ty::TyVar(_)) = ty.kind() {
-                    // Not yet resolved.
-                    Ambiguous
-                } else {
-                    Where(obligation.predicate.rebind(args.as_closure().upvar_tys().to_vec()))
-                }
-            }
+            ty::Closure(_, args) => ty::Binder::dummy(args.as_closure().upvar_tys().to_vec()),
 
             ty::CoroutineClosure(_, args) => {
-                // (*) binder moved here
-                let ty = self.infcx.shallow_resolve(args.as_coroutine_closure().tupled_upvars_ty());
-                if let ty::Infer(ty::TyVar(_)) = ty.kind() {
-                    // Not yet resolved.
-                    Ambiguous
-                } else {
-                    Where(
-                        obligation
-                            .predicate
-                            .rebind(args.as_coroutine_closure().upvar_tys().to_vec()),
-                    )
-                }
+                ty::Binder::dummy(args.as_coroutine_closure().upvar_tys().to_vec())
             }
 
-            ty::Adt(..) | ty::Alias(..) | ty::Param(..) | ty::Placeholder(..) => {
-                // Fallback to whatever user-defined impls exist in this case.
-                None
-            }
-
-            ty::Infer(ty::TyVar(_)) => {
-                // Unbound type variable. Might or might not have
-                // applicable impls and so forth, depending on what
-                // those type variables wind up being bound to.
-                Ambiguous
-            }
-
-            // We can make this an ICE if/once we actually instantiate the trait obligation eagerly.
-            ty::Bound(..) => None,
-
-            ty::Infer(ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_)) => {
+            ty::Foreign(..)
+            | ty::Str
+            | ty::Slice(_)
+            | ty::Dynamic(..)
+            | ty::Adt(..)
+            | ty::Alias(..)
+            | ty::Param(..)
+            | ty::Placeholder(..)
+            | ty::Bound(..)
+            | ty::Ref(_, _, ty::Mutability::Mut)
+            | ty::Infer(ty::TyVar(_) | ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_)) => {
                 bug!("asked to assemble builtin bounds of unexpected type: {:?}", self_ty);
             }
         }
     }
 
-    fn fused_iterator_conditions(
-        &mut self,
-        obligation: &PolyTraitObligation<'tcx>,
-    ) -> BuiltinImplConditions<'tcx> {
-        let self_ty = self.infcx.shallow_resolve(obligation.self_ty().skip_binder());
-        if let ty::Coroutine(did, ..) = *self_ty.kind()
-            && self.tcx().coroutine_is_gen(did)
-        {
-            BuiltinImplConditions::Where(ty::Binder::dummy(Vec::new()))
-        } else {
-            BuiltinImplConditions::None
-        }
+    fn coroutine_is_gen(&mut self, self_ty: Ty<'tcx>) -> bool {
+        matches!(*self_ty.kind(), ty::Coroutine(did, ..)
+            if self.tcx().coroutine_is_gen(did))
     }
 
     /// For default impls, we need to break apart a type into its
@@ -2330,9 +2249,9 @@ impl<'tcx> SelectionContext<'_, 'tcx> {
     #[instrument(level = "debug", skip(self), ret)]
     fn constituent_types_for_ty(
         &self,
-        t: ty::Binder<'tcx, Ty<'tcx>>,
+        t: Ty<'tcx>,
     ) -> Result<ty::Binder<'tcx, Vec<Ty<'tcx>>>, SelectionError<'tcx>> {
-        Ok(match *t.skip_binder().kind() {
+        Ok(match *t.kind() {
             ty::Uint(_)
             | ty::Int(_)
             | ty::Bool
@@ -2349,8 +2268,7 @@ impl<'tcx> SelectionContext<'_, 'tcx> {
             // `assemble_candidates_from_auto_impls`.
             ty::Foreign(..) => ty::Binder::dummy(Vec::new()),
 
-            // FIXME(unsafe_binders): Squash the double binder for now, I guess.
-            ty::UnsafeBinder(_) => return Err(SelectionError::Unimplemented),
+            ty::UnsafeBinder(ty) => ty.map_bound(|ty| vec![ty]),
 
             // Treat this like `struct str([u8]);`
             ty::Str => ty::Binder::dummy(vec![Ty::new_slice(self.tcx(), self.tcx().types.u8)]),
@@ -2364,40 +2282,47 @@ impl<'tcx> SelectionContext<'_, 'tcx> {
                 bug!("asked to assemble constituent types of unexpected type: {:?}", t);
             }
 
-            ty::RawPtr(element_ty, _) | ty::Ref(_, element_ty, _) => t.rebind(vec![element_ty]),
+            ty::RawPtr(element_ty, _) | ty::Ref(_, element_ty, _) => {
+                ty::Binder::dummy(vec![element_ty])
+            }
 
-            ty::Pat(ty, _) | ty::Array(ty, _) | ty::Slice(ty) => t.rebind(vec![ty]),
+            ty::Pat(ty, _) | ty::Array(ty, _) | ty::Slice(ty) => ty::Binder::dummy(vec![ty]),
 
             ty::Tuple(tys) => {
                 // (T1, ..., Tn) -- meets any bound that all of T1...Tn meet
-                t.rebind(tys.iter().collect())
+                ty::Binder::dummy(tys.iter().collect())
             }
 
             ty::Closure(_, args) => {
                 let ty = self.infcx.shallow_resolve(args.as_closure().tupled_upvars_ty());
-                t.rebind(vec![ty])
+                ty::Binder::dummy(vec![ty])
             }
 
             ty::CoroutineClosure(_, args) => {
                 let ty = self.infcx.shallow_resolve(args.as_coroutine_closure().tupled_upvars_ty());
-                t.rebind(vec![ty])
+                ty::Binder::dummy(vec![ty])
             }
 
             ty::Coroutine(_, args) => {
                 let ty = self.infcx.shallow_resolve(args.as_coroutine().tupled_upvars_ty());
                 let witness = args.as_coroutine().witness();
-                t.rebind([ty].into_iter().chain(iter::once(witness)).collect())
+                ty::Binder::dummy([ty].into_iter().chain(iter::once(witness)).collect())
             }
 
-            ty::CoroutineWitness(def_id, args) => {
-                rebind_coroutine_witness_types(self.infcx.tcx, def_id, args, t.bound_vars())
-            }
+            ty::CoroutineWitness(def_id, args) => self
+                .infcx
+                .tcx
+                .coroutine_hidden_types(def_id)
+                .instantiate(self.infcx.tcx, args)
+                .map_bound(|witness| witness.types.to_vec()),
 
             // For `PhantomData<T>`, we pass `T`.
-            ty::Adt(def, args) if def.is_phantom_data() => t.rebind(args.types().collect()),
+            ty::Adt(def, args) if def.is_phantom_data() => {
+                ty::Binder::dummy(args.types().collect())
+            }
 
             ty::Adt(def, args) => {
-                t.rebind(def.all_fields().map(|f| f.ty(self.tcx(), args)).collect())
+                ty::Binder::dummy(def.all_fields().map(|f| f.ty(self.tcx(), args)).collect())
             }
 
             ty::Alias(ty::Opaque, ty::AliasTy { def_id, args, .. }) => {
@@ -2408,7 +2333,7 @@ impl<'tcx> SelectionContext<'_, 'tcx> {
                     // which enforces a DAG between the functions requiring
                     // the auto trait bounds in question.
                     match self.tcx().type_of_opaque(def_id) {
-                        Ok(ty) => t.rebind(vec![ty.instantiate(self.tcx(), args)]),
+                        Ok(ty) => ty::Binder::dummy(vec![ty.instantiate(self.tcx(), args)]),
                         Err(_) => {
                             return Err(SelectionError::OpaqueTypeAutoTraitLeakageUnknown(def_id));
                         }
@@ -2880,23 +2805,6 @@ impl<'tcx> SelectionContext<'_, 'tcx> {
     }
 }
 
-fn rebind_coroutine_witness_types<'tcx>(
-    tcx: TyCtxt<'tcx>,
-    def_id: DefId,
-    args: ty::GenericArgsRef<'tcx>,
-    bound_vars: &'tcx ty::List<ty::BoundVariableKind>,
-) -> ty::Binder<'tcx, Vec<Ty<'tcx>>> {
-    let bound_coroutine_types = tcx.coroutine_hidden_types(def_id).skip_binder();
-    let shifted_coroutine_types =
-        tcx.shift_bound_var_indices(bound_vars.len(), bound_coroutine_types.skip_binder());
-    ty::Binder::bind_with_vars(
-        ty::EarlyBinder::bind(shifted_coroutine_types.types.to_vec()).instantiate(tcx, args),
-        tcx.mk_bound_variable_kinds_from_iter(
-            bound_vars.iter().chain(bound_coroutine_types.bound_vars()),
-        ),
-    )
-}
-
 impl<'o, 'tcx> TraitObligationStack<'o, 'tcx> {
     fn list(&'o self) -> TraitObligationStackList<'o, 'tcx> {
         TraitObligationStackList::with(self)
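The deleted `BuiltinImplConditions` carried three states because these helpers were also consulted during candidate assembly; after this change they run only for types that assembly has already matched, so `sizedness_conditions` and `copy_clone_conditions` return the constituent types directly as a `ty::Binder<'tcx, Vec<Ty<'tcx>>>` and turn the old `None`/`Ambiguous` states into `bug!`/`unreachable!`. A toy sketch of the shape of that refactor, with hypothetical names rather than rustc's types:

#[allow(dead_code)]
enum Conditions {
    Where(Vec<&'static str>), // impl applies, conditional on these types
    DoesNotApply,
    Ambiguous,
}

// Old shape: every caller re-matched on all three states.
fn old_conditions(applies: bool) -> Conditions {
    if applies { Conditions::Where(vec!["T"]) } else { Conditions::DoesNotApply }
}

// New shape: only reachable once a candidate has been assembled, so the
// other two states become a hard error instead of a variant.
fn new_conditions(applies: bool) -> Vec<&'static str> {
    if applies { vec!["T"] } else { unreachable!("candidate should not have been assembled") }
}

fn main() {
    assert!(matches!(old_conditions(true), Conditions::Where(_)));
    assert_eq!(new_conditions(true), ["T"]);
}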
diff --git a/compiler/rustc_transmute/src/layout/tree.rs b/compiler/rustc_transmute/src/layout/tree.rs
index 150f5d118e0..3f83b4d50aa 100644
--- a/compiler/rustc_transmute/src/layout/tree.rs
+++ b/compiler/rustc_transmute/src/layout/tree.rs
@@ -296,7 +296,7 @@ pub(crate) mod rustc {
             }
 
             let target = cx.data_layout();
-            let pointer_size = target.pointer_size;
+            let pointer_size = target.pointer_size();
 
             match ty.kind() {
                 ty::Bool => Ok(Self::bool()),
diff --git a/compiler/rustc_ty_utils/src/assoc.rs b/compiler/rustc_ty_utils/src/assoc.rs
index 2fb3c5ff945..6a9461f2b43 100644
--- a/compiler/rustc_ty_utils/src/assoc.rs
+++ b/compiler/rustc_ty_utils/src/assoc.rs
@@ -177,6 +177,17 @@ impl<'tcx> Visitor<'tcx> for RPITVisitor<'tcx> {
     }
 }
 
+struct DisambiguatorIdxVisitor {
+    depth: u32,
+}
+
+impl<'tcx> Visitor<'tcx> for DisambiguatorIdxVisitor {
+    fn visit_opaque_ty(&mut self, opaque: &'tcx hir::OpaqueTy<'tcx>) -> Self::Result {
+        self.depth += 1;
+        intravisit::walk_opaque_ty(self, opaque)
+    }
+}
+
 /// Given an `fn_def_id` of a trait or a trait implementation:
 ///
 /// if `fn_def_id` is a function defined inside a trait, then it synthesizes
@@ -211,16 +222,19 @@ fn associated_types_for_impl_traits_in_associated_fn(
                 let disambiguator_idx = trait_item_refs
                     .iter()
                     .take_while(|item| item.id.owner_id.def_id != fn_def_id)
-                    .fold(0, |acc, item| {
-                        if !matches!(item.kind, hir::AssocItemKind::Fn { .. }) {
-                            acc
-                        } else if def_path_id(item.id.owner_id.def_id) == def_path_data {
-                            tcx.def_key(item.id.owner_id.def_id).disambiguated_data.disambiguator
-                                + 1
-                        } else {
-                            acc
-                        }
-                    });
+                    .filter(|item| {
+                        matches!(item.kind, hir::AssocItemKind::Fn { .. })
+                            && def_path_id(item.id.owner_id.def_id) == def_path_data
+                    })
+                    .last()
+                    .map(|item| {
+                        let output = tcx.hir_get_fn_output(item.id.owner_id.def_id).unwrap();
+                        let mut visitor = DisambiguatorIdxVisitor { depth: 0 };
+                        visitor.visit_fn_ret_ty(output);
+                        tcx.def_key(item.id.owner_id.def_id).disambiguated_data.disambiguator
+                            + visitor.depth
+                    })
+                    .unwrap_or_default();
 
                 let data = DefPathData::AnonAssocTy(def_path_data);
                 let mut visitor = RPITVisitor {
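The disambiguator for synthesized associated types is now computed by revisiting the return type of the last preceding method with the same name and counting every opaque type it contains, nested ones included, via `DisambiguatorIdxVisitor`, instead of the old fold that effectively assumed one opaque per function. The counting idea, sketched over a toy type tree rather than the HIR visitor API:

enum TyNode {
    Opaque(Vec<TyNode>), // an `impl Trait`, possibly containing nested opaques
    Other(Vec<TyNode>),
}

fn count_opaques(node: &TyNode) -> u32 {
    match node {
        TyNode::Opaque(children) => 1 + children.iter().map(count_opaques).sum::<u32>(),
        TyNode::Other(children) => children.iter().map(count_opaques).sum(),
    }
}

fn main() {
    // `-> impl Iterator<Item = impl Sized>` contains two opaque types.
    let ret = TyNode::Opaque(vec![TyNode::Other(vec![TyNode::Opaque(vec![])])]);
    assert_eq!(count_opaques(&ret), 2);
}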
diff --git a/compiler/rustc_ty_utils/src/layout.rs b/compiler/rustc_ty_utils/src/layout.rs
index a225b712d4b..163e2b30883 100644
--- a/compiler/rustc_ty_utils/src/layout.rs
+++ b/compiler/rustc_ty_utils/src/layout.rs
@@ -379,7 +379,7 @@ fn layout_of_uncached<'tcx>(
 
         // Potentially-wide pointers.
         ty::Ref(_, pointee, _) | ty::RawPtr(pointee, _) => {
-            let mut data_ptr = scalar_unit(Pointer(AddressSpace::DATA));
+            let mut data_ptr = scalar_unit(Pointer(AddressSpace::ZERO));
             if !ty.is_raw_ptr() {
                 data_ptr.valid_range_mut().start = 1;
             }
@@ -435,7 +435,7 @@ fn layout_of_uncached<'tcx>(
                     }
                     ty::Slice(_) | ty::Str => scalar_unit(Int(dl.ptr_sized_integer(), false)),
                     ty::Dynamic(..) => {
-                        let mut vtable = scalar_unit(Pointer(AddressSpace::DATA));
+                        let mut vtable = scalar_unit(Pointer(AddressSpace::ZERO));
                         vtable.valid_range_mut().start = 1;
                         vtable
                     }
diff --git a/compiler/stable_mir/src/mir/alloc.rs b/compiler/stable_mir/src/mir/alloc.rs
index 0d45e59885c..9a94551f3ec 100644
--- a/compiler/stable_mir/src/mir/alloc.rs
+++ b/compiler/stable_mir/src/mir/alloc.rs
@@ -23,6 +23,9 @@ pub enum GlobalAlloc {
     Static(StaticDef),
     /// The alloc ID points to memory.
     Memory(Allocation),
+    /// The first pointer-sized segment of a type id. On 64 bit systems, the 128 bit type id
+    /// is split into two segments, on 32 bit systems there are 4 segments, and so on.
+    TypeId { ty: Ty },
 }
 
 impl From<AllocId> for GlobalAlloc {
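A quick sanity check of the segment arithmetic described in the `TypeId` doc comment above (plain standalone functions, not part of the stable_mir API):

fn type_id_segments(pointer_size_bytes: u32) -> u32 {
    // A 128-bit type id occupies 16 bytes, split into pointer-sized chunks.
    16 / pointer_size_bytes
}

fn main() {
    assert_eq!(type_id_segments(8), 2); // 64-bit target: two segments
    assert_eq!(type_id_segments(4), 4); // 32-bit target: four segments
}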
diff --git a/compiler/stable_mir/src/unstable/convert/stable/mir.rs b/compiler/stable_mir/src/unstable/convert/stable/mir.rs
index f6f4706e40b..ad39fc37600 100644
--- a/compiler/stable_mir/src/unstable/convert/stable/mir.rs
+++ b/compiler/stable_mir/src/unstable/convert/stable/mir.rs
@@ -864,6 +864,9 @@ impl<'tcx> Stable<'tcx> for mir::interpret::GlobalAlloc<'tcx> {
             mir::interpret::GlobalAlloc::Memory(alloc) => {
                 GlobalAlloc::Memory(alloc.stable(tables, cx))
             }
+            mir::interpret::GlobalAlloc::TypeId { ty } => {
+                GlobalAlloc::TypeId { ty: ty.stable(tables, cx) }
+            }
         }
     }
 }