-rw-r--r--  compiler/rustc_abi/src/layout.rs | 1872
-rw-r--r--  compiler/rustc_abi/src/lib.rs | 10
-rw-r--r--  compiler/rustc_codegen_llvm/src/intrinsic.rs | 4
-rw-r--r--  compiler/rustc_codegen_ssa/src/back/linker.rs | 12
-rw-r--r--  compiler/rustc_const_eval/src/const_eval/valtrees.rs | 2
-rw-r--r--  compiler/rustc_const_eval/src/interpret/validity.rs | 6
-rw-r--r--  compiler/rustc_const_eval/src/util/check_validity_requirement.rs | 12
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0799.md | 19
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0800.md | 11
-rw-r--r--  compiler/rustc_error_codes/src/lib.rs | 2
-rw-r--r--  compiler/rustc_hir_analysis/src/check/compare_impl_item.rs | 15
-rw-r--r--  compiler/rustc_hir_analysis/src/collect/resolve_bound_vars.rs | 10
-rw-r--r--  compiler/rustc_hir_analysis/src/errors/precise_captures.rs | 3
-rw-r--r--  compiler/rustc_hir_typeck/src/coercion.rs | 2
-rw-r--r--  compiler/rustc_hir_typeck/src/expr.rs | 47
-rw-r--r--  compiler/rustc_hir_typeck/src/fn_ctxt/checks.rs | 17
-rw-r--r--  compiler/rustc_hir_typeck/src/method/probe.rs | 2
-rw-r--r--  compiler/rustc_hir_typeck/src/method/suggest.rs | 49
-rw-r--r--  compiler/rustc_hir_typeck/src/pat.rs | 1
-rw-r--r--  compiler/rustc_index/src/slice.rs | 2
-rw-r--r--  compiler/rustc_llvm/build.rs | 5
-rw-r--r--  compiler/rustc_metadata/src/lib.rs | 3
-rw-r--r--  compiler/rustc_metadata/src/native_libs.rs | 38
-rw-r--r--  compiler/rustc_middle/src/ty/generics.rs | 8
-rw-r--r--  compiler/rustc_middle/src/ty/layout.rs | 60
-rw-r--r--  compiler/rustc_middle/src/ty/util.rs | 5
-rw-r--r--  compiler/rustc_mir_transform/src/shim.rs | 17
-rw-r--r--  compiler/rustc_passes/src/layout_test.rs | 2
-rw-r--r--  compiler/rustc_resolve/src/late.rs | 43
-rw-r--r--  compiler/rustc_resolve/src/late/diagnostics.rs | 58
-rw-r--r--  compiler/rustc_transmute/src/layout/mod.rs | 8
-rw-r--r--  compiler/rustc_transmute/src/layout/tree.rs | 28
-rw-r--r--  compiler/rustc_transmute/src/maybe_transmutable/mod.rs | 2
-rw-r--r--  compiler/rustc_ty_utils/src/abi.rs | 58
-rw-r--r--  compiler/rustc_ty_utils/src/layout.rs | 190
-rw-r--r--  compiler/rustc_ty_utils/src/layout_sanity_check.rs | 25
-rw-r--r--  library/core/src/fmt/mod.rs | 12
-rw-r--r--  library/std/src/process.rs | 14
-rw-r--r--  src/bootstrap/src/core/build_steps/doc.rs | 8
-rw-r--r--  src/bootstrap/src/core/builder.rs | 1
-rw-r--r--  src/bootstrap/src/lib.rs | 6
-rw-r--r--  src/tools/miri/src/eval.rs | 2
-rw-r--r--  src/tools/miri/src/machine.rs | 15
-rw-r--r--  src/tools/rust-analyzer/crates/hir-ty/src/layout.rs | 65
-rw-r--r--  src/tools/rust-analyzer/crates/hir-ty/src/layout/adt.rs | 13
-rw-r--r--  tests/assembly/simd/reduce-fadd-unordered.rs | 30
-rw-r--r--  tests/crashes/124182.rs | 22
-rw-r--r--  tests/crashes/126939.rs | 21
-rw-r--r--  tests/crashes/129850.rs | 9
-rw-r--r--  tests/crashes/130372-1.rs | 9
-rw-r--r--  tests/crashes/130372-2.rs | 11
-rw-r--r--  tests/crashes/130372-3.rs | 7
-rw-r--r--  tests/crashes/130399.rs | 5
-rw-r--r--  tests/ui/async-await/async-closures/closure-shim-borrowck-error.rs (renamed from tests/crashes/129262.rs) | 2
-rw-r--r--  tests/ui/async-await/async-closures/closure-shim-borrowck-error.stderr | 24
-rw-r--r--  tests/ui/async-await/async-closures/debuginfo-by-move-body.rs | 19
-rw-r--r--  tests/ui/error-codes/E0799.rs | 4
-rw-r--r--  tests/ui/error-codes/E0799.stderr | 9
-rw-r--r--  tests/ui/error-codes/E0800.rs | 4
-rw-r--r--  tests/ui/error-codes/E0800.stderr | 9
-rw-r--r--  tests/ui/impl-trait/precise-capturing/bad-params.rs | 9
-rw-r--r--  tests/ui/impl-trait/precise-capturing/bad-params.stderr | 28
-rw-r--r--  tests/ui/impl-trait/precise-capturing/rpitit-captures-more-method-lifetimes.rs | 16
-rw-r--r--  tests/ui/impl-trait/precise-capturing/rpitit-captures-more-method-lifetimes.stderr | 42
-rw-r--r--  tests/ui/layout/debug.rs | 5
-rw-r--r--  tests/ui/layout/debug.stderr | 14
-rw-r--r--  tests/ui/layout/invalid-unsized-const-eval.rs | 14
-rw-r--r--  tests/ui/layout/invalid-unsized-const-eval.stderr | 12
-rw-r--r--  tests/ui/layout/invalid-unsized-const-prop.rs (renamed from tests/crashes/127737.rs) | 6
-rw-r--r--  tests/ui/layout/invalid-unsized-in-always-sized-tail.rs | 17
-rw-r--r--  tests/ui/layout/invalid-unsized-in-always-sized-tail.stderr | 23
-rw-r--r--  tests/ui/layout/trivial-bounds-sized.rs | 51
-rw-r--r--  tests/ui/layout/unsatisfiable-sized-ungated.rs (renamed from tests/crashes/123134.rs) | 11
-rw-r--r--  tests/ui/methods/probe-error-on-infinite-deref.rs | 16
-rw-r--r--  tests/ui/methods/probe-error-on-infinite-deref.stderr | 21
-rw-r--r--  tests/ui/mismatched_types/mismatch-args-vargs-issue-130372.rs | 12
-rw-r--r--  tests/ui/mismatched_types/mismatch-args-vargs-issue-130372.stderr | 19
-rw-r--r--  tests/ui/resolve/auxiliary/foreign-trait-with-assoc.rs | 3
-rw-r--r--  tests/ui/resolve/dont-compute-arg-names-for-non-fn.rs | 11
-rw-r--r--  tests/ui/resolve/dont-compute-arg-names-for-non-fn.stderr | 14
80 files changed, 1917 insertions(+), 1396 deletions(-)
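
Before the per-file diff: the central change is in compiler/rustc_abi/src/layout.rs, where the old LayoutCalculator trait (methods returning Option, with failures reported through delayed_bug) becomes a concrete LayoutCalculator<Cx> struct whose methods return Result with a typed LayoutCalculatorError. The sketch below is a simplified, standalone illustration of that error-propagation pattern only; ToyLayout, LayoutError, and the tuple-encoded fields are invented stand-ins, not the actual rustc_abi types.

// Standalone sketch of the "Option + delayed bug" -> "Result + typed error"
// refactor pattern. The error variants mirror the ones introduced in the diff.
#[derive(Debug)]
#[allow(dead_code)] // SizeOverflow is listed for parity with the real enum
enum LayoutError<F> {
    UnexpectedUnsized(F), // a field was unsized where a sized one was required
    SizeOverflow,         // the type exceeds the target's object size bound
    EmptyUnion,           // a union with no fields has no layout
}

#[derive(Debug)]
struct ToyLayout {
    size: u64,
    align: u64,
}

struct LayoutCalculator<Cx> {
    #[allow(dead_code)] // stand-in for the data-layout context (HasDataLayout in rustc)
    cx: Cx,
}

impl<Cx> LayoutCalculator<Cx> {
    fn new(cx: Cx) -> Self {
        Self { cx }
    }

    // Errors are surfaced to the caller instead of being reported as
    // delayed bugs inside the calculator. Fields are (id, sized, size, align).
    fn layout_of_union<F: Copy>(
        &self,
        fields: &[(F, bool, u64, u64)],
    ) -> Result<ToyLayout, LayoutError<F>> {
        if fields.is_empty() {
            return Err(LayoutError::EmptyUnion);
        }
        let mut size = 0u64;
        let mut align = 1u64;
        for &(f, sized, fsize, falign) in fields {
            if !sized {
                return Err(LayoutError::UnexpectedUnsized(f));
            }
            size = size.max(fsize);
            align = align.max(falign);
        }
        Ok(ToyLayout { size: size.next_multiple_of(align), align })
    }
}

fn main() {
    let calc = LayoutCalculator::new(());
    // Two sized fields: the union is as large as its largest field, padded to alignment.
    println!("{:?}", calc.layout_of_union(&[("a", true, 3, 4), ("b", true, 8, 8)]));
    // An empty union has no layout.
    println!("{:?}", calc.layout_of_union::<&str>(&[]));
    // An unsized field yields a typed error the caller can report.
    println!("{:?}", calc.layout_of_union(&[("tail", false, 0, 1)]));
}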
diff --git a/compiler/rustc_abi/src/layout.rs b/compiler/rustc_abi/src/layout.rs
index 7432768be4a..f4de4e06d1b 100644
--- a/compiler/rustc_abi/src/layout.rs
+++ b/compiler/rustc_abi/src/layout.rs
@@ -1,4 +1,3 @@
-use std::borrow::{Borrow, Cow};
 use std::fmt::{self, Write};
 use std::ops::{Bound, Deref};
 use std::{cmp, iter};
@@ -7,8 +6,8 @@ use rustc_index::Idx;
 use tracing::debug;
 
 use crate::{
-    Abi, AbiAndPrefAlign, Align, FieldsShape, IndexSlice, IndexVec, Integer, LayoutS, Niche,
-    NonZeroUsize, Primitive, ReprOptions, Scalar, Size, StructKind, TagEncoding, TargetDataLayout,
+    Abi, AbiAndPrefAlign, Align, FieldsShape, HasDataLayout, IndexSlice, IndexVec, Integer,
+    LayoutS, Niche, NonZeroUsize, Primitive, ReprOptions, Scalar, Size, StructKind, TagEncoding,
     Variants, WrappingRange,
 };
 
@@ -30,19 +29,48 @@ where
     uninhabited && is_1zst
 }
 
-pub trait LayoutCalculator {
-    type TargetDataLayoutRef: Borrow<TargetDataLayout>;
+/// Determines towards which end of a struct layout optimizations will try to place the best niches.
+enum NicheBias {
+    Start,
+    End,
+}
 
-    fn delayed_bug(&self, txt: impl Into<Cow<'static, str>>);
-    fn current_data_layout(&self) -> Self::TargetDataLayoutRef;
+#[derive(Copy, Clone, Debug)]
+pub enum LayoutCalculatorError<F> {
+    /// An unsized type was found in a location where a sized type was expected.
+    ///
+    /// This is not always a compile error, for example if there is a `[T]: Sized`
+    /// bound in a where clause.
+    ///
+    /// Contains the field that was unexpectedly unsized.
+    UnexpectedUnsized(F),
+
+    /// A type was too large for the target platform.
+    SizeOverflow,
+
+    /// A union had no fields.
+    EmptyUnion,
+}
+
+type LayoutCalculatorResult<FieldIdx, VariantIdx, F> =
+    Result<LayoutS<FieldIdx, VariantIdx>, LayoutCalculatorError<F>>;
+
+#[derive(Clone, Copy, Debug)]
+pub struct LayoutCalculator<Cx> {
+    pub cx: Cx,
+}
+
+impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
+    pub fn new(cx: Cx) -> Self {
+        Self { cx }
+    }
 
-    fn scalar_pair<FieldIdx: Idx, VariantIdx: Idx>(
+    pub fn scalar_pair<FieldIdx: Idx, VariantIdx: Idx>(
         &self,
         a: Scalar,
         b: Scalar,
     ) -> LayoutS<FieldIdx, VariantIdx> {
-        let dl = self.current_data_layout();
-        let dl = dl.borrow();
+        let dl = self.cx.data_layout();
         let b_align = b.align(dl);
         let align = a.align(dl).max(b_align).max(dl.aggregate_align);
         let b_offset = a.size(dl).align_to(b_align.abi);
@@ -70,25 +98,25 @@ pub trait LayoutCalculator {
         }
     }
 
-    fn univariant<
+    pub fn univariant<
         'a,
         FieldIdx: Idx,
         VariantIdx: Idx,
-        F: Deref<Target = &'a LayoutS<FieldIdx, VariantIdx>> + fmt::Debug,
+        F: Deref<Target = &'a LayoutS<FieldIdx, VariantIdx>> + fmt::Debug + Copy,
     >(
         &self,
-        dl: &TargetDataLayout,
         fields: &IndexSlice<FieldIdx, F>,
         repr: &ReprOptions,
         kind: StructKind,
-    ) -> Option<LayoutS<FieldIdx, VariantIdx>> {
-        let layout = univariant(self, dl, fields, repr, kind, NicheBias::Start);
+    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
+        let dl = self.cx.data_layout();
+        let layout = self.univariant_biased(fields, repr, kind, NicheBias::Start);
         // Enums prefer niches close to the beginning or the end of the variants so that other
         // (smaller) data-carrying variants can be packed into the space after/before the niche.
         // If the default field ordering does not give us a niche at the front then we do a second
         // run and bias niches to the right and then check which one is closer to one of the
         // struct's edges.
-        if let Some(layout) = &layout {
+        if let Ok(layout) = &layout {
             // Don't try to calculate an end-biased layout for unsizable structs,
             // otherwise we could end up with different layouts for
             // Foo<Type> and Foo<dyn Trait> which would break unsizing.
@@ -102,7 +130,8 @@ pub trait LayoutCalculator {
                     // field (e.g. a trailing bool) and there is tail padding. But it's non-trivial
                     // to get the unpadded size so we try anyway.
                     if fields.len() > 1 && head_space != 0 && tail_space > 0 {
-                        let alt_layout = univariant(self, dl, fields, repr, kind, NicheBias::End)
+                        let alt_layout = self
+                            .univariant_biased(fields, repr, kind, NicheBias::End)
                             .expect("alt layout should always work");
                         let alt_niche = alt_layout
                             .largest_niche
@@ -130,12 +159,12 @@ pub trait LayoutCalculator {
                             alt_tail_space,
                             layout.fields.count(),
                             prefer_alt_layout,
-                            format_field_niches(layout, fields, dl),
-                            format_field_niches(&alt_layout, fields, dl),
+                            self.format_field_niches(layout, fields),
+                            self.format_field_niches(&alt_layout, fields),
                         );
 
                         if prefer_alt_layout {
-                            return Some(alt_layout);
+                            return Ok(alt_layout);
                         }
                     }
                 }
@@ -144,11 +173,10 @@ pub trait LayoutCalculator {
         layout
     }
 
-    fn layout_of_never_type<FieldIdx: Idx, VariantIdx: Idx>(
+    pub fn layout_of_never_type<FieldIdx: Idx, VariantIdx: Idx>(
         &self,
     ) -> LayoutS<FieldIdx, VariantIdx> {
-        let dl = self.current_data_layout();
-        let dl = dl.borrow();
+        let dl = self.cx.data_layout();
         LayoutS {
             variants: Variants::Single { index: VariantIdx::new(0) },
             fields: FieldsShape::Primitive,
@@ -161,11 +189,11 @@ pub trait LayoutCalculator {
         }
     }
 
-    fn layout_of_struct_or_enum<
+    pub fn layout_of_struct_or_enum<
         'a,
         FieldIdx: Idx,
         VariantIdx: Idx,
-        F: Deref<Target = &'a LayoutS<FieldIdx, VariantIdx>> + fmt::Debug,
+        F: Deref<Target = &'a LayoutS<FieldIdx, VariantIdx>> + fmt::Debug + Copy,
     >(
         &self,
         repr: &ReprOptions,
@@ -177,10 +205,7 @@ pub trait LayoutCalculator {
         discriminants: impl Iterator<Item = (VariantIdx, i128)>,
         dont_niche_optimize_enum: bool,
         always_sized: bool,
-    ) -> Option<LayoutS<FieldIdx, VariantIdx>> {
-        let dl = self.current_data_layout();
-        let dl = dl.borrow();
-
+    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
         let (present_first, present_second) = {
             let mut present_variants = variants
                 .iter_enumerated()
@@ -191,7 +216,7 @@ pub trait LayoutCalculator {
             Some(present_first) => present_first,
             // Uninhabited because it has no variants, or only absent ones.
             None if is_enum => {
-                return Some(self.layout_of_never_type());
+                return Ok(self.layout_of_never_type());
             }
             // If it's a struct, still compute a layout so that we can still compute the
             // field offsets.
@@ -203,15 +228,13 @@ pub trait LayoutCalculator {
             // or for optimizing univariant enums
             (present_second.is_none() && !repr.inhibit_enum_layout_opt())
         {
-            layout_of_struct(
-                self,
+            self.layout_of_struct(
                 repr,
                 variants,
                 is_enum,
                 is_unsafe_cell,
                 scalar_valid_range,
                 always_sized,
-                dl,
                 present_first,
             )
         } else {
@@ -219,30 +242,27 @@ pub trait LayoutCalculator {
             // structs. (We have also handled univariant enums
             // that allow representation optimization.)
             assert!(is_enum);
-            layout_of_enum(
-                self,
+            self.layout_of_enum(
                 repr,
                 variants,
                 discr_range_of_repr,
                 discriminants,
                 dont_niche_optimize_enum,
-                dl,
             )
         }
     }
 
-    fn layout_of_union<
+    pub fn layout_of_union<
         'a,
         FieldIdx: Idx,
         VariantIdx: Idx,
-        F: Deref<Target = &'a LayoutS<FieldIdx, VariantIdx>> + fmt::Debug,
+        F: Deref<Target = &'a LayoutS<FieldIdx, VariantIdx>> + fmt::Debug + Copy,
     >(
         &self,
         repr: &ReprOptions,
         variants: &IndexSlice<VariantIdx, IndexVec<FieldIdx, F>>,
-    ) -> Option<LayoutS<FieldIdx, VariantIdx>> {
-        let dl = self.current_data_layout();
-        let dl = dl.borrow();
+    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
+        let dl = self.cx.data_layout();
         let mut align = if repr.pack.is_some() { dl.i8_align } else { dl.aggregate_align };
         let mut max_repr_align = repr.align;
 
@@ -257,10 +277,11 @@ pub trait LayoutCalculator {
         };
 
         let mut size = Size::ZERO;
-        let only_variant = &variants[VariantIdx::new(0)];
+        let only_variant_idx = VariantIdx::new(0);
+        let only_variant = &variants[only_variant_idx];
         for field in only_variant {
             if field.is_unsized() {
-                self.delayed_bug("unsized field in union".to_string());
+                return Err(LayoutCalculatorError::UnexpectedUnsized(*field));
             }
 
             align = align.max(field.align);
@@ -323,9 +344,13 @@ pub trait LayoutCalculator {
             }
         };
 
-        Some(LayoutS {
-            variants: Variants::Single { index: VariantIdx::new(0) },
-            fields: FieldsShape::Union(NonZeroUsize::new(only_variant.len())?),
+        let Some(union_field_count) = NonZeroUsize::new(only_variant.len()) else {
+            return Err(LayoutCalculatorError::EmptyUnion);
+        };
+
+        Ok(LayoutS {
+            variants: Variants::Single { index: only_variant_idx },
+            fields: FieldsShape::Union(union_field_count),
             abi,
             largest_niche: None,
             align,
@@ -334,986 +359,993 @@ pub trait LayoutCalculator {
             unadjusted_abi_align,
         })
     }
-}
 
-/// single-variant enums are just structs, if you think about it
-fn layout_of_struct<'a, LC, FieldIdx: Idx, VariantIdx: Idx, F>(
-    layout_calc: &LC,
-    repr: &ReprOptions,
-    variants: &IndexSlice<VariantIdx, IndexVec<FieldIdx, F>>,
-    is_enum: bool,
-    is_unsafe_cell: bool,
-    scalar_valid_range: (Bound<u128>, Bound<u128>),
-    always_sized: bool,
-    dl: &TargetDataLayout,
-    present_first: VariantIdx,
-) -> Option<LayoutS<FieldIdx, VariantIdx>>
-where
-    LC: LayoutCalculator + ?Sized,
-    F: Deref<Target = &'a LayoutS<FieldIdx, VariantIdx>> + fmt::Debug,
-{
-    // Struct, or univariant enum equivalent to a struct.
-    // (Typechecking will reject discriminant-sizing attrs.)
-
-    let v = present_first;
-    let kind = if is_enum || variants[v].is_empty() || always_sized {
-        StructKind::AlwaysSized
-    } else {
-        StructKind::MaybeUnsized
-    };
-
-    let mut st = layout_calc.univariant(dl, &variants[v], repr, kind)?;
-    st.variants = Variants::Single { index: v };
-
-    if is_unsafe_cell {
-        let hide_niches = |scalar: &mut _| match scalar {
-            Scalar::Initialized { value, valid_range } => {
-                *valid_range = WrappingRange::full(value.size(dl))
-            }
-            // Already doesn't have any niches
-            Scalar::Union { .. } => {}
+    /// single-variant enums are just structs, if you think about it
+    fn layout_of_struct<
+        'a,
+        FieldIdx: Idx,
+        VariantIdx: Idx,
+        F: Deref<Target = &'a LayoutS<FieldIdx, VariantIdx>> + fmt::Debug + Copy,
+    >(
+        &self,
+        repr: &ReprOptions,
+        variants: &IndexSlice<VariantIdx, IndexVec<FieldIdx, F>>,
+        is_enum: bool,
+        is_unsafe_cell: bool,
+        scalar_valid_range: (Bound<u128>, Bound<u128>),
+        always_sized: bool,
+        present_first: VariantIdx,
+    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
+        // Struct, or univariant enum equivalent to a struct.
+        // (Typechecking will reject discriminant-sizing attrs.)
+
+        let dl = self.cx.data_layout();
+        let v = present_first;
+        let kind = if is_enum || variants[v].is_empty() || always_sized {
+            StructKind::AlwaysSized
+        } else {
+            StructKind::MaybeUnsized
         };
-        match &mut st.abi {
-            Abi::Uninhabited => {}
-            Abi::Scalar(scalar) => hide_niches(scalar),
-            Abi::ScalarPair(a, b) => {
-                hide_niches(a);
-                hide_niches(b);
+
+        let mut st = self.univariant(&variants[v], repr, kind)?;
+        st.variants = Variants::Single { index: v };
+
+        if is_unsafe_cell {
+            let hide_niches = |scalar: &mut _| match scalar {
+                Scalar::Initialized { value, valid_range } => {
+                    *valid_range = WrappingRange::full(value.size(dl))
+                }
+                // Already doesn't have any niches
+                Scalar::Union { .. } => {}
+            };
+            match &mut st.abi {
+                Abi::Uninhabited => {}
+                Abi::Scalar(scalar) => hide_niches(scalar),
+                Abi::ScalarPair(a, b) => {
+                    hide_niches(a);
+                    hide_niches(b);
+                }
+                Abi::Vector { element, count: _ } => hide_niches(element),
+                Abi::Aggregate { sized: _ } => {}
             }
-            Abi::Vector { element, count: _ } => hide_niches(element),
-            Abi::Aggregate { sized: _ } => {}
+            st.largest_niche = None;
+            return Ok(st);
         }
-        st.largest_niche = None;
-        return Some(st);
-    }
 
-    let (start, end) = scalar_valid_range;
-    match st.abi {
-        Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => {
-            // Enlarging validity ranges would result in missed
-            // optimizations, *not* wrongly assuming the inner
-            // value is valid. e.g. unions already enlarge validity ranges,
-            // because the values may be uninitialized.
-            //
-            // Because of that we only check that the start and end
-            // of the range is representable with this scalar type.
-
-            let max_value = scalar.size(dl).unsigned_int_max();
-            if let Bound::Included(start) = start {
-                // FIXME(eddyb) this might be incorrect - it doesn't
-                // account for wrap-around (end < start) ranges.
-                assert!(start <= max_value, "{start} > {max_value}");
-                scalar.valid_range_mut().start = start;
-            }
-            if let Bound::Included(end) = end {
-                // FIXME(eddyb) this might be incorrect - it doesn't
-                // account for wrap-around (end < start) ranges.
-                assert!(end <= max_value, "{end} > {max_value}");
-                scalar.valid_range_mut().end = end;
-            }
+        let (start, end) = scalar_valid_range;
+        match st.abi {
+            Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => {
+                // Enlarging validity ranges would result in missed
+                // optimizations, *not* wrongly assuming the inner
+                // value is valid. e.g. unions already enlarge validity ranges,
+                // because the values may be uninitialized.
+                //
+                // Because of that we only check that the start and end
+                // of the range is representable with this scalar type.
+
+                let max_value = scalar.size(dl).unsigned_int_max();
+                if let Bound::Included(start) = start {
+                    // FIXME(eddyb) this might be incorrect - it doesn't
+                    // account for wrap-around (end < start) ranges.
+                    assert!(start <= max_value, "{start} > {max_value}");
+                    scalar.valid_range_mut().start = start;
+                }
+                if let Bound::Included(end) = end {
+                    // FIXME(eddyb) this might be incorrect - it doesn't
+                    // account for wrap-around (end < start) ranges.
+                    assert!(end <= max_value, "{end} > {max_value}");
+                    scalar.valid_range_mut().end = end;
+                }
 
-            // Update `largest_niche` if we have introduced a larger niche.
-            let niche = Niche::from_scalar(dl, Size::ZERO, *scalar);
-            if let Some(niche) = niche {
-                match st.largest_niche {
-                    Some(largest_niche) => {
-                        // Replace the existing niche even if they're equal,
-                        // because this one is at a lower offset.
-                        if largest_niche.available(dl) <= niche.available(dl) {
-                            st.largest_niche = Some(niche);
+                // Update `largest_niche` if we have introduced a larger niche.
+                let niche = Niche::from_scalar(dl, Size::ZERO, *scalar);
+                if let Some(niche) = niche {
+                    match st.largest_niche {
+                        Some(largest_niche) => {
+                            // Replace the existing niche even if they're equal,
+                            // because this one is at a lower offset.
+                            if largest_niche.available(dl) <= niche.available(dl) {
+                                st.largest_niche = Some(niche);
+                            }
                         }
+                        None => st.largest_niche = Some(niche),
                     }
-                    None => st.largest_niche = Some(niche),
                 }
             }
+            _ => assert!(
+                start == Bound::Unbounded && end == Bound::Unbounded,
+                "nonscalar layout for layout_scalar_valid_range type: {st:#?}",
+            ),
         }
-        _ => assert!(
-            start == Bound::Unbounded && end == Bound::Unbounded,
-            "nonscalar layout for layout_scalar_valid_range type: {st:#?}",
-        ),
-    }
 
-    Some(st)
-}
-
-fn layout_of_enum<'a, LC, FieldIdx: Idx, VariantIdx: Idx, F>(
-    layout_calc: &LC,
-    repr: &ReprOptions,
-    variants: &IndexSlice<VariantIdx, IndexVec<FieldIdx, F>>,
-    discr_range_of_repr: impl Fn(i128, i128) -> (Integer, bool),
-    discriminants: impl Iterator<Item = (VariantIdx, i128)>,
-    dont_niche_optimize_enum: bool,
-    dl: &TargetDataLayout,
-) -> Option<LayoutS<FieldIdx, VariantIdx>>
-where
-    LC: LayoutCalculator + ?Sized,
-    F: Deref<Target = &'a LayoutS<FieldIdx, VariantIdx>> + fmt::Debug,
-{
-    // Until we've decided whether to use the tagged or
-    // niche filling LayoutS, we don't want to intern the
-    // variant layouts, so we can't store them in the
-    // overall LayoutS. Store the overall LayoutS
-    // and the variant LayoutSs here until then.
-    struct TmpLayout<FieldIdx: Idx, VariantIdx: Idx> {
-        layout: LayoutS<FieldIdx, VariantIdx>,
-        variants: IndexVec<VariantIdx, LayoutS<FieldIdx, VariantIdx>>,
+        Ok(st)
     }
 
-    let calculate_niche_filling_layout = || -> Option<TmpLayout<FieldIdx, VariantIdx>> {
-        if dont_niche_optimize_enum {
-            return None;
+    fn layout_of_enum<
+        'a,
+        FieldIdx: Idx,
+        VariantIdx: Idx,
+        F: Deref<Target = &'a LayoutS<FieldIdx, VariantIdx>> + fmt::Debug + Copy,
+    >(
+        &self,
+        repr: &ReprOptions,
+        variants: &IndexSlice<VariantIdx, IndexVec<FieldIdx, F>>,
+        discr_range_of_repr: impl Fn(i128, i128) -> (Integer, bool),
+        discriminants: impl Iterator<Item = (VariantIdx, i128)>,
+        dont_niche_optimize_enum: bool,
+    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
+        // Until we've decided whether to use the tagged or
+        // niche filling LayoutS, we don't want to intern the
+        // variant layouts, so we can't store them in the
+        // overall LayoutS. Store the overall LayoutS
+        // and the variant LayoutSs here until then.
+        struct TmpLayout<FieldIdx: Idx, VariantIdx: Idx> {
+            layout: LayoutS<FieldIdx, VariantIdx>,
+            variants: IndexVec<VariantIdx, LayoutS<FieldIdx, VariantIdx>>,
         }
 
-        if variants.len() < 2 {
-            return None;
-        }
+        let dl = self.cx.data_layout();
 
-        let mut align = dl.aggregate_align;
-        let mut max_repr_align = repr.align;
-        let mut unadjusted_abi_align = align.abi;
+        let calculate_niche_filling_layout = || -> Option<TmpLayout<FieldIdx, VariantIdx>> {
+            if dont_niche_optimize_enum {
+                return None;
+            }
 
-        let mut variant_layouts = variants
-            .iter_enumerated()
-            .map(|(j, v)| {
-                let mut st = layout_calc.univariant(dl, v, repr, StructKind::AlwaysSized)?;
-                st.variants = Variants::Single { index: j };
+            if variants.len() < 2 {
+                return None;
+            }
 
-                align = align.max(st.align);
-                max_repr_align = max_repr_align.max(st.max_repr_align);
-                unadjusted_abi_align = unadjusted_abi_align.max(st.unadjusted_abi_align);
+            let mut align = dl.aggregate_align;
+            let mut max_repr_align = repr.align;
+            let mut unadjusted_abi_align = align.abi;
 
-                Some(st)
-            })
-            .collect::<Option<IndexVec<VariantIdx, _>>>()?;
+            let mut variant_layouts = variants
+                .iter_enumerated()
+                .map(|(j, v)| {
+                    let mut st = self.univariant(v, repr, StructKind::AlwaysSized).ok()?;
+                    st.variants = Variants::Single { index: j };
 
-        let largest_variant_index = variant_layouts
-            .iter_enumerated()
-            .max_by_key(|(_i, layout)| layout.size.bytes())
-            .map(|(i, _layout)| i)?;
-
-        let all_indices = variants.indices();
-        let needs_disc =
-            |index: VariantIdx| index != largest_variant_index && !absent(&variants[index]);
-        let niche_variants = all_indices.clone().find(|v| needs_disc(*v)).unwrap()
-            ..=all_indices.rev().find(|v| needs_disc(*v)).unwrap();
-
-        let count =
-            (niche_variants.end().index() as u128 - niche_variants.start().index() as u128) + 1;
-
-        // Find the field with the largest niche
-        let (field_index, niche, (niche_start, niche_scalar)) = variants[largest_variant_index]
-            .iter()
-            .enumerate()
-            .filter_map(|(j, field)| Some((j, field.largest_niche?)))
-            .max_by_key(|(_, niche)| niche.available(dl))
-            .and_then(|(j, niche)| Some((j, niche, niche.reserve(dl, count)?)))?;
-        let niche_offset =
-            niche.offset + variant_layouts[largest_variant_index].fields.offset(field_index);
-        let niche_size = niche.value.size(dl);
-        let size = variant_layouts[largest_variant_index].size.align_to(align.abi);
-
-        let all_variants_fit = variant_layouts.iter_enumerated_mut().all(|(i, layout)| {
-            if i == largest_variant_index {
-                return true;
-            }
+                    align = align.max(st.align);
+                    max_repr_align = max_repr_align.max(st.max_repr_align);
+                    unadjusted_abi_align = unadjusted_abi_align.max(st.unadjusted_abi_align);
 
-            layout.largest_niche = None;
+                    Some(st)
+                })
+                .collect::<Option<IndexVec<VariantIdx, _>>>()?;
 
-            if layout.size <= niche_offset {
-                // This variant will fit before the niche.
-                return true;
-            }
+            let largest_variant_index = variant_layouts
+                .iter_enumerated()
+                .max_by_key(|(_i, layout)| layout.size.bytes())
+                .map(|(i, _layout)| i)?;
 
-            // Determine if it'll fit after the niche.
-            let this_align = layout.align.abi;
-            let this_offset = (niche_offset + niche_size).align_to(this_align);
+            let all_indices = variants.indices();
+            let needs_disc =
+                |index: VariantIdx| index != largest_variant_index && !absent(&variants[index]);
+            let niche_variants = all_indices.clone().find(|v| needs_disc(*v)).unwrap()
+                ..=all_indices.rev().find(|v| needs_disc(*v)).unwrap();
 
-            if this_offset + layout.size > size {
-                return false;
-            }
+            let count =
+                (niche_variants.end().index() as u128 - niche_variants.start().index() as u128) + 1;
 
-            // It'll fit, but we need to make some adjustments.
-            match layout.fields {
-                FieldsShape::Arbitrary { ref mut offsets, .. } => {
-                    for offset in offsets.iter_mut() {
-                        *offset += this_offset;
-                    }
-                }
-                FieldsShape::Primitive | FieldsShape::Array { .. } | FieldsShape::Union(..) => {
-                    panic!("Layout of fields should be Arbitrary for variants")
+            // Find the field with the largest niche
+            let (field_index, niche, (niche_start, niche_scalar)) = variants[largest_variant_index]
+                .iter()
+                .enumerate()
+                .filter_map(|(j, field)| Some((j, field.largest_niche?)))
+                .max_by_key(|(_, niche)| niche.available(dl))
+                .and_then(|(j, niche)| Some((j, niche, niche.reserve(dl, count)?)))?;
+            let niche_offset =
+                niche.offset + variant_layouts[largest_variant_index].fields.offset(field_index);
+            let niche_size = niche.value.size(dl);
+            let size = variant_layouts[largest_variant_index].size.align_to(align.abi);
+
+            let all_variants_fit = variant_layouts.iter_enumerated_mut().all(|(i, layout)| {
+                if i == largest_variant_index {
+                    return true;
                 }
-            }
 
-            // It can't be a Scalar or ScalarPair because the offset isn't 0.
-            if !layout.abi.is_uninhabited() {
-                layout.abi = Abi::Aggregate { sized: true };
-            }
-            layout.size += this_offset;
+                layout.largest_niche = None;
 
-            true
-        });
+                if layout.size <= niche_offset {
+                    // This variant will fit before the niche.
+                    return true;
+                }
 
-        if !all_variants_fit {
-            return None;
-        }
+                // Determine if it'll fit after the niche.
+                let this_align = layout.align.abi;
+                let this_offset = (niche_offset + niche_size).align_to(this_align);
 
-        let largest_niche = Niche::from_scalar(dl, niche_offset, niche_scalar);
+                if this_offset + layout.size > size {
+                    return false;
+                }
 
-        let others_zst = variant_layouts
-            .iter_enumerated()
-            .all(|(i, layout)| i == largest_variant_index || layout.size == Size::ZERO);
-        let same_size = size == variant_layouts[largest_variant_index].size;
-        let same_align = align == variant_layouts[largest_variant_index].align;
-
-        let abi = if variant_layouts.iter().all(|v| v.abi.is_uninhabited()) {
-            Abi::Uninhabited
-        } else if same_size && same_align && others_zst {
-            match variant_layouts[largest_variant_index].abi {
-                // When the total alignment and size match, we can use the
-                // same ABI as the scalar variant with the reserved niche.
-                Abi::Scalar(_) => Abi::Scalar(niche_scalar),
-                Abi::ScalarPair(first, second) => {
-                    // Only the niche is guaranteed to be initialised,
-                    // so use union layouts for the other primitive.
-                    if niche_offset == Size::ZERO {
-                        Abi::ScalarPair(niche_scalar, second.to_union())
-                    } else {
-                        Abi::ScalarPair(first.to_union(), niche_scalar)
+                // It'll fit, but we need to make some adjustments.
+                match layout.fields {
+                    FieldsShape::Arbitrary { ref mut offsets, .. } => {
+                        for offset in offsets.iter_mut() {
+                            *offset += this_offset;
+                        }
                     }
+                    FieldsShape::Primitive | FieldsShape::Array { .. } | FieldsShape::Union(..) => {
+                        panic!("Layout of fields should be Arbitrary for variants")
+                    }
+                }
+
+                // It can't be a Scalar or ScalarPair because the offset isn't 0.
+                if !layout.abi.is_uninhabited() {
+                    layout.abi = Abi::Aggregate { sized: true };
                 }
-                _ => Abi::Aggregate { sized: true },
+                layout.size += this_offset;
+
+                true
+            });
+
+            if !all_variants_fit {
+                return None;
             }
-        } else {
-            Abi::Aggregate { sized: true }
-        };
 
-        let layout = LayoutS {
-            variants: Variants::Multiple {
-                tag: niche_scalar,
-                tag_encoding: TagEncoding::Niche {
-                    untagged_variant: largest_variant_index,
-                    niche_variants,
-                    niche_start,
+            let largest_niche = Niche::from_scalar(dl, niche_offset, niche_scalar);
+
+            let others_zst = variant_layouts
+                .iter_enumerated()
+                .all(|(i, layout)| i == largest_variant_index || layout.size == Size::ZERO);
+            let same_size = size == variant_layouts[largest_variant_index].size;
+            let same_align = align == variant_layouts[largest_variant_index].align;
+
+            let abi = if variant_layouts.iter().all(|v| v.abi.is_uninhabited()) {
+                Abi::Uninhabited
+            } else if same_size && same_align && others_zst {
+                match variant_layouts[largest_variant_index].abi {
+                    // When the total alignment and size match, we can use the
+                    // same ABI as the scalar variant with the reserved niche.
+                    Abi::Scalar(_) => Abi::Scalar(niche_scalar),
+                    Abi::ScalarPair(first, second) => {
+                        // Only the niche is guaranteed to be initialised,
+                        // so use union layouts for the other primitive.
+                        if niche_offset == Size::ZERO {
+                            Abi::ScalarPair(niche_scalar, second.to_union())
+                        } else {
+                            Abi::ScalarPair(first.to_union(), niche_scalar)
+                        }
+                    }
+                    _ => Abi::Aggregate { sized: true },
+                }
+            } else {
+                Abi::Aggregate { sized: true }
+            };
+
+            let layout = LayoutS {
+                variants: Variants::Multiple {
+                    tag: niche_scalar,
+                    tag_encoding: TagEncoding::Niche {
+                        untagged_variant: largest_variant_index,
+                        niche_variants,
+                        niche_start,
+                    },
+                    tag_field: 0,
+                    variants: IndexVec::new(),
                 },
-                tag_field: 0,
-                variants: IndexVec::new(),
-            },
-            fields: FieldsShape::Arbitrary {
-                offsets: [niche_offset].into(),
-                memory_index: [0].into(),
-            },
-            abi,
-            largest_niche,
-            size,
-            align,
-            max_repr_align,
-            unadjusted_abi_align,
-        };
+                fields: FieldsShape::Arbitrary {
+                    offsets: [niche_offset].into(),
+                    memory_index: [0].into(),
+                },
+                abi,
+                largest_niche,
+                size,
+                align,
+                max_repr_align,
+                unadjusted_abi_align,
+            };
 
-        Some(TmpLayout { layout, variants: variant_layouts })
-    };
+            Some(TmpLayout { layout, variants: variant_layouts })
+        };
 
-    let niche_filling_layout = calculate_niche_filling_layout();
+        let niche_filling_layout = calculate_niche_filling_layout();
 
-    let (mut min, mut max) = (i128::MAX, i128::MIN);
-    let discr_type = repr.discr_type();
-    let bits = Integer::from_attr(dl, discr_type).size().bits();
-    for (i, mut val) in discriminants {
-        if !repr.c() && variants[i].iter().any(|f| f.abi.is_uninhabited()) {
-            continue;
-        }
-        if discr_type.is_signed() {
-            // sign extend the raw representation to be an i128
-            val = (val << (128 - bits)) >> (128 - bits);
-        }
-        if val < min {
-            min = val;
+        let (mut min, mut max) = (i128::MAX, i128::MIN);
+        let discr_type = repr.discr_type();
+        let bits = Integer::from_attr(dl, discr_type).size().bits();
+        for (i, mut val) in discriminants {
+            if !repr.c() && variants[i].iter().any(|f| f.abi.is_uninhabited()) {
+                continue;
+            }
+            if discr_type.is_signed() {
+                // sign extend the raw representation to be an i128
+                val = (val << (128 - bits)) >> (128 - bits);
+            }
+            if val < min {
+                min = val;
+            }
+            if val > max {
+                max = val;
+            }
         }
-        if val > max {
-            max = val;
+        // We might have no inhabited variants, so pretend there's at least one.
+        if (min, max) == (i128::MAX, i128::MIN) {
+            min = 0;
+            max = 0;
         }
-    }
-    // We might have no inhabited variants, so pretend there's at least one.
-    if (min, max) == (i128::MAX, i128::MIN) {
-        min = 0;
-        max = 0;
-    }
-    assert!(min <= max, "discriminant range is {min}...{max}");
-    let (min_ity, signed) = discr_range_of_repr(min, max); //Integer::repr_discr(tcx, ty, &repr, min, max);
-
-    let mut align = dl.aggregate_align;
-    let mut max_repr_align = repr.align;
-    let mut unadjusted_abi_align = align.abi;
-
-    let mut size = Size::ZERO;
-
-    // We're interested in the smallest alignment, so start large.
-    let mut start_align = Align::from_bytes(256).unwrap();
-    assert_eq!(Integer::for_align(dl, start_align), None);
-
-    // repr(C) on an enum tells us to make a (tag, union) layout,
-    // so we need to grow the prefix alignment to be at least
-    // the alignment of the union. (This value is used both for
-    // determining the alignment of the overall enum, and the
-    // determining the alignment of the payload after the tag.)
-    let mut prefix_align = min_ity.align(dl).abi;
-    if repr.c() {
-        for fields in variants {
-            for field in fields {
-                prefix_align = prefix_align.max(field.align.abi);
+        assert!(min <= max, "discriminant range is {min}...{max}");
+        let (min_ity, signed) = discr_range_of_repr(min, max); //Integer::repr_discr(tcx, ty, &repr, min, max);
+
+        let mut align = dl.aggregate_align;
+        let mut max_repr_align = repr.align;
+        let mut unadjusted_abi_align = align.abi;
+
+        let mut size = Size::ZERO;
+
+        // We're interested in the smallest alignment, so start large.
+        let mut start_align = Align::from_bytes(256).unwrap();
+        assert_eq!(Integer::for_align(dl, start_align), None);
+
+        // repr(C) on an enum tells us to make a (tag, union) layout,
+        // so we need to grow the prefix alignment to be at least
+        // the alignment of the union. (This value is used both for
+        // determining the alignment of the overall enum, and the
+        // determining the alignment of the payload after the tag.)
+        let mut prefix_align = min_ity.align(dl).abi;
+        if repr.c() {
+            for fields in variants {
+                for field in fields {
+                    prefix_align = prefix_align.max(field.align.abi);
+                }
             }
         }
-    }
 
-    // Create the set of structs that represent each variant.
-    let mut layout_variants = variants
-        .iter_enumerated()
-        .map(|(i, field_layouts)| {
-            let mut st = layout_calc.univariant(
-                dl,
-                field_layouts,
-                repr,
-                StructKind::Prefixed(min_ity.size(), prefix_align),
-            )?;
-            st.variants = Variants::Single { index: i };
-            // Find the first field we can't move later
-            // to make room for a larger discriminant.
-            for field_idx in st.fields.index_by_increasing_offset() {
-                let field = &field_layouts[FieldIdx::new(field_idx)];
-                if !field.is_1zst() {
-                    start_align = start_align.min(field.align.abi);
-                    break;
+        // Create the set of structs that represent each variant.
+        let mut layout_variants = variants
+            .iter_enumerated()
+            .map(|(i, field_layouts)| {
+                let mut st = self.univariant(
+                    field_layouts,
+                    repr,
+                    StructKind::Prefixed(min_ity.size(), prefix_align),
+                )?;
+                st.variants = Variants::Single { index: i };
+                // Find the first field we can't move later
+                // to make room for a larger discriminant.
+                for field_idx in st.fields.index_by_increasing_offset() {
+                    let field = &field_layouts[FieldIdx::new(field_idx)];
+                    if !field.is_1zst() {
+                        start_align = start_align.min(field.align.abi);
+                        break;
+                    }
                 }
-            }
-            size = cmp::max(size, st.size);
-            align = align.max(st.align);
-            max_repr_align = max_repr_align.max(st.max_repr_align);
-            unadjusted_abi_align = unadjusted_abi_align.max(st.unadjusted_abi_align);
-            Some(st)
-        })
-        .collect::<Option<IndexVec<VariantIdx, _>>>()?;
+                size = cmp::max(size, st.size);
+                align = align.max(st.align);
+                max_repr_align = max_repr_align.max(st.max_repr_align);
+                unadjusted_abi_align = unadjusted_abi_align.max(st.unadjusted_abi_align);
+                Ok(st)
+            })
+            .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
 
-    // Align the maximum variant size to the largest alignment.
-    size = size.align_to(align.abi);
+        // Align the maximum variant size to the largest alignment.
+        size = size.align_to(align.abi);
 
-    // FIXME(oli-obk): deduplicate and harden these checks
-    if size.bytes() >= dl.obj_size_bound() {
-        return None;
-    }
+        // FIXME(oli-obk): deduplicate and harden these checks
+        if size.bytes() >= dl.obj_size_bound() {
+            return Err(LayoutCalculatorError::SizeOverflow);
+        }
 
-    let typeck_ity = Integer::from_attr(dl, repr.discr_type());
-    if typeck_ity < min_ity {
-        // It is a bug if Layout decided on a greater discriminant size than typeck for
-        // some reason at this point (based on values discriminant can take on). Mostly
-        // because this discriminant will be loaded, and then stored into variable of
-        // type calculated by typeck. Consider such case (a bug): typeck decided on
-        // byte-sized discriminant, but layout thinks we need a 16-bit to store all
-        // discriminant values. That would be a bug, because then, in codegen, in order
-        // to store this 16-bit discriminant into 8-bit sized temporary some of the
-        // space necessary to represent would have to be discarded (or layout is wrong
-        // on thinking it needs 16 bits)
-        panic!(
-            "layout decided on a larger discriminant type ({min_ity:?}) than typeck ({typeck_ity:?})"
-        );
-        // However, it is fine to make discr type however large (as an optimisation)
-        // after this point – we’ll just truncate the value we load in codegen.
-    }
+        let typeck_ity = Integer::from_attr(dl, repr.discr_type());
+        if typeck_ity < min_ity {
+            // It is a bug if Layout decided on a greater discriminant size than typeck for
+            // some reason at this point (based on values discriminant can take on). Mostly
+            // because this discriminant will be loaded, and then stored into variable of
+            // type calculated by typeck. Consider such case (a bug): typeck decided on
+            // byte-sized discriminant, but layout thinks we need a 16-bit to store all
+            // discriminant values. That would be a bug, because then, in codegen, in order
+            // to store this 16-bit discriminant into 8-bit sized temporary some of the
+            // space necessary to represent would have to be discarded (or layout is wrong
+            // on thinking it needs 16 bits)
+            panic!(
+                "layout decided on a larger discriminant type ({min_ity:?}) than typeck ({typeck_ity:?})"
+            );
+            // However, it is fine to make discr type however large (as an optimisation)
+            // after this point – we’ll just truncate the value we load in codegen.
+        }
+
+        // Check to see if we should use a different type for the
+        // discriminant. We can safely use a type with the same size
+        // as the alignment of the first field of each variant.
+        // We increase the size of the discriminant to avoid LLVM copying
+        // padding when it doesn't need to. This normally causes unaligned
+        // load/stores and excessive memcpy/memset operations. By using a
+        // bigger integer size, LLVM can be sure about its contents and
+        // won't be so conservative.
+
+        // Use the initial field alignment
+        let mut ity = if repr.c() || repr.int.is_some() {
+            min_ity
+        } else {
+            Integer::for_align(dl, start_align).unwrap_or(min_ity)
+        };
 
-    // Check to see if we should use a different type for the
-    // discriminant. We can safely use a type with the same size
-    // as the alignment of the first field of each variant.
-    // We increase the size of the discriminant to avoid LLVM copying
-    // padding when it doesn't need to. This normally causes unaligned
-    // load/stores and excessive memcpy/memset operations. By using a
-    // bigger integer size, LLVM can be sure about its contents and
-    // won't be so conservative.
-
-    // Use the initial field alignment
-    let mut ity = if repr.c() || repr.int.is_some() {
-        min_ity
-    } else {
-        Integer::for_align(dl, start_align).unwrap_or(min_ity)
-    };
-
-    // If the alignment is not larger than the chosen discriminant size,
-    // don't use the alignment as the final size.
-    if ity <= min_ity {
-        ity = min_ity;
-    } else {
-        // Patch up the variants' first few fields.
-        let old_ity_size = min_ity.size();
-        let new_ity_size = ity.size();
-        for variant in &mut layout_variants {
-            match variant.fields {
-                FieldsShape::Arbitrary { ref mut offsets, .. } => {
-                    for i in offsets {
-                        if *i <= old_ity_size {
-                            assert_eq!(*i, old_ity_size);
-                            *i = new_ity_size;
+        // If the alignment is not larger than the chosen discriminant size,
+        // don't use the alignment as the final size.
+        if ity <= min_ity {
+            ity = min_ity;
+        } else {
+            // Patch up the variants' first few fields.
+            let old_ity_size = min_ity.size();
+            let new_ity_size = ity.size();
+            for variant in &mut layout_variants {
+                match variant.fields {
+                    FieldsShape::Arbitrary { ref mut offsets, .. } => {
+                        for i in offsets {
+                            if *i <= old_ity_size {
+                                assert_eq!(*i, old_ity_size);
+                                *i = new_ity_size;
+                            }
+                        }
+                        // We might be making the struct larger.
+                        if variant.size <= old_ity_size {
+                            variant.size = new_ity_size;
                         }
                     }
-                    // We might be making the struct larger.
-                    if variant.size <= old_ity_size {
-                        variant.size = new_ity_size;
+                    FieldsShape::Primitive | FieldsShape::Array { .. } | FieldsShape::Union(..) => {
+                        panic!("encountered a non-arbitrary layout during enum layout")
                     }
                 }
-                FieldsShape::Primitive | FieldsShape::Array { .. } | FieldsShape::Union(..) => {
-                    panic!("encountered a non-arbitrary layout during enum layout")
-                }
             }
         }
-    }
 
-    let tag_mask = ity.size().unsigned_int_max();
-    let tag = Scalar::Initialized {
-        value: Primitive::Int(ity, signed),
-        valid_range: WrappingRange {
-            start: (min as u128 & tag_mask),
-            end: (max as u128 & tag_mask),
-        },
-    };
-    let mut abi = Abi::Aggregate { sized: true };
-
-    if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
-        abi = Abi::Uninhabited;
-    } else if tag.size(dl) == size {
-        // Make sure we only use scalar layout when the enum is entirely its
-        // own tag (i.e. it has no padding nor any non-ZST variant fields).
-        abi = Abi::Scalar(tag);
-    } else {
-        // Try to use a ScalarPair for all tagged enums.
-        // That's possible only if we can find a common primitive type for all variants.
-        let mut common_prim = None;
-        let mut common_prim_initialized_in_all_variants = true;
-        for (field_layouts, layout_variant) in iter::zip(variants, &layout_variants) {
-            let FieldsShape::Arbitrary { ref offsets, .. } = layout_variant.fields else {
-                panic!("encountered a non-arbitrary layout during enum layout");
-            };
-            // We skip *all* ZST here and later check if we are good in terms of alignment.
-            // This lets us handle some cases involving aligned ZST.
-            let mut fields = iter::zip(field_layouts, offsets).filter(|p| !p.0.is_zst());
-            let (field, offset) = match (fields.next(), fields.next()) {
-                (None, None) => {
-                    common_prim_initialized_in_all_variants = false;
-                    continue;
-                }
-                (Some(pair), None) => pair,
-                _ => {
-                    common_prim = None;
-                    break;
-                }
-            };
-            let prim = match field.abi {
-                Abi::Scalar(scalar) => {
-                    common_prim_initialized_in_all_variants &=
-                        matches!(scalar, Scalar::Initialized { .. });
-                    scalar.primitive()
-                }
-                _ => {
-                    common_prim = None;
-                    break;
-                }
-            };
-            if let Some((old_prim, common_offset)) = common_prim {
-                // All variants must be at the same offset
-                if offset != common_offset {
-                    common_prim = None;
-                    break;
-                }
-                // This is pretty conservative. We could go fancier
-                // by realising that (u8, u8) could just cohabit with
-                // u16 or even u32.
-                let new_prim = match (old_prim, prim) {
-                    // Allow all identical primitives.
-                    (x, y) if x == y => x,
-                    // Allow integers of the same size with differing signedness.
-                    // We arbitrarily choose the signedness of the first variant.
-                    (p @ Primitive::Int(x, _), Primitive::Int(y, _)) if x == y => p,
-                    // Allow integers mixed with pointers of the same layout.
-                    // We must represent this using a pointer, to avoid
-                    // roundtripping pointers through ptrtoint/inttoptr.
-                    (p @ Primitive::Pointer(_), i @ Primitive::Int(..))
-                    | (i @ Primitive::Int(..), p @ Primitive::Pointer(_))
-                        if p.size(dl) == i.size(dl) && p.align(dl) == i.align(dl) =>
-                    {
-                        p
+        let tag_mask = ity.size().unsigned_int_max();
+        let tag = Scalar::Initialized {
+            value: Primitive::Int(ity, signed),
+            valid_range: WrappingRange {
+                start: (min as u128 & tag_mask),
+                end: (max as u128 & tag_mask),
+            },
+        };
+        let mut abi = Abi::Aggregate { sized: true };
+
+        if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
+            abi = Abi::Uninhabited;
+        } else if tag.size(dl) == size {
+            // Make sure we only use scalar layout when the enum is entirely its
+            // own tag (i.e. it has no padding nor any non-ZST variant fields).
+            abi = Abi::Scalar(tag);
+        } else {
+            // Try to use a ScalarPair for all tagged enums.
+            // That's possible only if we can find a common primitive type for all variants.
+            let mut common_prim = None;
+            let mut common_prim_initialized_in_all_variants = true;
+            for (field_layouts, layout_variant) in iter::zip(variants, &layout_variants) {
+                let FieldsShape::Arbitrary { ref offsets, .. } = layout_variant.fields else {
+                    panic!("encountered a non-arbitrary layout during enum layout");
+                };
+                // We skip *all* ZST here and later check if we are good in terms of alignment.
+                // This lets us handle some cases involving aligned ZST.
+                let mut fields = iter::zip(field_layouts, offsets).filter(|p| !p.0.is_zst());
+                let (field, offset) = match (fields.next(), fields.next()) {
+                    (None, None) => {
+                        common_prim_initialized_in_all_variants = false;
+                        continue;
                     }
+                    (Some(pair), None) => pair,
                     _ => {
                         common_prim = None;
                         break;
                     }
                 };
-                // We may be updating the primitive here, for example from int->ptr.
-                common_prim = Some((new_prim, common_offset));
-            } else {
-                common_prim = Some((prim, offset));
+                let prim = match field.abi {
+                    Abi::Scalar(scalar) => {
+                        common_prim_initialized_in_all_variants &=
+                            matches!(scalar, Scalar::Initialized { .. });
+                        scalar.primitive()
+                    }
+                    _ => {
+                        common_prim = None;
+                        break;
+                    }
+                };
+                if let Some((old_prim, common_offset)) = common_prim {
+                    // All variants must be at the same offset
+                    if offset != common_offset {
+                        common_prim = None;
+                        break;
+                    }
+                    // This is pretty conservative. We could go fancier
+                    // by realising that (u8, u8) could just cohabit with
+                    // u16 or even u32.
+                    let new_prim = match (old_prim, prim) {
+                        // Allow all identical primitives.
+                        (x, y) if x == y => x,
+                        // Allow integers of the same size with differing signedness.
+                        // We arbitrarily choose the signedness of the first variant.
+                        (p @ Primitive::Int(x, _), Primitive::Int(y, _)) if x == y => p,
+                        // Allow integers mixed with pointers of the same layout.
+                        // We must represent this using a pointer, to avoid
+                        // roundtripping pointers through ptrtoint/inttoptr.
+                        (p @ Primitive::Pointer(_), i @ Primitive::Int(..))
+                        | (i @ Primitive::Int(..), p @ Primitive::Pointer(_))
+                            if p.size(dl) == i.size(dl) && p.align(dl) == i.align(dl) =>
+                        {
+                            p
+                        }
+                        _ => {
+                            common_prim = None;
+                            break;
+                        }
+                    };
+                    // We may be updating the primitive here, for example from int->ptr.
+                    common_prim = Some((new_prim, common_offset));
+                } else {
+                    common_prim = Some((prim, offset));
+                }
             }
-        }
-        if let Some((prim, offset)) = common_prim {
-            let prim_scalar = if common_prim_initialized_in_all_variants {
-                let size = prim.size(dl);
-                assert!(size.bits() <= 128);
-                Scalar::Initialized { value: prim, valid_range: WrappingRange::full(size) }
-            } else {
-                // Common prim might be uninit.
-                Scalar::Union { value: prim }
-            };
-            let pair = layout_calc.scalar_pair::<FieldIdx, VariantIdx>(tag, prim_scalar);
-            let pair_offsets = match pair.fields {
-                FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
-                    assert_eq!(memory_index.raw, [0, 1]);
-                    offsets
+            if let Some((prim, offset)) = common_prim {
+                let prim_scalar = if common_prim_initialized_in_all_variants {
+                    let size = prim.size(dl);
+                    assert!(size.bits() <= 128);
+                    Scalar::Initialized { value: prim, valid_range: WrappingRange::full(size) }
+                } else {
+                    // Common prim might be uninit.
+                    Scalar::Union { value: prim }
+                };
+                let pair = self.scalar_pair::<FieldIdx, VariantIdx>(tag, prim_scalar);
+                let pair_offsets = match pair.fields {
+                    FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
+                        assert_eq!(memory_index.raw, [0, 1]);
+                        offsets
+                    }
+                    _ => panic!("encountered a non-arbitrary layout during enum layout"),
+                };
+                if pair_offsets[FieldIdx::new(0)] == Size::ZERO
+                    && pair_offsets[FieldIdx::new(1)] == *offset
+                    && align == pair.align
+                    && size == pair.size
+                {
+                    // We can use `ScalarPair` only when it matches our
+                    // already computed layout (including `#[repr(C)]`).
+                    abi = pair.abi;
                 }
-                _ => panic!("encountered a non-arbitrary layout during enum layout"),
-            };
-            if pair_offsets[FieldIdx::new(0)] == Size::ZERO
-                && pair_offsets[FieldIdx::new(1)] == *offset
-                && align == pair.align
-                && size == pair.size
-            {
-                // We can use `ScalarPair` only when it matches our
-                // already computed layout (including `#[repr(C)]`).
-                abi = pair.abi;
             }
         }
-    }
 
-    // If we pick a "clever" (by-value) ABI, we might have to adjust the ABI of the
-    // variants to ensure they are consistent. This is because a downcast is
-    // semantically a NOP, and thus should not affect layout.
-    if matches!(abi, Abi::Scalar(..) | Abi::ScalarPair(..)) {
-        for variant in &mut layout_variants {
-            // We only do this for variants with fields; the others are not accessed anyway.
-            // Also do not overwrite any already existing "clever" ABIs.
-            if variant.fields.count() > 0 && matches!(variant.abi, Abi::Aggregate { .. }) {
-                variant.abi = abi;
-                // Also need to bump up the size and alignment, so that the entire value fits
-                // in here.
-                variant.size = cmp::max(variant.size, size);
-                variant.align.abi = cmp::max(variant.align.abi, align.abi);
+        // If we pick a "clever" (by-value) ABI, we might have to adjust the ABI of the
+        // variants to ensure they are consistent. This is because a downcast is
+        // semantically a NOP, and thus should not affect layout.
+        if matches!(abi, Abi::Scalar(..) | Abi::ScalarPair(..)) {
+            for variant in &mut layout_variants {
+                // We only do this for variants with fields; the others are not accessed anyway.
+                // Also do not overwrite any already existing "clever" ABIs.
+                if variant.fields.count() > 0 && matches!(variant.abi, Abi::Aggregate { .. }) {
+                    variant.abi = abi;
+                    // Also need to bump up the size and alignment, so that the entire value fits
+                    // in here.
+                    variant.size = cmp::max(variant.size, size);
+                    variant.align.abi = cmp::max(variant.align.abi, align.abi);
+                }
             }
         }
-    }
 
-    let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag);
-
-    let tagged_layout = LayoutS {
-        variants: Variants::Multiple {
-            tag,
-            tag_encoding: TagEncoding::Direct,
-            tag_field: 0,
-            variants: IndexVec::new(),
-        },
-        fields: FieldsShape::Arbitrary { offsets: [Size::ZERO].into(), memory_index: [0].into() },
-        largest_niche,
-        abi,
-        align,
-        size,
-        max_repr_align,
-        unadjusted_abi_align,
-    };
-
-    let tagged_layout = TmpLayout { layout: tagged_layout, variants: layout_variants };
-
-    let mut best_layout = match (tagged_layout, niche_filling_layout) {
-        (tl, Some(nl)) => {
-            // Pick the smaller layout; otherwise,
-            // pick the layout with the larger niche; otherwise,
-            // pick tagged as it has simpler codegen.
-            use cmp::Ordering::*;
-            let niche_size = |tmp_l: &TmpLayout<FieldIdx, VariantIdx>| {
-                tmp_l.layout.largest_niche.map_or(0, |n| n.available(dl))
-            };
-            match (tl.layout.size.cmp(&nl.layout.size), niche_size(&tl).cmp(&niche_size(&nl))) {
-                (Greater, _) => nl,
-                (Equal, Less) => nl,
-                _ => tl,
-            }
-        }
-        (tl, None) => tl,
-    };
+        let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag);
 
-    // Now we can intern the variant layouts and store them in the enum layout.
-    best_layout.layout.variants = match best_layout.layout.variants {
-        Variants::Multiple { tag, tag_encoding, tag_field, .. } => {
-            Variants::Multiple { tag, tag_encoding, tag_field, variants: best_layout.variants }
-        }
-        Variants::Single { .. } => {
-            panic!("encountered a single-variant enum during multi-variant layout")
-        }
-    };
-    Some(best_layout.layout)
-}
+        let tagged_layout = LayoutS {
+            variants: Variants::Multiple {
+                tag,
+                tag_encoding: TagEncoding::Direct,
+                tag_field: 0,
+                variants: IndexVec::new(),
+            },
+            fields: FieldsShape::Arbitrary {
+                offsets: [Size::ZERO].into(),
+                memory_index: [0].into(),
+            },
+            largest_niche,
+            abi,
+            align,
+            size,
+            max_repr_align,
+            unadjusted_abi_align,
+        };
 
-/// Determines towards which end of a struct layout optimizations will try to place the best niches.
-enum NicheBias {
-    Start,
-    End,
-}
+        let tagged_layout = TmpLayout { layout: tagged_layout, variants: layout_variants };
 
-fn univariant<
-    'a,
-    FieldIdx: Idx,
-    VariantIdx: Idx,
-    F: Deref<Target = &'a LayoutS<FieldIdx, VariantIdx>> + fmt::Debug,
->(
-    this: &(impl LayoutCalculator + ?Sized),
-    dl: &TargetDataLayout,
-    fields: &IndexSlice<FieldIdx, F>,
-    repr: &ReprOptions,
-    kind: StructKind,
-    niche_bias: NicheBias,
-) -> Option<LayoutS<FieldIdx, VariantIdx>> {
-    let pack = repr.pack;
-    let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };
-    let mut max_repr_align = repr.align;
-    let mut inverse_memory_index: IndexVec<u32, FieldIdx> = fields.indices().collect();
-    let optimize_field_order = !repr.inhibit_struct_field_reordering();
-    if optimize_field_order && fields.len() > 1 {
-        let end = if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
-        let optimizing = &mut inverse_memory_index.raw[..end];
-        let fields_excluding_tail = &fields.raw[..end];
-
-        // If `-Z randomize-layout` was enabled for the type definition we can shuffle
-        // the field ordering to try and catch some code making assumptions about layouts
-        // we don't guarantee.
-        if repr.can_randomize_type_layout() && cfg!(feature = "randomize") {
-            #[cfg(feature = "randomize")]
-            {
-                use rand::seq::SliceRandom;
-                use rand::SeedableRng;
-                // `ReprOptions.field_shuffle_seed` is a deterministic seed we can use to randomize field
-                // ordering.
-                let mut rng =
-                    rand_xoshiro::Xoshiro128StarStar::seed_from_u64(repr.field_shuffle_seed);
-
-                // Shuffle the ordering of the fields.
-                optimizing.shuffle(&mut rng);
+        let mut best_layout = match (tagged_layout, niche_filling_layout) {
+            (tl, Some(nl)) => {
+                // Pick the smaller layout; otherwise,
+                // pick the layout with the larger niche; otherwise,
+                // pick tagged as it has simpler codegen.
+                use cmp::Ordering::*;
+                let niche_size = |tmp_l: &TmpLayout<FieldIdx, VariantIdx>| {
+                    tmp_l.layout.largest_niche.map_or(0, |n| n.available(dl))
+                };
+                match (tl.layout.size.cmp(&nl.layout.size), niche_size(&tl).cmp(&niche_size(&nl))) {
+                    (Greater, _) => nl,
+                    (Equal, Less) => nl,
+                    _ => tl,
+                }
             }
-            // Otherwise we just leave things alone and actually optimize the type's fields
-        } else {
-            // To allow unsizing `&Foo<Type>` -> `&Foo<dyn Trait>`, the layout of the struct must
-            // not depend on the layout of the tail.
-            let max_field_align =
-                fields_excluding_tail.iter().map(|f| f.align.abi.bytes()).max().unwrap_or(1);
-            let largest_niche_size = fields_excluding_tail
-                .iter()
-                .filter_map(|f| f.largest_niche)
-                .map(|n| n.available(dl))
-                .max()
-                .unwrap_or(0);
-
-            // Calculates a sort key to group fields by their alignment or possibly some
-            // size-derived pseudo-alignment.
-            let alignment_group_key = |layout: &F| {
-                // The two branches here return values that cannot be meaningfully compared with
-                // each other. However, we know that consistently for all executions of
-                // `alignment_group_key`, one or the other branch will be taken, so this is okay.
-                if let Some(pack) = pack {
-                    // Return the packed alignment in bytes.
-                    layout.align.abi.min(pack).bytes()
-                } else {
-                    // Returns `log2(effective-align)`. The calculation assumes that size is an
-                    // integer multiple of align, except for ZSTs.
-                    let align = layout.align.abi.bytes();
-                    let size = layout.size.bytes();
-                    let niche_size = layout.largest_niche.map(|n| n.available(dl)).unwrap_or(0);
-                    // Group [u8; 4] with align-4 or [u8; 6] with align-2 fields.
-                    let size_as_align = align.max(size).trailing_zeros();
-                    let size_as_align = if largest_niche_size > 0 {
-                        match niche_bias {
-                            // Given `A(u8, [u8; 16])` and `B(bool, [u8; 16])` we want to bump the
-                            // array to the front in the first case (for aligned loads) but keep
-                            // the bool in front in the second case for its niches.
-                            NicheBias::Start => max_field_align.trailing_zeros().min(size_as_align),
-                            // When moving niches towards the end of the struct then for
-                            // A((u8, u8, u8, bool), (u8, bool, u8)) we want to keep the first tuple
-                            // in the align-1 group because its bool can be moved closer to the end.
-                            NicheBias::End if niche_size == largest_niche_size => {
-                                align.trailing_zeros()
-                            }
-                            NicheBias::End => size_as_align,
-                        }
+            (tl, None) => tl,
+        };
+
+        // Now we can intern the variant layouts and store them in the enum layout.
+        best_layout.layout.variants = match best_layout.layout.variants {
+            Variants::Multiple { tag, tag_encoding, tag_field, .. } => {
+                Variants::Multiple { tag, tag_encoding, tag_field, variants: best_layout.variants }
+            }
+            Variants::Single { .. } => {
+                panic!("encountered a single-variant enum during multi-variant layout")
+            }
+        };
+        Ok(best_layout.layout)
+    }
+
+    fn univariant_biased<
+        'a,
+        FieldIdx: Idx,
+        VariantIdx: Idx,
+        F: Deref<Target = &'a LayoutS<FieldIdx, VariantIdx>> + fmt::Debug + Copy,
+    >(
+        &self,
+        fields: &IndexSlice<FieldIdx, F>,
+        repr: &ReprOptions,
+        kind: StructKind,
+        niche_bias: NicheBias,
+    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
+        let dl = self.cx.data_layout();
+        let pack = repr.pack;
+        let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };
+        let mut max_repr_align = repr.align;
+        let mut inverse_memory_index: IndexVec<u32, FieldIdx> = fields.indices().collect();
+        let optimize_field_order = !repr.inhibit_struct_field_reordering();
+        if optimize_field_order && fields.len() > 1 {
+            let end =
+                if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
+            let optimizing = &mut inverse_memory_index.raw[..end];
+            let fields_excluding_tail = &fields.raw[..end];
+
+            // If `-Z randomize-layout` was enabled for the type definition we can shuffle
+            // the field ordering to try and catch some code making assumptions about layouts
+            // we don't guarantee.
+            if repr.can_randomize_type_layout() && cfg!(feature = "randomize") {
+                #[cfg(feature = "randomize")]
+                {
+                    use rand::seq::SliceRandom;
+                    use rand::SeedableRng;
+                    // `ReprOptions.field_shuffle_seed` is a deterministic seed we can use to randomize field
+                    // ordering.
+                    let mut rng =
+                        rand_xoshiro::Xoshiro128StarStar::seed_from_u64(repr.field_shuffle_seed);
+
+                    // Shuffle the ordering of the fields.
+                    optimizing.shuffle(&mut rng);
+                }
+                // Otherwise we just leave things alone and actually optimize the type's fields
+            } else {
+                // To allow unsizing `&Foo<Type>` -> `&Foo<dyn Trait>`, the layout of the struct must
+                // not depend on the layout of the tail.
+                let max_field_align =
+                    fields_excluding_tail.iter().map(|f| f.align.abi.bytes()).max().unwrap_or(1);
+                let largest_niche_size = fields_excluding_tail
+                    .iter()
+                    .filter_map(|f| f.largest_niche)
+                    .map(|n| n.available(dl))
+                    .max()
+                    .unwrap_or(0);
+
+                // Calculates a sort key to group fields by their alignment or possibly some
+                // size-derived pseudo-alignment.
+                let alignment_group_key = |layout: &F| {
+                    // The two branches here return values that cannot be meaningfully compared with
+                    // each other. However, we know that consistently for all executions of
+                    // `alignment_group_key`, one or the other branch will be taken, so this is okay.
+                    if let Some(pack) = pack {
+                        // Return the packed alignment in bytes.
+                        layout.align.abi.min(pack).bytes()
                     } else {
-                        size_as_align
-                    };
-                    size_as_align as u64
+                        // Returns `log2(effective-align)`. The calculation assumes that size is an
+                        // integer multiple of align, except for ZSTs.
+                        let align = layout.align.abi.bytes();
+                        let size = layout.size.bytes();
+                        let niche_size = layout.largest_niche.map(|n| n.available(dl)).unwrap_or(0);
+                        // Group [u8; 4] with align-4 or [u8; 6] with align-2 fields.
+                        let size_as_align = align.max(size).trailing_zeros();
+                        let size_as_align = if largest_niche_size > 0 {
+                            match niche_bias {
+                                // Given `A(u8, [u8; 16])` and `B(bool, [u8; 16])` we want to bump the
+                                // array to the front in the first case (for aligned loads) but keep
+                                // the bool in front in the second case for its niches.
+                                NicheBias::Start => {
+                                    max_field_align.trailing_zeros().min(size_as_align)
+                                }
+                                // When moving niches towards the end of the struct, for
+                                // A((u8, u8, u8, bool), (u8, bool, u8)) we want to keep the first tuple
+                                // in the align-1 group because its bool can be moved closer to the end.
+                                NicheBias::End if niche_size == largest_niche_size => {
+                                    align.trailing_zeros()
+                                }
+                                NicheBias::End => size_as_align,
+                            }
+                        } else {
+                            size_as_align
+                        };
+                        size_as_align as u64
+                    }
+                };
+
+                match kind {
+                    StructKind::AlwaysSized | StructKind::MaybeUnsized => {
+                        // Currently `LayoutS` only exposes a single niche so sorting is usually
+                        // sufficient to get one niche into the preferred position. If it ever
+                        // supported multiple niches then a more advanced pick-and-pack approach could
+                        // provide better results. But even for the single-niche case it's not
+                        // optimal. E.g. for A(u32, (bool, u8), u16) it would be possible to move the
+                        // bool to the front but it would require packing the tuple together with the
+                        // u16 to build a 4-byte group so that the u32 can be placed after it without
+                        // padding. This kind of packing can't be achieved by sorting.
+                        optimizing.sort_by_key(|&x| {
+                            let f = &fields[x];
+                            let field_size = f.size.bytes();
+                            let niche_size = f.largest_niche.map_or(0, |n| n.available(dl));
+                            let niche_size_key = match niche_bias {
+                                // large niche first
+                                NicheBias::Start => !niche_size,
+                                // large niche last
+                                NicheBias::End => niche_size,
+                            };
+                            let inner_niche_offset_key = match niche_bias {
+                                NicheBias::Start => f.largest_niche.map_or(0, |n| n.offset.bytes()),
+                                NicheBias::End => f.largest_niche.map_or(0, |n| {
+                                    !(field_size - n.value.size(dl).bytes() - n.offset.bytes())
+                                }),
+                            };
+
+                            (
+                                // Then place largest alignments first.
+                                cmp::Reverse(alignment_group_key(f)),
+                                // Then prioritize niche placement within the alignment group
+                                // according to `niche_bias`.
+                                niche_size_key,
+                                // Then among fields with equally-sized niches prefer the ones
+                                // closer to the start/end of the field.
+                                inner_niche_offset_key,
+                            )
+                        });
+                    }
+
+                    StructKind::Prefixed(..) => {
+                        // Sort in ascending alignment so that the layout stays optimal
+                        // regardless of the prefix.
+                        // And put the largest niche in an alignment group at the end
+                        // so it can be used as discriminant in jagged enums
+                        optimizing.sort_by_key(|&x| {
+                            let f = &fields[x];
+                            let niche_size = f.largest_niche.map_or(0, |n| n.available(dl));
+                            (alignment_group_key(f), niche_size)
+                        });
+                    }
                 }
-            };
 
-            match kind {
-                StructKind::AlwaysSized | StructKind::MaybeUnsized => {
-                    // Currently `LayoutS` only exposes a single niche so sorting is usually
-                    // sufficient to get one niche into the preferred position. If it ever
-                    // supported multiple niches then a more advanced pick-and-pack approach could
-                    // provide better results. But even for the single-niche cache it's not
-                    // optimal. E.g. for A(u32, (bool, u8), u16) it would be possible to move the
-                    // bool to the front but it would require packing the tuple together with the
-                    // u16 to build a 4-byte group so that the u32 can be placed after it without
-                    // padding. This kind of packing can't be achieved by sorting.
-                    optimizing.sort_by_key(|&x| {
-                        let f = &fields[x];
-                        let field_size = f.size.bytes();
-                        let niche_size = f.largest_niche.map_or(0, |n| n.available(dl));
-                        let niche_size_key = match niche_bias {
-                            // large niche first
-                            NicheBias::Start => !niche_size,
-                            // large niche last
-                            NicheBias::End => niche_size,
-                        };
-                        let inner_niche_offset_key = match niche_bias {
-                            NicheBias::Start => f.largest_niche.map_or(0, |n| n.offset.bytes()),
-                            NicheBias::End => f.largest_niche.map_or(0, |n| {
-                                !(field_size - n.value.size(dl).bytes() - n.offset.bytes())
-                            }),
-                        };
+                // FIXME(Kixiron): We can always shuffle fields within a given alignment class
+                //                 regardless of the status of `-Z randomize-layout`
+            }
+        }
+        // inverse_memory_index holds field indices by increasing memory offset.
+        // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
+        // We now write field offsets to the corresponding offset slot;
+        // field 5 with offset 0 puts 0 in offsets[5].
+        // At the bottom of this function, we invert `inverse_memory_index` to
+        // produce `memory_index` (see `invert_bijective_mapping`).
+        let mut unsized_field = None::<&F>;
+        let mut offsets = IndexVec::from_elem(Size::ZERO, fields);
+        let mut offset = Size::ZERO;
+        let mut largest_niche = None;
+        let mut largest_niche_available = 0;
+        if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
+            let prefix_align =
+                if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
+            align = align.max(AbiAndPrefAlign::new(prefix_align));
+            offset = prefix_size.align_to(prefix_align);
+        }
+        for &i in &inverse_memory_index {
+            let field = &fields[i];
+            if let Some(unsized_field) = unsized_field {
+                return Err(LayoutCalculatorError::UnexpectedUnsized(*unsized_field));
+            }
 
-                        (
-                            // Then place largest alignments first.
-                            cmp::Reverse(alignment_group_key(f)),
-                            // Then prioritize niche placement within alignment group according to
-                            // `niche_bias_start`.
-                            niche_size_key,
-                            // Then among fields with equally-sized niches prefer the ones
-                            // closer to the start/end of the field.
-                            inner_niche_offset_key,
-                        )
-                    });
+            if field.is_unsized() {
+                if let StructKind::MaybeUnsized = kind {
+                    unsized_field = Some(field);
+                } else {
+                    return Err(LayoutCalculatorError::UnexpectedUnsized(*field));
                 }
+            }
+
+            // Invariant: offset < dl.obj_size_bound() <= 1<<61
+            let field_align = if let Some(pack) = pack {
+                field.align.min(AbiAndPrefAlign::new(pack))
+            } else {
+                field.align
+            };
+            offset = offset.align_to(field_align.abi);
+            align = align.max(field_align);
+            max_repr_align = max_repr_align.max(field.max_repr_align);
 
-                StructKind::Prefixed(..) => {
-                    // Sort in ascending alignment so that the layout stays optimal
-                    // regardless of the prefix.
-                    // And put the largest niche in an alignment group at the end
-                    // so it can be used as discriminant in jagged enums
-                    optimizing.sort_by_key(|&x| {
-                        let f = &fields[x];
-                        let niche_size = f.largest_niche.map_or(0, |n| n.available(dl));
-                        (alignment_group_key(f), niche_size)
-                    });
+            debug!("univariant offset: {:?} field: {:#?}", offset, field);
+            offsets[i] = offset;
+
+            if let Some(mut niche) = field.largest_niche {
+                let available = niche.available(dl);
+                // Pick up larger niches.
+                let prefer_new_niche = match niche_bias {
+                    NicheBias::Start => available > largest_niche_available,
+                    // if there are several niches of the same size then pick the last one
+                    NicheBias::End => available >= largest_niche_available,
+                };
+                if prefer_new_niche {
+                    largest_niche_available = available;
+                    niche.offset += offset;
+                    largest_niche = Some(niche);
                 }
             }
 
-            // FIXME(Kixiron): We can always shuffle fields within a given alignment class
-            //                 regardless of the status of `-Z randomize-layout`
-        }
-    }
-    // inverse_memory_index holds field indices by increasing memory offset.
-    // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
-    // We now write field offsets to the corresponding offset slot;
-    // field 5 with offset 0 puts 0 in offsets[5].
-    // At the bottom of this function, we invert `inverse_memory_index` to
-    // produce `memory_index` (see `invert_mapping`).
-    let mut sized = true;
-    let mut offsets = IndexVec::from_elem(Size::ZERO, fields);
-    let mut offset = Size::ZERO;
-    let mut largest_niche = None;
-    let mut largest_niche_available = 0;
-    if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
-        let prefix_align =
-            if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
-        align = align.max(AbiAndPrefAlign::new(prefix_align));
-        offset = prefix_size.align_to(prefix_align);
-    }
-    for &i in &inverse_memory_index {
-        let field = &fields[i];
-        if !sized {
-            this.delayed_bug(format!(
-                "univariant: field #{} comes after unsized field",
-                offsets.len(),
-            ));
+            offset =
+                offset.checked_add(field.size, dl).ok_or(LayoutCalculatorError::SizeOverflow)?;
         }
 
-        if field.is_unsized() {
-            sized = false;
+        // The unadjusted ABI alignment does not include repr(align), but does include repr(pack).
+        // See documentation on `LayoutS::unadjusted_abi_align`.
+        let unadjusted_abi_align = align.abi;
+        if let Some(repr_align) = repr.align {
+            align = align.max(AbiAndPrefAlign::new(repr_align));
         }
+        // `align` must not be modified after this point, or `unadjusted_abi_align` could be inaccurate.
+        let align = align;
 
-        // Invariant: offset < dl.obj_size_bound() <= 1<<61
-        let field_align = if let Some(pack) = pack {
-            field.align.min(AbiAndPrefAlign::new(pack))
+        debug!("univariant min_size: {:?}", offset);
+        let min_size = offset;
+        // As stated above, inverse_memory_index holds field indices by increasing offset.
+        // This makes it an already-sorted view of the offsets vec.
+        // To invert it, consider:
+        // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
+        // Field 5 would be the first element, so memory_index is i:
+        // Note: if we didn't optimize, it's already right.
+        let memory_index = if optimize_field_order {
+            inverse_memory_index.invert_bijective_mapping()
         } else {
-            field.align
+            debug_assert!(inverse_memory_index.iter().copied().eq(fields.indices()));
+            inverse_memory_index.into_iter().map(|it| it.index() as u32).collect()
         };
-        offset = offset.align_to(field_align.abi);
-        align = align.max(field_align);
-        max_repr_align = max_repr_align.max(field.max_repr_align);
-
-        debug!("univariant offset: {:?} field: {:#?}", offset, field);
-        offsets[i] = offset;
-
-        if let Some(mut niche) = field.largest_niche {
-            let available = niche.available(dl);
-            // Pick up larger niches.
-            let prefer_new_niche = match niche_bias {
-                NicheBias::Start => available > largest_niche_available,
-                // if there are several niches of the same size then pick the last one
-                NicheBias::End => available >= largest_niche_available,
-            };
-            if prefer_new_niche {
-                largest_niche_available = available;
-                niche.offset += offset;
-                largest_niche = Some(niche);
-            }
+        let size = min_size.align_to(align.abi);
+        // FIXME(oli-obk): deduplicate and harden these checks
+        if size.bytes() >= dl.obj_size_bound() {
+            return Err(LayoutCalculatorError::SizeOverflow);
         }
+        let mut layout_of_single_non_zst_field = None;
+        let sized = unsized_field.is_none();
+        let mut abi = Abi::Aggregate { sized };
 
-        offset = offset.checked_add(field.size, dl)?;
-    }
+        let optimize_abi = !repr.inhibit_newtype_abi_optimization();
 
-    // The unadjusted ABI alignment does not include repr(align), but does include repr(pack).
-    // See documentation on `LayoutS::unadjusted_abi_align`.
-    let unadjusted_abi_align = align.abi;
-    if let Some(repr_align) = repr.align {
-        align = align.max(AbiAndPrefAlign::new(repr_align));
-    }
-    // `align` must not be modified after this point, or `unadjusted_abi_align` could be inaccurate.
-    let align = align;
-
-    debug!("univariant min_size: {:?}", offset);
-    let min_size = offset;
-    // As stated above, inverse_memory_index holds field indices by increasing offset.
-    // This makes it an already-sorted view of the offsets vec.
-    // To invert it, consider:
-    // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
-    // Field 5 would be the first element, so memory_index is i:
-    // Note: if we didn't optimize, it's already right.
-    let memory_index = if optimize_field_order {
-        inverse_memory_index.invert_bijective_mapping()
-    } else {
-        debug_assert!(inverse_memory_index.iter().copied().eq(fields.indices()));
-        inverse_memory_index.into_iter().map(|it| it.index() as u32).collect()
-    };
-    let size = min_size.align_to(align.abi);
-    // FIXME(oli-obk): deduplicate and harden these checks
-    if size.bytes() >= dl.obj_size_bound() {
-        return None;
-    }
-    let mut layout_of_single_non_zst_field = None;
-    let mut abi = Abi::Aggregate { sized };
-
-    let optimize_abi = !repr.inhibit_newtype_abi_optimization();
-
-    // Try to make this a Scalar/ScalarPair.
-    if sized && size.bytes() > 0 {
-        // We skip *all* ZST here and later check if we are good in terms of alignment.
-        // This lets us handle some cases involving aligned ZST.
-        let mut non_zst_fields = fields.iter_enumerated().filter(|&(_, f)| !f.is_zst());
-
-        match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
-            // We have exactly one non-ZST field.
-            (Some((i, field)), None, None) => {
-                layout_of_single_non_zst_field = Some(field);
-
-                // Field fills the struct and it has a scalar or scalar pair ABI.
-                if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size {
-                    match field.abi {
-                        // For plain scalars, or vectors of them, we can't unpack
-                        // newtypes for `#[repr(C)]`, as that affects C ABIs.
-                        Abi::Scalar(_) | Abi::Vector { .. } if optimize_abi => {
-                            abi = field.abi;
-                        }
-                        // But scalar pairs are Rust-specific and get
-                        // treated as aggregates by C ABIs anyway.
-                        Abi::ScalarPair(..) => {
-                            abi = field.abi;
+        // Try to make this a Scalar/ScalarPair.
+        if sized && size.bytes() > 0 {
+            // We skip *all* ZST here and later check if we are good in terms of alignment.
+            // This lets us handle some cases involving aligned ZST.
+            let mut non_zst_fields = fields.iter_enumerated().filter(|&(_, f)| !f.is_zst());
+
+            match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
+                // We have exactly one non-ZST field.
+                (Some((i, field)), None, None) => {
+                    layout_of_single_non_zst_field = Some(field);
+
+                    // Field fills the struct and it has a scalar or scalar pair ABI.
+                    if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size
+                    {
+                        match field.abi {
+                            // For plain scalars, or vectors of them, we can't unpack
+                            // newtypes for `#[repr(C)]`, as that affects C ABIs.
+                            Abi::Scalar(_) | Abi::Vector { .. } if optimize_abi => {
+                                abi = field.abi;
+                            }
+                            // But scalar pairs are Rust-specific and get
+                            // treated as aggregates by C ABIs anyway.
+                            Abi::ScalarPair(..) => {
+                                abi = field.abi;
+                            }
+                            _ => {}
                         }
-                        _ => {}
                     }
                 }
-            }
 
-            // Two non-ZST fields, and they're both scalars.
-            (Some((i, a)), Some((j, b)), None) => {
-                match (a.abi, b.abi) {
-                    (Abi::Scalar(a), Abi::Scalar(b)) => {
-                        // Order by the memory placement, not source order.
-                        let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
-                            ((i, a), (j, b))
-                        } else {
-                            ((j, b), (i, a))
-                        };
-                        let pair = this.scalar_pair::<FieldIdx, VariantIdx>(a, b);
-                        let pair_offsets = match pair.fields {
-                            FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
-                                assert_eq!(memory_index.raw, [0, 1]);
-                                offsets
-                            }
-                            FieldsShape::Primitive
-                            | FieldsShape::Array { .. }
-                            | FieldsShape::Union(..) => {
-                                panic!("encountered a non-arbitrary layout during enum layout")
+                // Two non-ZST fields, and they're both scalars.
+                (Some((i, a)), Some((j, b)), None) => {
+                    match (a.abi, b.abi) {
+                        (Abi::Scalar(a), Abi::Scalar(b)) => {
+                            // Order by the memory placement, not source order.
+                            let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
+                                ((i, a), (j, b))
+                            } else {
+                                ((j, b), (i, a))
+                            };
+                            let pair = self.scalar_pair::<FieldIdx, VariantIdx>(a, b);
+                            let pair_offsets = match pair.fields {
+                                FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
+                                    assert_eq!(memory_index.raw, [0, 1]);
+                                    offsets
+                                }
+                                FieldsShape::Primitive
+                                | FieldsShape::Array { .. }
+                                | FieldsShape::Union(..) => {
+                                    panic!("encountered a non-arbitrary layout during enum layout")
+                                }
+                            };
+                            if offsets[i] == pair_offsets[FieldIdx::new(0)]
+                                && offsets[j] == pair_offsets[FieldIdx::new(1)]
+                                && align == pair.align
+                                && size == pair.size
+                            {
+                                // We can use `ScalarPair` only when it matches our
+                                // already computed layout (including `#[repr(C)]`).
+                                abi = pair.abi;
                             }
-                        };
-                        if offsets[i] == pair_offsets[FieldIdx::new(0)]
-                            && offsets[j] == pair_offsets[FieldIdx::new(1)]
-                            && align == pair.align
-                            && size == pair.size
-                        {
-                            // We can use `ScalarPair` only when it matches our
-                            // already computed layout (including `#[repr(C)]`).
-                            abi = pair.abi;
                         }
+                        _ => {}
                     }
-                    _ => {}
                 }
-            }
 
-            _ => {}
+                _ => {}
+            }
+        }
+        if fields.iter().any(|f| f.abi.is_uninhabited()) {
+            abi = Abi::Uninhabited;
         }
-    }
-    if fields.iter().any(|f| f.abi.is_uninhabited()) {
-        abi = Abi::Uninhabited;
-    }
 
-    let unadjusted_abi_align = if repr.transparent() {
-        match layout_of_single_non_zst_field {
-            Some(l) => l.unadjusted_abi_align,
-            None => {
-                // `repr(transparent)` with all ZST fields.
-                align.abi
+        let unadjusted_abi_align = if repr.transparent() {
+            match layout_of_single_non_zst_field {
+                Some(l) => l.unadjusted_abi_align,
+                None => {
+                    // `repr(transparent)` with all ZST fields.
+                    align.abi
+                }
             }
-        }
-    } else {
-        unadjusted_abi_align
-    };
-
-    Some(LayoutS {
-        variants: Variants::Single { index: VariantIdx::new(0) },
-        fields: FieldsShape::Arbitrary { offsets, memory_index },
-        abi,
-        largest_niche,
-        align,
-        size,
-        max_repr_align,
-        unadjusted_abi_align,
-    })
-}
+        } else {
+            unadjusted_abi_align
+        };
 
-fn format_field_niches<
-    'a,
-    FieldIdx: Idx,
-    VariantIdx: Idx,
-    F: Deref<Target = &'a LayoutS<FieldIdx, VariantIdx>> + fmt::Debug,
->(
-    layout: &LayoutS<FieldIdx, VariantIdx>,
-    fields: &IndexSlice<FieldIdx, F>,
-    dl: &TargetDataLayout,
-) -> String {
-    let mut s = String::new();
-    for i in layout.fields.index_by_increasing_offset() {
-        let offset = layout.fields.offset(i);
-        let f = &fields[FieldIdx::new(i)];
-        write!(s, "[o{}a{}s{}", offset.bytes(), f.align.abi.bytes(), f.size.bytes()).unwrap();
-        if let Some(n) = f.largest_niche {
-            write!(
-                s,
-                " n{}b{}s{}",
-                n.offset.bytes(),
-                n.available(dl).ilog2(),
-                n.value.size(dl).bytes()
-            )
-            .unwrap();
+        Ok(LayoutS {
+            variants: Variants::Single { index: VariantIdx::new(0) },
+            fields: FieldsShape::Arbitrary { offsets, memory_index },
+            abi,
+            largest_niche,
+            align,
+            size,
+            max_repr_align,
+            unadjusted_abi_align,
+        })
+    }
+
+    fn format_field_niches<
+        'a,
+        FieldIdx: Idx,
+        VariantIdx: Idx,
+        F: Deref<Target = &'a LayoutS<FieldIdx, VariantIdx>> + fmt::Debug,
+    >(
+        &self,
+        layout: &LayoutS<FieldIdx, VariantIdx>,
+        fields: &IndexSlice<FieldIdx, F>,
+    ) -> String {
+        let dl = self.cx.data_layout();
+        let mut s = String::new();
+        for i in layout.fields.index_by_increasing_offset() {
+            let offset = layout.fields.offset(i);
+            let f = &fields[FieldIdx::new(i)];
+            write!(s, "[o{}a{}s{}", offset.bytes(), f.align.abi.bytes(), f.size.bytes()).unwrap();
+            if let Some(n) = f.largest_niche {
+                write!(
+                    s,
+                    " n{}b{}s{}",
+                    n.offset.bytes(),
+                    n.available(dl).ilog2(),
+                    n.value.size(dl).bytes()
+                )
+                .unwrap();
+            }
+            write!(s, "] ").unwrap();
         }
-        write!(s, "] ").unwrap();
+        s
     }
-    s
 }
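
The hunk above moves the tagged-enum layout logic into `LayoutCalculator` methods; the optimizations themselves (upgrading `Abi::Aggregate` to `Abi::Scalar`/`Abi::ScalarPair`, tracking the largest niche) are carried over as-is. Below is a stand-alone sketch of their observable effect, not part of this diff; the `8` assumes a typical target with 4-byte-aligned `u32`, while the `Option<&u8>` equality is a language guarantee.

```rust
use std::mem::size_of;

fn main() {
    // Every variant holds a single 4-byte scalar at the same offset, so the
    // enum can be represented as a (tag, payload) ScalarPair: a small tag
    // plus the payload, padded to 8 bytes on common targets.
    #[allow(dead_code)]
    enum Pair {
        A(u32),
        B(i32),
    }
    assert_eq!(size_of::<Pair>(), 8);

    // Niche filling goes further when a payload has forbidden bit patterns:
    // `Option<&u8>` stores `None` in the reference's null niche, so no
    // separate tag is needed at all.
    assert_eq!(size_of::<Option<&u8>>(), size_of::<&u8>());
}
```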
diff --git a/compiler/rustc_abi/src/lib.rs b/compiler/rustc_abi/src/lib.rs
index 4e27ad0c366..5668d8992c8 100644
--- a/compiler/rustc_abi/src/lib.rs
+++ b/compiler/rustc_abi/src/lib.rs
@@ -26,7 +26,7 @@ mod layout;
 #[cfg(test)]
 mod tests;
 
-pub use layout::LayoutCalculator;
+pub use layout::{LayoutCalculator, LayoutCalculatorError};
 
 /// Requirements for a `StableHashingContext` to be used in this crate.
 /// This is a hack to allow using the `HashStable_Generic` derive macro
@@ -393,6 +393,14 @@ impl HasDataLayout for TargetDataLayout {
     }
 }
 
+// used by rust-analyzer
+impl HasDataLayout for &TargetDataLayout {
+    #[inline]
+    fn data_layout(&self) -> &TargetDataLayout {
+        (**self).data_layout()
+    }
+}
+
 /// Endianness of the target, which must match cfg(target-endian).
 #[derive(Copy, Clone, PartialEq, Eq)]
 pub enum Endian {
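
The new `impl HasDataLayout for &TargetDataLayout` above is the usual forward-to-the-referent delegation, so callers (here rust-analyzer) can pass a borrowed data layout wherever a generic `HasDataLayout` context is expected. A minimal sketch of the pattern with invented names, not the actual rustc traits:

```rust
// Hypothetical trait and context type, used only to illustrate the pattern.
trait HasUnits {
    fn units(&self) -> u32;
}

struct Ctx {
    units: u32,
}

impl HasUnits for Ctx {
    fn units(&self) -> u32 {
        self.units
    }
}

// Forward the impl through a reference so `&Ctx` works too.
impl HasUnits for &Ctx {
    fn units(&self) -> u32 {
        (**self).units()
    }
}

fn takes_any(cx: impl HasUnits) -> u32 {
    cx.units()
}

fn main() {
    let ctx = Ctx { units: 8 };
    assert_eq!(takes_any(&ctx), 8); // borrowed context, via the `&Ctx` impl
    assert_eq!(takes_any(ctx), 8); // owned context
}
```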
diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs
index 9705dd506b9..15c519dfcb4 100644
--- a/compiler/rustc_codegen_llvm/src/intrinsic.rs
+++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs
@@ -2066,14 +2066,14 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
         };
     }
 
-    arith_red!(simd_reduce_add_ordered: vector_reduce_add, vector_reduce_fadd, true, add, 0.0);
+    arith_red!(simd_reduce_add_ordered: vector_reduce_add, vector_reduce_fadd, true, add, -0.0);
     arith_red!(simd_reduce_mul_ordered: vector_reduce_mul, vector_reduce_fmul, true, mul, 1.0);
     arith_red!(
         simd_reduce_add_unordered: vector_reduce_add,
         vector_reduce_fadd_reassoc,
         false,
         add,
-        0.0
+        -0.0
     );
     arith_red!(
         simd_reduce_mul_unordered: vector_reduce_mul,
diff --git a/compiler/rustc_codegen_ssa/src/back/linker.rs b/compiler/rustc_codegen_ssa/src/back/linker.rs
index 7f5f9f03868..bda22255705 100644
--- a/compiler/rustc_codegen_ssa/src/back/linker.rs
+++ b/compiler/rustc_codegen_ssa/src/back/linker.rs
@@ -7,7 +7,9 @@ use std::{env, iter, mem, str};
 
 use cc::windows_registry;
 use rustc_hir::def_id::{CrateNum, LOCAL_CRATE};
-use rustc_metadata::{find_native_static_library, try_find_native_static_library};
+use rustc_metadata::{
+    find_native_static_library, try_find_native_dynamic_library, try_find_native_static_library,
+};
 use rustc_middle::bug;
 use rustc_middle::middle::dependency_format::Linkage;
 use rustc_middle::middle::exported_symbols;
@@ -876,7 +878,13 @@ impl<'a> Linker for MsvcLinker<'a> {
     }
 
     fn link_dylib_by_name(&mut self, name: &str, verbatim: bool, _as_needed: bool) {
-        self.link_arg(format!("{}{}", name, if verbatim { "" } else { ".lib" }));
+        // On MSVC-like targets rustc also accepts import libraries that use the alternative
+        // `libfoo.a` naming scheme, which the linker itself does not understand, so we search
+        // for such libraries manually and pass their full paths to the linker.
+        if let Some(path) = try_find_native_dynamic_library(self.sess, name, verbatim) {
+            self.link_arg(path);
+        } else {
+            self.link_arg(format!("{}{}", name, if verbatim { "" } else { ".lib" }));
+        }
     }
 
     fn link_dylib_by_path(&mut self, path: &Path, _as_needed: bool) {
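
For context on the MSVC hunk above: `link.exe` only understands the `foo.lib` naming convention, while rustc also accepts MinGW-style `libfoo.a` import libraries, so those have to be located by rustc itself and passed as full paths. The sketch below only illustrates the naming difference; it is an invented helper, not rustc's actual lookup code:

```rust
// Hypothetical helper: the import-library file names that can correspond to
// `-l dylib=foo` on an MSVC-like target.
fn candidate_import_libs(name: &str, verbatim: bool) -> Vec<String> {
    if verbatim {
        // With `verbatim`, the given name is used as-is.
        vec![name.to_string()]
    } else {
        // MSVC convention first, then the MinGW-style name rustc also accepts.
        vec![format!("{name}.lib"), format!("lib{name}.a")]
    }
}

fn main() {
    assert_eq!(
        candidate_import_libs("foo", false),
        vec!["foo.lib".to_string(), "libfoo.a".to_string()]
    );
}
```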
diff --git a/compiler/rustc_const_eval/src/const_eval/valtrees.rs b/compiler/rustc_const_eval/src/const_eval/valtrees.rs
index c96296eddb8..8351a6af25b 100644
--- a/compiler/rustc_const_eval/src/const_eval/valtrees.rs
+++ b/compiler/rustc_const_eval/src/const_eval/valtrees.rs
@@ -319,7 +319,7 @@ pub fn valtree_to_const_value<'tcx>(
                 let branches = valtree.unwrap_branch();
                 // Find the non-ZST field. (There can be aligned ZST!)
                 for (i, &inner_valtree) in branches.iter().enumerate() {
-                    let field = layout.field(&LayoutCx { tcx, param_env }, i);
+                    let field = layout.field(&LayoutCx::new(tcx, param_env), i);
                     if !field.is_zst() {
                         return valtree_to_const_value(tcx, param_env.and(field.ty), inner_valtree);
                     }
diff --git a/compiler/rustc_const_eval/src/interpret/validity.rs b/compiler/rustc_const_eval/src/interpret/validity.rs
index ca38f779256..b20df7ac9c1 100644
--- a/compiler/rustc_const_eval/src/interpret/validity.rs
+++ b/compiler/rustc_const_eval/src/interpret/validity.rs
@@ -21,7 +21,7 @@ use rustc_middle::mir::interpret::{
     UnsupportedOpInfo, ValidationErrorInfo,
 };
 use rustc_middle::ty::layout::{LayoutCx, LayoutOf, TyAndLayout};
-use rustc_middle::ty::{self, Ty, TyCtxt};
+use rustc_middle::ty::{self, Ty};
 use rustc_span::symbol::{sym, Symbol};
 use rustc_target::abi::{
     Abi, FieldIdx, FieldsShape, Scalar as ScalarAbi, Size, VariantIdx, Variants, WrappingRange,
@@ -940,7 +940,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
     ) -> Cow<'e, RangeSet> {
         assert!(layout.ty.is_union());
         assert!(layout.abi.is_sized(), "there are no unsized unions");
-        let layout_cx = LayoutCx { tcx: *ecx.tcx, param_env: ecx.param_env };
+        let layout_cx = LayoutCx::new(*ecx.tcx, ecx.param_env);
         return M::cached_union_data_range(ecx, layout.ty, || {
             let mut out = RangeSet(Vec::new());
             union_data_range_uncached(&layout_cx, layout, Size::ZERO, &mut out);
@@ -949,7 +949,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
 
         /// Helper for recursive traversal: add data ranges of the given type to `out`.
         fn union_data_range_uncached<'tcx>(
-            cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
+            cx: &LayoutCx<'tcx>,
             layout: TyAndLayout<'tcx>,
             base_offset: Size,
             out: &mut RangeSet,
diff --git a/compiler/rustc_const_eval/src/util/check_validity_requirement.rs b/compiler/rustc_const_eval/src/util/check_validity_requirement.rs
index 611a8e1a884..19393188c9a 100644
--- a/compiler/rustc_const_eval/src/util/check_validity_requirement.rs
+++ b/compiler/rustc_const_eval/src/util/check_validity_requirement.rs
@@ -1,5 +1,7 @@
 use rustc_middle::bug;
-use rustc_middle::ty::layout::{LayoutCx, LayoutError, LayoutOf, TyAndLayout, ValidityRequirement};
+use rustc_middle::ty::layout::{
+    HasTyCtxt, LayoutCx, LayoutError, LayoutOf, TyAndLayout, ValidityRequirement,
+};
 use rustc_middle::ty::{ParamEnvAnd, Ty, TyCtxt};
 use rustc_target::abi::{Abi, FieldsShape, Scalar, Variants};
 
@@ -30,7 +32,7 @@ pub fn check_validity_requirement<'tcx>(
         return Ok(!layout.abi.is_uninhabited());
     }
 
-    let layout_cx = LayoutCx { tcx, param_env: param_env_and_ty.param_env };
+    let layout_cx = LayoutCx::new(tcx, param_env_and_ty.param_env);
     if kind == ValidityRequirement::Uninit || tcx.sess.opts.unstable_opts.strict_init_checks {
         check_validity_requirement_strict(layout, &layout_cx, kind)
     } else {
@@ -42,12 +44,12 @@ pub fn check_validity_requirement<'tcx>(
 /// for details.
 fn check_validity_requirement_strict<'tcx>(
     ty: TyAndLayout<'tcx>,
-    cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
+    cx: &LayoutCx<'tcx>,
     kind: ValidityRequirement,
 ) -> Result<bool, &'tcx LayoutError<'tcx>> {
     let machine = CompileTimeMachine::new(CanAccessMutGlobal::No, CheckAlignment::Error);
 
-    let mut cx = InterpCx::new(cx.tcx, rustc_span::DUMMY_SP, cx.param_env, machine);
+    let mut cx = InterpCx::new(cx.tcx(), rustc_span::DUMMY_SP, cx.param_env, machine);
 
     let allocated = cx
         .allocate(ty, MemoryKind::Machine(crate::const_eval::MemoryKind::Heap))
@@ -80,7 +82,7 @@ fn check_validity_requirement_strict<'tcx>(
 /// function for details.
 fn check_validity_requirement_lax<'tcx>(
     this: TyAndLayout<'tcx>,
-    cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
+    cx: &LayoutCx<'tcx>,
     init_kind: ValidityRequirement,
 ) -> Result<bool, &'tcx LayoutError<'tcx>> {
     let scalar_allows_raw_init = move |s: Scalar| -> bool {
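
The `LayoutCx { tcx, param_env }` to `LayoutCx::new(tcx, param_env)` rewrites in the const-eval hunks above follow the common pattern of hiding struct literals behind a constructor, so the type can later change shape without touching every call site. A generic illustration with invented names, not the real `LayoutCx`:

```rust
// Hypothetical context type: callers go through `new()`, so defaults and
// field layout stay private to this definition.
struct EvalCtx {
    depth: usize,
    strict: bool,
}

impl EvalCtx {
    fn new(depth: usize) -> Self {
        // Defaults live in one place instead of in every struct literal.
        EvalCtx { depth, strict: false }
    }
}

fn main() {
    let cx = EvalCtx::new(3);
    assert_eq!(cx.depth, 3);
    assert!(!cx.strict);
}
```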
diff --git a/compiler/rustc_error_codes/src/error_codes/E0799.md b/compiler/rustc_error_codes/src/error_codes/E0799.md
new file mode 100644
index 00000000000..38ebc840604
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0799.md
@@ -0,0 +1,19 @@
+Something other than a type or const parameter has been used when one was
+expected.
+
+Erroneous code example:
+
+```compile_fail,E0799
+fn bad1() -> impl Sized + use<main> {}
+
+fn bad2(x: ()) -> impl Sized + use<x> {}
+
+fn main() {}
+```
+
+In the given examples, for `bad1`, the name `main` corresponds to a function
+rather than a type or const parameter. In `bad2`, the name `x` corresponds to
+a function argument rather than a type or const parameter.
+
+Only type and const parameters, including `Self`, may be captured by
+`use<...>` precise capturing bounds.
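
For comparison, a version of this example that compiles (a sketch, assuming a toolchain where `use<...>` precise capturing is available) names only an in-scope generic parameter in the capture list:

```rust
fn good<T>(_x: T) -> impl Sized + use<T> {}

fn main() {
    good(0_u8);
}
```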
diff --git a/compiler/rustc_error_codes/src/error_codes/E0800.md b/compiler/rustc_error_codes/src/error_codes/E0800.md
new file mode 100644
index 00000000000..3e08cd499b7
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0800.md
@@ -0,0 +1,11 @@
+A type or const parameter of the given name is not in scope.
+
+Erroneous code examples:
+
+```compile_fail,E0800
+fn missing() -> impl Sized + use<T> {}
+```
+
+To fix this error, verify that you didn't misspell the type or const
+parameter, and check that the parameter is declared in the item's list
+of generic parameters.
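
For completeness, a compiling counterpart (a sketch, not part of this diff) simply declares the parameter before capturing it:

```rust
fn present<T>() -> impl Sized + use<T> {}

fn main() {
    let _ = present::<u32>();
}
```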
diff --git a/compiler/rustc_error_codes/src/lib.rs b/compiler/rustc_error_codes/src/lib.rs
index 150f99a3ee7..d6f0206b0de 100644
--- a/compiler/rustc_error_codes/src/lib.rs
+++ b/compiler/rustc_error_codes/src/lib.rs
@@ -538,6 +538,8 @@ E0795: 0795,
 E0796: 0796,
 E0797: 0797,
 E0798: 0798,
+E0799: 0799,
+E0800: 0800,
         );
     )
 }
diff --git a/compiler/rustc_hir_analysis/src/check/compare_impl_item.rs b/compiler/rustc_hir_analysis/src/check/compare_impl_item.rs
index 388e02b36e0..cc7a0dff34e 100644
--- a/compiler/rustc_hir_analysis/src/check/compare_impl_item.rs
+++ b/compiler/rustc_hir_analysis/src/check/compare_impl_item.rs
@@ -726,7 +726,7 @@ pub(super) fn collect_return_position_impl_trait_in_trait_tys<'tcx>(
                     num_trait_args,
                     num_impl_args,
                     def_id,
-                    impl_def_id: impl_m.container_id(tcx),
+                    impl_m_def_id: impl_m.def_id,
                     ty,
                     return_span,
                 }) {
@@ -844,12 +844,18 @@ where
 
 struct RemapHiddenTyRegions<'tcx> {
     tcx: TyCtxt<'tcx>,
+    /// Map from early/late params of the impl to identity regions of the RPITIT (GAT)
+    /// in the trait.
     map: FxIndexMap<ty::Region<'tcx>, ty::Region<'tcx>>,
     num_trait_args: usize,
     num_impl_args: usize,
+    /// Def id of the RPITIT (GAT) in the *trait*.
     def_id: DefId,
-    impl_def_id: DefId,
+    /// Def id of the impl method which owns the opaque hidden type we're remapping.
+    impl_m_def_id: DefId,
+    /// The hidden type we're remapping. Useful for diagnostics.
     ty: Ty<'tcx>,
+    /// Span of the return type. Useful for diagnostics.
     return_span: Span,
 }
 
@@ -885,8 +891,7 @@ impl<'tcx> ty::FallibleTypeFolder<TyCtxt<'tcx>> for RemapHiddenTyRegions<'tcx> {
             ty::ReLateParam(_) => {}
             // Remap early-bound regions as long as they don't come from the `impl` itself,
             // in which case we don't really need to renumber them.
-            ty::ReEarlyParam(ebr)
-                if ebr.index >= self.tcx.generics_of(self.impl_def_id).count() as u32 => {}
+            ty::ReEarlyParam(ebr) if ebr.index as usize >= self.num_impl_args => {}
             _ => return Ok(region),
         }
 
@@ -899,7 +904,7 @@ impl<'tcx> ty::FallibleTypeFolder<TyCtxt<'tcx>> for RemapHiddenTyRegions<'tcx> {
                 );
             }
         } else {
-            let guar = match region.opt_param_def_id(self.tcx, self.tcx.parent(self.def_id)) {
+            let guar = match region.opt_param_def_id(self.tcx, self.impl_m_def_id) {
                 Some(def_id) => {
                     let return_span = if let ty::Alias(ty::Opaque, opaque_ty) = self.ty.kind() {
                         self.tcx.def_span(opaque_ty.def_id)
diff --git a/compiler/rustc_hir_analysis/src/collect/resolve_bound_vars.rs b/compiler/rustc_hir_analysis/src/collect/resolve_bound_vars.rs
index b4cbd1f309c..53dcede91c3 100644
--- a/compiler/rustc_hir_analysis/src/collect/resolve_bound_vars.rs
+++ b/compiler/rustc_hir_analysis/src/collect/resolve_bound_vars.rs
@@ -584,7 +584,6 @@ impl<'a, 'tcx> Visitor<'tcx> for BoundVarContext<'a, 'tcx> {
                 | Res::SelfTyParam { trait_: def_id } => {
                     self.resolve_type_ref(def_id.expect_local(), param.hir_id);
                 }
-                Res::Err => {}
                 Res::SelfTyAlias { alias_to, .. } => {
                     self.tcx.dcx().emit_err(errors::PreciseCaptureSelfAlias {
                         span: param.ident.span,
@@ -593,11 +592,10 @@ impl<'a, 'tcx> Visitor<'tcx> for BoundVarContext<'a, 'tcx> {
                     });
                 }
                 res => {
-                    self.tcx.dcx().emit_err(errors::BadPreciseCapture {
-                        span: param.ident.span,
-                        kind: "type or const",
-                        found: res.descr().to_string(),
-                    });
+                    self.tcx.dcx().span_delayed_bug(
+                        param.ident.span,
+                        format!("expected type or const param, found {res:?}"),
+                    );
                 }
             },
         }
diff --git a/compiler/rustc_hir_analysis/src/errors/precise_captures.rs b/compiler/rustc_hir_analysis/src/errors/precise_captures.rs
index af2bb053c0a..b6cffb90805 100644
--- a/compiler/rustc_hir_analysis/src/errors/precise_captures.rs
+++ b/compiler/rustc_hir_analysis/src/errors/precise_captures.rs
@@ -1,3 +1,4 @@
+use rustc_errors::E0799;
 use rustc_macros::Diagnostic;
 use rustc_span::{Span, Symbol};
 
@@ -43,7 +44,7 @@ pub(crate) struct BadPreciseCapture {
 }
 
 #[derive(Diagnostic)]
-#[diag(hir_analysis_precise_capture_self_alias)]
+#[diag(hir_analysis_precise_capture_self_alias, code = E0799)]
 pub(crate) struct PreciseCaptureSelfAlias {
     #[primary_span]
     pub span: Span,
diff --git a/compiler/rustc_hir_typeck/src/coercion.rs b/compiler/rustc_hir_typeck/src/coercion.rs
index 3bada1de148..fca7babea30 100644
--- a/compiler/rustc_hir_typeck/src/coercion.rs
+++ b/compiler/rustc_hir_typeck/src/coercion.rs
@@ -1049,7 +1049,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
     /// trait or region sub-obligations. (presumably we could, but it's not
     /// particularly important for diagnostics...)
     pub(crate) fn deref_once_mutably_for_diagnostic(&self, expr_ty: Ty<'tcx>) -> Option<Ty<'tcx>> {
-        self.autoderef(DUMMY_SP, expr_ty).nth(1).and_then(|(deref_ty, _)| {
+        self.autoderef(DUMMY_SP, expr_ty).silence_errors().nth(1).and_then(|(deref_ty, _)| {
             self.infcx
                 .type_implements_trait(
                     self.tcx.lang_items().deref_mut_trait()?,
diff --git a/compiler/rustc_hir_typeck/src/expr.rs b/compiler/rustc_hir_typeck/src/expr.rs
index 6fd509ed32f..821a90d7a8c 100644
--- a/compiler/rustc_hir_typeck/src/expr.rs
+++ b/compiler/rustc_hir_typeck/src/expr.rs
@@ -2864,13 +2864,13 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
             (expr_t, "")
         };
         for (found_fields, args) in
-            self.get_field_candidates_considering_privacy(span, ty, mod_id, id)
+            self.get_field_candidates_considering_privacy_for_diag(span, ty, mod_id, id)
         {
             let field_names = found_fields.iter().map(|field| field.name).collect::<Vec<_>>();
             let mut candidate_fields: Vec<_> = found_fields
                 .into_iter()
                 .filter_map(|candidate_field| {
-                    self.check_for_nested_field_satisfying(
+                    self.check_for_nested_field_satisfying_condition_for_diag(
                         span,
                         &|candidate_field, _| candidate_field.ident(self.tcx()) == field,
                         candidate_field,
@@ -2933,7 +2933,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
         .with_span_label(field.span, "private field")
     }
 
-    pub(crate) fn get_field_candidates_considering_privacy(
+    pub(crate) fn get_field_candidates_considering_privacy_for_diag(
         &self,
         span: Span,
         base_ty: Ty<'tcx>,
@@ -2942,7 +2942,18 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
     ) -> Vec<(Vec<&'tcx ty::FieldDef>, GenericArgsRef<'tcx>)> {
         debug!("get_field_candidates(span: {:?}, base_t: {:?}", span, base_ty);
 
-        self.autoderef(span, base_ty)
+        let mut autoderef = self.autoderef(span, base_ty).silence_errors();
+        let deref_chain: Vec<_> = autoderef.by_ref().collect();
+
+        // Don't probe if we hit the recursion limit, since it may result in
+        // quadratic blowup if we then try to further deref the results of this
+        // function. This is a best-effort method, after all.
+        if autoderef.reached_recursion_limit() {
+            return vec![];
+        }
+
+        deref_chain
+            .into_iter()
             .filter_map(move |(base_t, _)| {
                 match base_t.kind() {
                     ty::Adt(base_def, args) if !base_def.is_enum() => {
@@ -2975,7 +2986,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
 
     /// This method is called after we have encountered a missing field error to recursively
     /// search for the field
-    pub(crate) fn check_for_nested_field_satisfying(
+    pub(crate) fn check_for_nested_field_satisfying_condition_for_diag(
         &self,
         span: Span,
         matches: &impl Fn(&ty::FieldDef, Ty<'tcx>) -> bool,
@@ -3000,20 +3011,24 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
             if matches(candidate_field, field_ty) {
                 return Some(field_path);
             } else {
-                for (nested_fields, subst) in
-                    self.get_field_candidates_considering_privacy(span, field_ty, mod_id, hir_id)
+                for (nested_fields, subst) in self
+                    .get_field_candidates_considering_privacy_for_diag(
+                        span, field_ty, mod_id, hir_id,
+                    )
                 {
                     // recursively search fields of `candidate_field` if it's a ty::Adt
                     for field in nested_fields {
-                        if let Some(field_path) = self.check_for_nested_field_satisfying(
-                            span,
-                            matches,
-                            field,
-                            subst,
-                            field_path.clone(),
-                            mod_id,
-                            hir_id,
-                        ) {
+                        if let Some(field_path) = self
+                            .check_for_nested_field_satisfying_condition_for_diag(
+                                span,
+                                matches,
+                                field,
+                                subst,
+                                field_path.clone(),
+                                mod_id,
+                                hir_id,
+                            )
+                        {
                             return Some(field_path);
                         }
                     }
diff --git a/compiler/rustc_hir_typeck/src/fn_ctxt/checks.rs b/compiler/rustc_hir_typeck/src/fn_ctxt/checks.rs
index 8348c6e9a16..8810f14aaa9 100644
--- a/compiler/rustc_hir_typeck/src/fn_ctxt/checks.rs
+++ b/compiler/rustc_hir_typeck/src/fn_ctxt/checks.rs
@@ -2619,9 +2619,9 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
         is_method: bool,
     ) -> Option<Vec<(Option<&hir::GenericParam<'_>>, &hir::Param<'_>)>> {
         let fn_node = self.tcx.hir().get_if_local(def_id)?;
+        let fn_decl = fn_node.fn_decl()?;
 
-        let generic_params: Vec<Option<&hir::GenericParam<'_>>> = fn_node
-            .fn_decl()?
+        let generic_params: Vec<Option<&hir::GenericParam<'_>>> = fn_decl
             .inputs
             .into_iter()
             .skip(if is_method { 1 } else { 0 })
@@ -2642,7 +2642,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
             })
             .collect();
 
-        let params: Vec<&hir::Param<'_>> = self
+        let mut params: Vec<&hir::Param<'_>> = self
             .tcx
             .hir()
             .body(fn_node.body_id()?)
@@ -2651,7 +2651,16 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
             .skip(if is_method { 1 } else { 0 })
             .collect();
 
-        Some(generic_params.into_iter().zip_eq(params).collect())
+        // The surrounding code expects variadic functions to not have a parameter representing
+        // the "..." parameter. This is already true of the FnDecl but not of the body params, so
+        // we drop it if it exists.
+
+        if fn_decl.c_variadic {
+            params.pop();
+        }
+
+        debug_assert_eq!(params.len(), generic_params.len());
+        Some(generic_params.into_iter().zip(params).collect())
     }
 }
 
diff --git a/compiler/rustc_hir_typeck/src/method/probe.rs b/compiler/rustc_hir_typeck/src/method/probe.rs
index 3ba3429cbb3..5dc341653e5 100644
--- a/compiler/rustc_hir_typeck/src/method/probe.rs
+++ b/compiler/rustc_hir_typeck/src/method/probe.rs
@@ -375,7 +375,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
         // If our autoderef loop had reached the recursion limit,
         // report an overflow error, but continue going on with
         // the truncated autoderef list.
-        if steps.reached_recursion_limit {
+        if steps.reached_recursion_limit && !is_suggestion.0 {
             self.probe(|_| {
                 let ty = &steps
                     .steps
diff --git a/compiler/rustc_hir_typeck/src/method/suggest.rs b/compiler/rustc_hir_typeck/src/method/suggest.rs
index 9ede809ead2..deabf693af2 100644
--- a/compiler/rustc_hir_typeck/src/method/suggest.rs
+++ b/compiler/rustc_hir_typeck/src/method/suggest.rs
@@ -62,14 +62,14 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
                 // It might seem that we can use `predicate_must_hold_modulo_regions`,
                 // but since a Dummy binder is used to fill in the FnOnce trait's arguments,
                 // type resolution always gives a "maybe" here.
-                if self.autoderef(span, ty).any(|(ty, _)| {
+                if self.autoderef(span, ty).silence_errors().any(|(ty, _)| {
                     info!("check deref {:?} error", ty);
                     matches!(ty.kind(), ty::Error(_) | ty::Infer(_))
                 }) {
                     return false;
                 }
 
-                self.autoderef(span, ty).any(|(ty, _)| {
+                self.autoderef(span, ty).silence_errors().any(|(ty, _)| {
                     info!("check deref {:?} impl FnOnce", ty);
                     self.probe(|_| {
                         let trait_ref =
@@ -90,7 +90,9 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
     }
 
     fn is_slice_ty(&self, ty: Ty<'tcx>, span: Span) -> bool {
-        self.autoderef(span, ty).any(|(ty, _)| matches!(ty.kind(), ty::Slice(..) | ty::Array(..)))
+        self.autoderef(span, ty)
+            .silence_errors()
+            .any(|(ty, _)| matches!(ty.kind(), ty::Slice(..) | ty::Array(..)))
     }
 
     fn impl_into_iterator_should_be_iterator(
@@ -672,7 +674,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
         let mut ty_str_reported = ty_str.clone();
         if let ty::Adt(_, generics) = rcvr_ty.kind() {
             if generics.len() > 0 {
-                let mut autoderef = self.autoderef(span, rcvr_ty);
+                let mut autoderef = self.autoderef(span, rcvr_ty).silence_errors();
                 let candidate_found = autoderef.any(|(ty, _)| {
                     if let ty::Adt(adt_def, _) = ty.kind() {
                         self.tcx
@@ -2237,6 +2239,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
             let impl_ty = self.tcx.type_of(*impl_did).instantiate_identity();
             let target_ty = self
                 .autoderef(sugg_span, rcvr_ty)
+                .silence_errors()
                 .find(|(rcvr_ty, _)| {
                     DeepRejectCtxt::relate_rigid_infer(self.tcx).types_may_unify(*rcvr_ty, impl_ty)
                 })
@@ -2352,17 +2355,18 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
         err: &mut Diag<'_>,
     ) -> bool {
         let tcx = self.tcx;
-        let field_receiver = self.autoderef(span, rcvr_ty).find_map(|(ty, _)| match ty.kind() {
-            ty::Adt(def, args) if !def.is_enum() => {
-                let variant = &def.non_enum_variant();
-                tcx.find_field_index(item_name, variant).map(|index| {
-                    let field = &variant.fields[index];
-                    let field_ty = field.ty(tcx, args);
-                    (field, field_ty)
-                })
-            }
-            _ => None,
-        });
+        let field_receiver =
+            self.autoderef(span, rcvr_ty).silence_errors().find_map(|(ty, _)| match ty.kind() {
+                ty::Adt(def, args) if !def.is_enum() => {
+                    let variant = &def.non_enum_variant();
+                    tcx.find_field_index(item_name, variant).map(|index| {
+                        let field = &variant.fields[index];
+                        let field_ty = field.ty(tcx, args);
+                        (field, field_ty)
+                    })
+                }
+                _ => None,
+            });
         if let Some((field, field_ty)) = field_receiver {
             let scope = tcx.parent_module_from_def_id(self.body_id);
             let is_accessible = field.vis.is_accessible_from(scope, tcx);
@@ -2675,9 +2679,12 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
     ) {
         if let SelfSource::MethodCall(expr) = source {
             let mod_id = self.tcx.parent_module(expr.hir_id).to_def_id();
-            for (fields, args) in
-                self.get_field_candidates_considering_privacy(span, actual, mod_id, expr.hir_id)
-            {
+            for (fields, args) in self.get_field_candidates_considering_privacy_for_diag(
+                span,
+                actual,
+                mod_id,
+                expr.hir_id,
+            ) {
                 let call_expr = self.tcx.hir().expect_expr(self.tcx.parent_hir_id(expr.hir_id));
 
                 let lang_items = self.tcx.lang_items();
@@ -2693,7 +2700,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
                 let mut candidate_fields: Vec<_> = fields
                     .into_iter()
                     .filter_map(|candidate_field| {
-                        self.check_for_nested_field_satisfying(
+                        self.check_for_nested_field_satisfying_condition_for_diag(
                             span,
                             &|_, field_ty| {
                                 self.lookup_probe_for_diagnostic(
@@ -3195,7 +3202,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
         let SelfSource::QPath(ty) = self_source else {
             return;
         };
-        for (deref_ty, _) in self.autoderef(DUMMY_SP, rcvr_ty).skip(1) {
+        for (deref_ty, _) in self.autoderef(DUMMY_SP, rcvr_ty).silence_errors().skip(1) {
             if let Ok(pick) = self.probe_for_name(
                 Mode::Path,
                 item_name,
@@ -4221,7 +4228,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
             return is_local(rcvr_ty);
         }
 
-        self.autoderef(span, rcvr_ty).any(|(ty, _)| is_local(ty))
+        self.autoderef(span, rcvr_ty).silence_errors().any(|(ty, _)| is_local(ty))
     }
 }
 
diff --git a/compiler/rustc_hir_typeck/src/pat.rs b/compiler/rustc_hir_typeck/src/pat.rs
index c8c95ddcfce..45a6efc7a6a 100644
--- a/compiler/rustc_hir_typeck/src/pat.rs
+++ b/compiler/rustc_hir_typeck/src/pat.rs
@@ -2533,6 +2533,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
             err.help("the semantics of slice patterns changed recently; see issue #62254");
         } else if self
             .autoderef(span, expected_ty)
+            .silence_errors()
             .any(|(ty, _)| matches!(ty.kind(), ty::Slice(..) | ty::Array(..)))
             && let Some(span) = ti.span
             && let Some(_) = ti.origin_expr
diff --git a/compiler/rustc_index/src/slice.rs b/compiler/rustc_index/src/slice.rs
index 3205ca3f40b..956d32c9a69 100644
--- a/compiler/rustc_index/src/slice.rs
+++ b/compiler/rustc_index/src/slice.rs
@@ -20,7 +20,7 @@ pub struct IndexSlice<I: Idx, T> {
 
 impl<I: Idx, T> IndexSlice<I, T> {
     #[inline]
-    pub const fn empty() -> &'static Self {
+    pub const fn empty<'a>() -> &'a Self {
         Self::from_raw(&[])
     }
 
diff --git a/compiler/rustc_llvm/build.rs b/compiler/rustc_llvm/build.rs
index b2ff9efb41c..f092110a324 100644
--- a/compiler/rustc_llvm/build.rs
+++ b/compiler/rustc_llvm/build.rs
@@ -220,7 +220,10 @@ fn main() {
     let mut cmd = Command::new(&llvm_config);
     cmd.arg(llvm_link_arg).arg("--libs");
 
-    if !is_crossed {
+    // Don't link system libs if cross-compiling, unless targeting Windows.
+    // On Windows, system DLLs aren't linked directly; import libraries are used instead.
+    // These import libraries are independent of the host.
+    if !is_crossed || target.contains("windows") {
         cmd.arg("--system-libs");
     }
 
diff --git a/compiler/rustc_metadata/src/lib.rs b/compiler/rustc_metadata/src/lib.rs
index 58b352f263d..02a9ce455b2 100644
--- a/compiler/rustc_metadata/src/lib.rs
+++ b/compiler/rustc_metadata/src/lib.rs
@@ -37,7 +37,8 @@ pub mod locator;
 pub use creader::{load_symbol_from_dylib, DylibError};
 pub use fs::{emit_wrapper_file, METADATA_FILENAME};
 pub use native_libs::{
-    find_native_static_library, try_find_native_static_library, walk_native_lib_search_dirs,
+    find_native_static_library, try_find_native_dynamic_library, try_find_native_static_library,
+    walk_native_lib_search_dirs,
 };
 pub use rmeta::{encode_metadata, rendered_const, EncodedMetadata, METADATA_HEADER};
 
diff --git a/compiler/rustc_metadata/src/native_libs.rs b/compiler/rustc_metadata/src/native_libs.rs
index a6ad449cb53..0329a193d89 100644
--- a/compiler/rustc_metadata/src/native_libs.rs
+++ b/compiler/rustc_metadata/src/native_libs.rs
@@ -109,6 +109,44 @@ pub fn try_find_native_static_library(
     .break_value()
 }
 
+pub fn try_find_native_dynamic_library(
+    sess: &Session,
+    name: &str,
+    verbatim: bool,
+) -> Option<PathBuf> {
+    let formats = if verbatim {
+        vec![("".into(), "".into())]
+    } else {
+        // While the official naming convention for MSVC import libraries
+        // is foo.lib...
+        let os = (sess.target.staticlib_prefix.clone(), sess.target.staticlib_suffix.clone());
+        // ... Meson follows the libfoo.dll.a convention to
+        // disambiguate .a for static libraries
+        let meson = ("lib".into(), ".dll.a".into());
+        // and MinGW uses .a altogether
+        let mingw = ("lib".into(), ".a".into());
+        vec![os, meson, mingw]
+    };
+
+    walk_native_lib_search_dirs(
+        sess,
+        LinkSelfContainedComponents::empty(),
+        None,
+        |dir, is_framework| {
+            if !is_framework {
+                for (prefix, suffix) in &formats {
+                    let test = dir.join(format!("{prefix}{name}{suffix}"));
+                    if test.exists() {
+                        return ControlFlow::Break(test);
+                    }
+                }
+            }
+            ControlFlow::Continue(())
+        },
+    )
+    .break_value()
+}
+
 pub fn find_native_static_library(name: &str, verbatim: bool, sess: &Session) -> PathBuf {
     try_find_native_static_library(sess, name, verbatim)
         .unwrap_or_else(|| sess.dcx().emit_fatal(errors::MissingNativeLibrary::new(name, verbatim)))
diff --git a/compiler/rustc_middle/src/ty/generics.rs b/compiler/rustc_middle/src/ty/generics.rs
index bbc696e0f08..5f9a89c3a5b 100644
--- a/compiler/rustc_middle/src/ty/generics.rs
+++ b/compiler/rustc_middle/src/ty/generics.rs
@@ -255,7 +255,9 @@ impl<'tcx> Generics {
         let param = self.param_at(param.index as usize, tcx);
         match param.kind {
             GenericParamDefKind::Lifetime => param,
-            _ => bug!("expected lifetime parameter, but found another generic parameter"),
+            _ => {
+                bug!("expected lifetime parameter, but found another generic parameter: {param:#?}")
+            }
         }
     }
 
@@ -264,7 +266,7 @@ impl<'tcx> Generics {
         let param = self.param_at(param.index as usize, tcx);
         match param.kind {
             GenericParamDefKind::Type { .. } => param,
-            _ => bug!("expected type parameter, but found another generic parameter"),
+            _ => bug!("expected type parameter, but found another generic parameter: {param:#?}"),
         }
     }
 
@@ -273,7 +275,7 @@ impl<'tcx> Generics {
         let param = self.param_at(param.index as usize, tcx);
         match param.kind {
             GenericParamDefKind::Const { .. } => param,
-            _ => bug!("expected const parameter, but found another generic parameter"),
+            _ => bug!("expected const parameter, but found another generic parameter: {param:#?}"),
         }
     }
 
diff --git a/compiler/rustc_middle/src/ty/layout.rs b/compiler/rustc_middle/src/ty/layout.rs
index 254a0119920..6d878ab7654 100644
--- a/compiler/rustc_middle/src/ty/layout.rs
+++ b/compiler/rustc_middle/src/ty/layout.rs
@@ -1,4 +1,3 @@
-use std::borrow::Cow;
 use std::num::NonZero;
 use std::ops::Bound;
 use std::{cmp, fmt};
@@ -286,20 +285,14 @@ impl<'tcx> IntoDiagArg for LayoutError<'tcx> {
 }
 
 #[derive(Clone, Copy)]
-pub struct LayoutCx<'tcx, C> {
-    pub tcx: C,
+pub struct LayoutCx<'tcx> {
+    pub calc: LayoutCalculator<TyCtxt<'tcx>>,
     pub param_env: ty::ParamEnv<'tcx>,
 }
 
-impl<'tcx> LayoutCalculator for LayoutCx<'tcx, TyCtxt<'tcx>> {
-    type TargetDataLayoutRef = &'tcx TargetDataLayout;
-
-    fn delayed_bug(&self, txt: impl Into<Cow<'static, str>>) {
-        self.tcx.dcx().delayed_bug(txt);
-    }
-
-    fn current_data_layout(&self) -> Self::TargetDataLayoutRef {
-        &self.tcx.data_layout
+impl<'tcx> LayoutCx<'tcx> {
+    pub fn new(tcx: TyCtxt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> Self {
+        Self { calc: LayoutCalculator::new(tcx), param_env }
     }
 }
 
@@ -568,33 +561,33 @@ impl<'tcx> HasTyCtxt<'tcx> for TyCtxtAt<'tcx> {
     }
 }
 
-impl<'tcx, C> HasParamEnv<'tcx> for LayoutCx<'tcx, C> {
+impl<'tcx> HasParamEnv<'tcx> for LayoutCx<'tcx> {
     fn param_env(&self) -> ty::ParamEnv<'tcx> {
         self.param_env
     }
 }
 
-impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
+impl<'tcx> HasDataLayout for LayoutCx<'tcx> {
     fn data_layout(&self) -> &TargetDataLayout {
-        self.tcx.data_layout()
+        self.calc.cx.data_layout()
     }
 }
 
-impl<'tcx, T: HasTargetSpec> HasTargetSpec for LayoutCx<'tcx, T> {
+impl<'tcx> HasTargetSpec for LayoutCx<'tcx> {
     fn target_spec(&self) -> &Target {
-        self.tcx.target_spec()
+        self.calc.cx.target_spec()
     }
 }
 
-impl<'tcx, T: HasWasmCAbiOpt> HasWasmCAbiOpt for LayoutCx<'tcx, T> {
+impl<'tcx> HasWasmCAbiOpt for LayoutCx<'tcx> {
     fn wasm_c_abi_opt(&self) -> WasmCAbi {
-        self.tcx.wasm_c_abi_opt()
+        self.calc.cx.wasm_c_abi_opt()
     }
 }
 
-impl<'tcx, T: HasTyCtxt<'tcx>> HasTyCtxt<'tcx> for LayoutCx<'tcx, T> {
+impl<'tcx> HasTyCtxt<'tcx> for LayoutCx<'tcx> {
     fn tcx(&self) -> TyCtxt<'tcx> {
-        self.tcx.tcx()
+        self.calc.cx
     }
 }
 
@@ -685,7 +678,7 @@ pub trait LayoutOf<'tcx>: LayoutOfHelpers<'tcx> {
 
 impl<'tcx, C: LayoutOfHelpers<'tcx>> LayoutOf<'tcx> for C {}
 
-impl<'tcx> LayoutOfHelpers<'tcx> for LayoutCx<'tcx, TyCtxt<'tcx>> {
+impl<'tcx> LayoutOfHelpers<'tcx> for LayoutCx<'tcx> {
     type LayoutOfResult = Result<TyAndLayout<'tcx>, &'tcx LayoutError<'tcx>>;
 
     #[inline]
@@ -695,26 +688,7 @@ impl<'tcx> LayoutOfHelpers<'tcx> for LayoutCx<'tcx, TyCtxt<'tcx>> {
         _: Span,
         _: Ty<'tcx>,
     ) -> &'tcx LayoutError<'tcx> {
-        self.tcx.arena.alloc(err)
-    }
-}
-
-impl<'tcx> LayoutOfHelpers<'tcx> for LayoutCx<'tcx, TyCtxtAt<'tcx>> {
-    type LayoutOfResult = Result<TyAndLayout<'tcx>, &'tcx LayoutError<'tcx>>;
-
-    #[inline]
-    fn layout_tcx_at_span(&self) -> Span {
-        self.tcx.span
-    }
-
-    #[inline]
-    fn handle_layout_err(
-        &self,
-        err: LayoutError<'tcx>,
-        _: Span,
-        _: Ty<'tcx>,
-    ) -> &'tcx LayoutError<'tcx> {
-        self.tcx.arena.alloc(err)
+        self.tcx().arena.alloc(err)
     }
 }
 
@@ -1342,7 +1316,7 @@ impl<'tcx> TyCtxt<'tcx> {
     where
         I: Iterator<Item = (VariantIdx, FieldIdx)>,
     {
-        let cx = LayoutCx { tcx: self, param_env };
+        let cx = LayoutCx::new(self, param_env);
         let mut offset = Size::ZERO;
 
         for (variant, field) in indices {
diff --git a/compiler/rustc_middle/src/ty/util.rs b/compiler/rustc_middle/src/ty/util.rs
index d70ff8258d0..a0262f12cb5 100644
--- a/compiler/rustc_middle/src/ty/util.rs
+++ b/compiler/rustc_middle/src/ty/util.rs
@@ -571,7 +571,10 @@ impl<'tcx> TyCtxt<'tcx> {
     /// Returns `true` if `def_id` refers to a definition that does not have its own
     /// type-checking context, i.e. closure, coroutine or inline const.
     pub fn is_typeck_child(self, def_id: DefId) -> bool {
-        matches!(self.def_kind(def_id), DefKind::Closure | DefKind::InlineConst)
+        matches!(
+            self.def_kind(def_id),
+            DefKind::Closure | DefKind::InlineConst | DefKind::SyntheticCoroutineBody
+        )
     }
 
     /// Returns `true` if `def_id` refers to a trait (i.e., `trait Foo { ... }`).
diff --git a/compiler/rustc_mir_transform/src/shim.rs b/compiler/rustc_mir_transform/src/shim.rs
index f1bd803d835..47d04d8a00b 100644
--- a/compiler/rustc_mir_transform/src/shim.rs
+++ b/compiler/rustc_mir_transform/src/shim.rs
@@ -1070,19 +1070,26 @@ fn build_construct_coroutine_by_move_shim<'tcx>(
     let locals = local_decls_for_sig(&sig, span);
 
     let mut fields = vec![];
+
+    // Move all of the closure args.
     for idx in 1..sig.inputs().len() {
         fields.push(Operand::Move(Local::from_usize(idx + 1).into()));
     }
+
     for (idx, ty) in args.as_coroutine_closure().upvar_tys().iter().enumerate() {
         if receiver_by_ref {
             // The only situation where it's possible is when we capture immutable references,
             // since those don't need to be reborrowed with the closure's env lifetime. Since
             // references are always `Copy`, just emit a copy.
-            assert_matches!(
-                ty.kind(),
-                ty::Ref(_, _, hir::Mutability::Not),
-                "field should be captured by immutable ref if we have an `Fn` instance"
-            );
+            if !matches!(ty.kind(), ty::Ref(_, _, hir::Mutability::Not)) {
+                // This copy is only sound if it's a `&T`. This may be
+                // reachable e.g. when eagerly computing the `Fn` instance
+                // of an async closure that doesn't borrowck.
+                tcx.dcx().delayed_bug(format!(
+                    "field should be captured by immutable ref if we have \
+                    an `Fn` instance, but it was: {ty}"
+                ));
+            }
             fields.push(Operand::Copy(tcx.mk_place_field(
                 self_local,
                 FieldIdx::from_usize(idx),
diff --git a/compiler/rustc_passes/src/layout_test.rs b/compiler/rustc_passes/src/layout_test.rs
index e1bc770d817..312cc3a26ef 100644
--- a/compiler/rustc_passes/src/layout_test.rs
+++ b/compiler/rustc_passes/src/layout_test.rs
@@ -128,7 +128,7 @@ fn dump_layout_of(tcx: TyCtxt<'_>, item_def_id: LocalDefId, attr: &Attribute) {
         }
 
         Err(layout_error) => {
-            tcx.dcx().emit_fatal(Spanned { node: layout_error.into_diagnostic(), span });
+            tcx.dcx().emit_err(Spanned { node: layout_error.into_diagnostic(), span });
         }
     }
 }
diff --git a/compiler/rustc_resolve/src/late.rs b/compiler/rustc_resolve/src/late.rs
index 0aa351cad40..4bf2cc287da 100644
--- a/compiler/rustc_resolve/src/late.rs
+++ b/compiler/rustc_resolve/src/late.rs
@@ -402,6 +402,8 @@ pub(crate) enum PathSource<'a> {
     TraitItem(Namespace),
     // Paths in delegation item
     Delegation,
+    /// An arg in a `use<'a, N>` precise-capturing bound.
+    PreciseCapturingArg(Namespace),
 }
 
 impl<'a> PathSource<'a> {
@@ -413,6 +415,7 @@ impl<'a> PathSource<'a> {
             | PathSource::TupleStruct(..)
             | PathSource::Delegation => ValueNS,
             PathSource::TraitItem(ns) => ns,
+            PathSource::PreciseCapturingArg(ns) => ns,
         }
     }
 
@@ -423,7 +426,10 @@ impl<'a> PathSource<'a> {
             | PathSource::Pat
             | PathSource::Struct
             | PathSource::TupleStruct(..) => true,
-            PathSource::Trait(_) | PathSource::TraitItem(..) | PathSource::Delegation => false,
+            PathSource::Trait(_)
+            | PathSource::TraitItem(..)
+            | PathSource::Delegation
+            | PathSource::PreciseCapturingArg(..) => false,
         }
     }
 
@@ -466,6 +472,7 @@ impl<'a> PathSource<'a> {
                 _ => "value",
             },
             PathSource::Delegation => "function",
+            PathSource::PreciseCapturingArg(..) => "type or const parameter",
         }
     }
 
@@ -534,6 +541,15 @@ impl<'a> PathSource<'a> {
                 _ => false,
             },
             PathSource::Delegation => matches!(res, Res::Def(DefKind::Fn | DefKind::AssocFn, _)),
+            PathSource::PreciseCapturingArg(ValueNS) => {
+                matches!(res, Res::Def(DefKind::ConstParam, _))
+            }
+            // We allow `SelfTyAlias` here so we can give a more descriptive error later.
+            PathSource::PreciseCapturingArg(TypeNS) => matches!(
+                res,
+                Res::Def(DefKind::TyParam, _) | Res::SelfTyParam { .. } | Res::SelfTyAlias { .. }
+            ),
+            PathSource::PreciseCapturingArg(MacroNS) => false,
         }
     }
 
@@ -551,6 +567,8 @@ impl<'a> PathSource<'a> {
             (PathSource::Pat | PathSource::TupleStruct(..), false) => E0531,
             (PathSource::TraitItem(..), true) => E0575,
             (PathSource::TraitItem(..), false) => E0576,
+            (PathSource::PreciseCapturingArg(..), true) => E0799,
+            (PathSource::PreciseCapturingArg(..), false) => E0800,
         }
     }
 }
@@ -1077,9 +1095,19 @@ impl<'ra: 'ast, 'ast, 'tcx> Visitor<'ast> for LateResolutionVisitor<'_, 'ast, 'r
                 };
                 // Like `Ty::Param`, we try resolving this as both a const and a type.
                 if !check_ns(TypeNS) && check_ns(ValueNS) {
-                    self.smart_resolve_path(*id, &None, path, PathSource::Expr(None));
+                    self.smart_resolve_path(
+                        *id,
+                        &None,
+                        path,
+                        PathSource::PreciseCapturingArg(ValueNS),
+                    );
                 } else {
-                    self.smart_resolve_path(*id, &None, path, PathSource::Type);
+                    self.smart_resolve_path(
+                        *id,
+                        &None,
+                        path,
+                        PathSource::PreciseCapturingArg(TypeNS),
+                    );
                 }
             }
         }
@@ -1889,7 +1917,10 @@ impl<'a, 'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'a, 'ast, 'ra, 'tcx> {
             );
 
             let inferred = match source {
-                PathSource::Trait(..) | PathSource::TraitItem(..) | PathSource::Type => false,
+                PathSource::Trait(..)
+                | PathSource::TraitItem(..)
+                | PathSource::Type
+                | PathSource::PreciseCapturingArg(..) => false,
                 PathSource::Expr(..)
                 | PathSource::Pat
                 | PathSource::Struct
@@ -3982,7 +4013,9 @@ impl<'a, 'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'a, 'ast, 'ra, 'tcx> {
                         Applicability::MaybeIncorrect,
                     ))
                 } else if res.is_none()
-                    && let PathSource::Type | PathSource::Expr(_) = source
+                    && let PathSource::Type
+                    | PathSource::Expr(_)
+                    | PathSource::PreciseCapturingArg(..) = source
                 {
                     this.suggest_adding_generic_parameter(path, source)
                 } else {
diff --git a/compiler/rustc_resolve/src/late/diagnostics.rs b/compiler/rustc_resolve/src/late/diagnostics.rs
index 1c584bf8338..b5974e001aa 100644
--- a/compiler/rustc_resolve/src/late/diagnostics.rs
+++ b/compiler/rustc_resolve/src/late/diagnostics.rs
@@ -2068,33 +2068,34 @@ impl<'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> {
             ) {
                 let res = binding.res();
                 if filter_fn(res) {
-                    let def_id = res.def_id();
-                    let has_self = match def_id.as_local() {
-                        Some(def_id) => {
-                            self.r.delegation_fn_sigs.get(&def_id).map_or(false, |sig| sig.has_self)
-                        }
-                        None => self
-                            .r
-                            .tcx
-                            .fn_arg_names(def_id)
-                            .first()
-                            .is_some_and(|ident| ident.name == kw::SelfLower),
-                    };
-                    if has_self {
-                        return Some(AssocSuggestion::MethodWithSelf { called });
-                    } else {
-                        match res {
-                            Res::Def(DefKind::AssocFn, _) => {
+                    match res {
+                        Res::Def(DefKind::Fn | DefKind::AssocFn, def_id) => {
+                            let has_self = match def_id.as_local() {
+                                Some(def_id) => self
+                                    .r
+                                    .delegation_fn_sigs
+                                    .get(&def_id)
+                                    .map_or(false, |sig| sig.has_self),
+                                None => self
+                                    .r
+                                    .tcx
+                                    .fn_arg_names(def_id)
+                                    .first()
+                                    .is_some_and(|ident| ident.name == kw::SelfLower),
+                            };
+                            if has_self {
+                                return Some(AssocSuggestion::MethodWithSelf { called });
+                            } else {
                                 return Some(AssocSuggestion::AssocFn { called });
                             }
-                            Res::Def(DefKind::AssocConst, _) => {
-                                return Some(AssocSuggestion::AssocConst);
-                            }
-                            Res::Def(DefKind::AssocTy, _) => {
-                                return Some(AssocSuggestion::AssocType);
-                            }
-                            _ => {}
                         }
+                        Res::Def(DefKind::AssocConst, _) => {
+                            return Some(AssocSuggestion::AssocConst);
+                        }
+                        Res::Def(DefKind::AssocTy, _) => {
+                            return Some(AssocSuggestion::AssocType);
+                        }
+                        _ => {}
                     }
                 }
             }
@@ -2538,8 +2539,13 @@ impl<'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> {
                     }
 
                     let (msg, sugg) = match source {
-                        PathSource::Type => ("you might be missing a type parameter", ident),
-                        PathSource::Expr(_) => ("you might be missing a const parameter", format!("const {ident}: /* Type */")),
+                        PathSource::Type | PathSource::PreciseCapturingArg(TypeNS) => {
+                            ("you might be missing a type parameter", ident)
+                        }
+                        PathSource::Expr(_) | PathSource::PreciseCapturingArg(ValueNS) => (
+                            "you might be missing a const parameter",
+                            format!("const {ident}: /* Type */"),
+                        ),
                         _ => return None,
                     };
                     let (span, sugg) = if let [.., param] = &generics.params[..] {
diff --git a/compiler/rustc_transmute/src/layout/mod.rs b/compiler/rustc_transmute/src/layout/mod.rs
index 1cf9e0b9b70..a5c47c480e1 100644
--- a/compiler/rustc_transmute/src/layout/mod.rs
+++ b/compiler/rustc_transmute/src/layout/mod.rs
@@ -63,8 +63,8 @@ pub mod rustc {
     use std::fmt::{self, Write};
 
     use rustc_middle::mir::Mutability;
-    use rustc_middle::ty::layout::{LayoutCx, LayoutError};
-    use rustc_middle::ty::{self, Ty, TyCtxt};
+    use rustc_middle::ty::layout::{HasTyCtxt, LayoutCx, LayoutError};
+    use rustc_middle::ty::{self, Ty};
     use rustc_target::abi::Layout;
 
     /// A reference in the layout.
@@ -124,11 +124,11 @@ pub mod rustc {
     }
 
     pub(crate) fn layout_of<'tcx>(
-        cx: LayoutCx<'tcx, TyCtxt<'tcx>>,
+        cx: LayoutCx<'tcx>,
         ty: Ty<'tcx>,
     ) -> Result<Layout<'tcx>, &'tcx LayoutError<'tcx>> {
         use rustc_middle::ty::layout::LayoutOf;
-        let ty = cx.tcx.erase_regions(ty);
+        let ty = cx.tcx().erase_regions(ty);
         cx.layout_of(ty).map(|tl| tl.layout)
     }
 }
diff --git a/compiler/rustc_transmute/src/layout/tree.rs b/compiler/rustc_transmute/src/layout/tree.rs
index b5ce465a173..ddf9b6c28f3 100644
--- a/compiler/rustc_transmute/src/layout/tree.rs
+++ b/compiler/rustc_transmute/src/layout/tree.rs
@@ -204,7 +204,7 @@ pub(crate) mod rustc {
     }
 
     impl<'tcx> Tree<Def<'tcx>, Ref<'tcx>> {
-        pub(crate) fn from_ty(ty: Ty<'tcx>, cx: LayoutCx<'tcx, TyCtxt<'tcx>>) -> Result<Self, Err> {
+        pub(crate) fn from_ty(ty: Ty<'tcx>, cx: LayoutCx<'tcx>) -> Result<Self, Err> {
             use rustc_target::abi::HasDataLayout;
             let layout = layout_of(cx, ty)?;
 
@@ -212,7 +212,7 @@ pub(crate) mod rustc {
                 return Err(Err::TypeError(e));
             }
 
-            let target = cx.tcx.data_layout();
+            let target = cx.data_layout();
             let pointer_size = target.pointer_size;
 
             match ty.kind() {
@@ -274,7 +274,7 @@ pub(crate) mod rustc {
         fn from_tuple(
             (ty, layout): (Ty<'tcx>, Layout<'tcx>),
             members: &'tcx List<Ty<'tcx>>,
-            cx: LayoutCx<'tcx, TyCtxt<'tcx>>,
+            cx: LayoutCx<'tcx>,
         ) -> Result<Self, Err> {
             match &layout.fields {
                 FieldsShape::Primitive => {
@@ -299,7 +299,7 @@ pub(crate) mod rustc {
         fn from_struct(
             (ty, layout): (Ty<'tcx>, Layout<'tcx>),
             def: AdtDef<'tcx>,
-            cx: LayoutCx<'tcx, TyCtxt<'tcx>>,
+            cx: LayoutCx<'tcx>,
         ) -> Result<Self, Err> {
             assert!(def.is_struct());
             let def = Def::Adt(def);
@@ -314,13 +314,13 @@ pub(crate) mod rustc {
         fn from_enum(
             (ty, layout): (Ty<'tcx>, Layout<'tcx>),
             def: AdtDef<'tcx>,
-            cx: LayoutCx<'tcx, TyCtxt<'tcx>>,
+            cx: LayoutCx<'tcx>,
         ) -> Result<Self, Err> {
             assert!(def.is_enum());
 
             // Computes the variant of a given index.
             let layout_of_variant = |index, encoding: Option<TagEncoding<VariantIdx>>| {
-                let tag = cx.tcx.tag_for_variant((cx.tcx.erase_regions(ty), index));
+                let tag = cx.tcx().tag_for_variant((cx.tcx().erase_regions(ty), index));
                 let variant_def = Def::Variant(def.variant(index));
                 let variant_layout = ty_variant(cx, (ty, layout), index);
                 Self::from_variant(
@@ -389,7 +389,7 @@ pub(crate) mod rustc {
             tag: Option<(ScalarInt, VariantIdx, TagEncoding<VariantIdx>)>,
             (ty, layout): (Ty<'tcx>, Layout<'tcx>),
             total_size: Size,
-            cx: LayoutCx<'tcx, TyCtxt<'tcx>>,
+            cx: LayoutCx<'tcx>,
         ) -> Result<Self, Err> {
             // This constructor does not support non-`FieldsShape::Arbitrary`
             // layouts.
@@ -417,7 +417,7 @@ pub(crate) mod rustc {
                         }
                     }
                 }
-                struct_tree = struct_tree.then(Self::from_tag(*tag, cx.tcx));
+                struct_tree = struct_tree.then(Self::from_tag(*tag, cx.tcx()));
             }
 
             // Append the fields, in memory order, to the layout.
@@ -470,7 +470,7 @@ pub(crate) mod rustc {
         fn from_union(
             (ty, layout): (Ty<'tcx>, Layout<'tcx>),
             def: AdtDef<'tcx>,
-            cx: LayoutCx<'tcx, TyCtxt<'tcx>>,
+            cx: LayoutCx<'tcx>,
         ) -> Result<Self, Err> {
             assert!(def.is_union());
 
@@ -500,7 +500,7 @@ pub(crate) mod rustc {
     }
 
     fn ty_field<'tcx>(
-        cx: LayoutCx<'tcx, TyCtxt<'tcx>>,
+        cx: LayoutCx<'tcx>,
         (ty, layout): (Ty<'tcx>, Layout<'tcx>),
         i: FieldIdx,
     ) -> Ty<'tcx> {
@@ -509,12 +509,12 @@ pub(crate) mod rustc {
                 match layout.variants {
                     Variants::Single { index } => {
                         let field = &def.variant(index).fields[i];
-                        field.ty(cx.tcx, args)
+                        field.ty(cx.tcx(), args)
                     }
                     // Discriminant field for enums (where applicable).
                     Variants::Multiple { tag, .. } => {
                         assert_eq!(i.as_usize(), 0);
-                        ty::layout::PrimitiveExt::to_ty(&tag.primitive(), cx.tcx)
+                        ty::layout::PrimitiveExt::to_ty(&tag.primitive(), cx.tcx())
                     }
                 }
             }
@@ -527,11 +527,11 @@ pub(crate) mod rustc {
     }
 
     fn ty_variant<'tcx>(
-        cx: LayoutCx<'tcx, TyCtxt<'tcx>>,
+        cx: LayoutCx<'tcx>,
         (ty, layout): (Ty<'tcx>, Layout<'tcx>),
         i: VariantIdx,
     ) -> Layout<'tcx> {
-        let ty = cx.tcx.erase_regions(ty);
+        let ty = cx.tcx().erase_regions(ty);
         TyAndLayout { ty, layout }.for_variant(&cx, i).layout
     }
 }
diff --git a/compiler/rustc_transmute/src/maybe_transmutable/mod.rs b/compiler/rustc_transmute/src/maybe_transmutable/mod.rs
index 95eaedbf04a..9a31d9e3ac4 100644
--- a/compiler/rustc_transmute/src/maybe_transmutable/mod.rs
+++ b/compiler/rustc_transmute/src/maybe_transmutable/mod.rs
@@ -43,7 +43,7 @@ mod rustc {
         pub fn answer(self) -> Answer<<TyCtxt<'tcx> as QueryContext>::Ref> {
             let Self { src, dst, assume, context } = self;
 
-            let layout_cx = LayoutCx { tcx: context, param_env: ParamEnv::reveal_all() };
+            let layout_cx = LayoutCx::new(context, ParamEnv::reveal_all());
 
             // Convert `src` and `dst` from their rustc representations, to `Tree`-based
             // representations.
diff --git a/compiler/rustc_ty_utils/src/abi.rs b/compiler/rustc_ty_utils/src/abi.rs
index 16cd147b7d4..2d0c2e83690 100644
--- a/compiler/rustc_ty_utils/src/abi.rs
+++ b/compiler/rustc_ty_utils/src/abi.rs
@@ -331,7 +331,7 @@ fn fn_abi_of_fn_ptr<'tcx>(
 ) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, &'tcx FnAbiError<'tcx>> {
     let (param_env, (sig, extra_args)) = query.into_parts();
 
-    let cx = LayoutCx { tcx, param_env };
+    let cx = LayoutCx::new(tcx, param_env);
     fn_abi_new_uncached(&cx, sig, extra_args, None, None, false)
 }
 
@@ -347,7 +347,7 @@ fn fn_abi_of_instance<'tcx>(
         instance.def.requires_caller_location(tcx).then(|| tcx.caller_location_ty());
 
     fn_abi_new_uncached(
-        &LayoutCx { tcx, param_env },
+        &LayoutCx::new(tcx, param_env),
         sig,
         extra_args,
         caller_location,
@@ -358,7 +358,7 @@ fn fn_abi_of_instance<'tcx>(
 
 // Handle safe Rust thin and fat pointers.
 fn adjust_for_rust_scalar<'tcx>(
-    cx: LayoutCx<'tcx, TyCtxt<'tcx>>,
+    cx: LayoutCx<'tcx>,
     attrs: &mut ArgAttributes,
     scalar: Scalar,
     layout: TyAndLayout<'tcx>,
@@ -386,12 +386,14 @@ fn adjust_for_rust_scalar<'tcx>(
         attrs.set(ArgAttribute::NonNull);
     }
 
+    let tcx = cx.tcx();
+
     if let Some(pointee) = layout.pointee_info_at(&cx, offset) {
         let kind = if let Some(kind) = pointee.safe {
             Some(kind)
         } else if let Some(pointee) = drop_target_pointee {
             // The argument to `drop_in_place` is semantically equivalent to a mutable reference.
-            Some(PointerKind::MutableRef { unpin: pointee.is_unpin(cx.tcx, cx.param_env()) })
+            Some(PointerKind::MutableRef { unpin: pointee.is_unpin(tcx, cx.param_env()) })
         } else {
             None
         };
@@ -415,12 +417,12 @@ fn adjust_for_rust_scalar<'tcx>(
             // The aliasing rules for `Box<T>` are still not decided, but currently we emit
             // `noalias` for it. This can be turned off using an unstable flag.
             // See https://github.com/rust-lang/unsafe-code-guidelines/issues/326
-            let noalias_for_box = cx.tcx.sess.opts.unstable_opts.box_noalias;
+            let noalias_for_box = tcx.sess.opts.unstable_opts.box_noalias;
 
             // LLVM prior to version 12 had known miscompiles in the presence of noalias attributes
             // (see #54878), so it was conditionally disabled, but we don't support earlier
             // versions at all anymore. We still support turning it off using -Zmutable-noalias.
-            let noalias_mut_ref = cx.tcx.sess.opts.unstable_opts.mutable_noalias;
+            let noalias_mut_ref = tcx.sess.opts.unstable_opts.mutable_noalias;
 
             // `&T` where `T` contains no `UnsafeCell<U>` is immutable, and can be marked as both
             // `readonly` and `noalias`, as LLVM's definition of `noalias` is based solely on memory
@@ -448,16 +450,17 @@ fn adjust_for_rust_scalar<'tcx>(
 
 /// Ensure that the ABI makes basic sense.
 fn fn_abi_sanity_check<'tcx>(
-    cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
+    cx: &LayoutCx<'tcx>,
     fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
     spec_abi: SpecAbi,
 ) {
     fn fn_arg_sanity_check<'tcx>(
-        cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
+        cx: &LayoutCx<'tcx>,
         fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
         spec_abi: SpecAbi,
         arg: &ArgAbi<'tcx, Ty<'tcx>>,
     ) {
+        let tcx = cx.tcx();
         match &arg.mode {
             PassMode::Ignore => {}
             PassMode::Direct(_) => {
@@ -484,7 +487,7 @@ fn fn_abi_sanity_check<'tcx>(
                     // It needs to switch to something else before stabilization can happen.
                     // (See issue: https://github.com/rust-lang/rust/issues/117271)
                     assert!(
-                        matches!(&*cx.tcx.sess.target.arch, "wasm32" | "wasm64")
+                        matches!(&*tcx.sess.target.arch, "wasm32" | "wasm64")
                             || matches!(spec_abi, SpecAbi::PtxKernel | SpecAbi::Unadjusted),
                         "`PassMode::Direct` for aggregates only allowed for \"unadjusted\" and \"ptx-kernel\" functions and on wasm\n\
                           Problematic type: {:#?}",
@@ -516,7 +519,7 @@ fn fn_abi_sanity_check<'tcx>(
                 // With metadata. Must be unsized and not on the stack.
                 assert!(arg.layout.is_unsized() && !on_stack);
                 // Also, must not be `extern` type.
-                let tail = cx.tcx.struct_tail_for_codegen(arg.layout.ty, cx.param_env());
+                let tail = tcx.struct_tail_for_codegen(arg.layout.ty, cx.param_env());
                 if matches!(tail.kind(), ty::Foreign(..)) {
                     // These types do not have metadata, so having `meta_attrs` is bogus.
                     // Conceptually, unsized arguments must be copied around, which requires dynamically
@@ -538,7 +541,7 @@ fn fn_abi_sanity_check<'tcx>(
 // arguments of this method, into a separate `struct`.
 #[tracing::instrument(level = "debug", skip(cx, caller_location, fn_def_id, force_thin_self_ptr))]
 fn fn_abi_new_uncached<'tcx>(
-    cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
+    cx: &LayoutCx<'tcx>,
     sig: ty::PolyFnSig<'tcx>,
     extra_args: &[Ty<'tcx>],
     caller_location: Option<Ty<'tcx>>,
@@ -546,7 +549,8 @@ fn fn_abi_new_uncached<'tcx>(
     // FIXME(eddyb) replace this with something typed, like an `enum`.
     force_thin_self_ptr: bool,
 ) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, &'tcx FnAbiError<'tcx>> {
-    let sig = cx.tcx.normalize_erasing_late_bound_regions(cx.param_env, sig);
+    let tcx = cx.tcx();
+    let sig = tcx.normalize_erasing_late_bound_regions(cx.param_env, sig);
 
     let conv = conv_from_spec_abi(cx.tcx(), sig.abi, sig.c_variadic);
 
@@ -576,7 +580,7 @@ fn fn_abi_new_uncached<'tcx>(
     };
 
     let is_drop_in_place =
-        fn_def_id.is_some_and(|def_id| cx.tcx.is_lang_item(def_id, LangItem::DropInPlace));
+        fn_def_id.is_some_and(|def_id| tcx.is_lang_item(def_id, LangItem::DropInPlace));
 
     let arg_of = |ty: Ty<'tcx>, arg_idx: Option<usize>| -> Result<_, &'tcx FnAbiError<'tcx>> {
         let span = tracing::debug_span!("arg_of");
@@ -588,8 +592,7 @@ fn fn_abi_new_uncached<'tcx>(
             _ => bug!("argument to drop_in_place is not a raw ptr: {:?}", ty),
         });
 
-        let layout =
-            cx.layout_of(ty).map_err(|err| &*cx.tcx.arena.alloc(FnAbiError::Layout(*err)))?;
+        let layout = cx.layout_of(ty).map_err(|err| &*tcx.arena.alloc(FnAbiError::Layout(*err)))?;
         let layout = if force_thin_self_ptr && arg_idx == Some(0) {
             // Don't pass the vtable, it's not an argument of the virtual fn.
             // Instead, pass just the data pointer, but give it the type `*const/mut dyn Trait`
@@ -638,12 +641,12 @@ fn fn_abi_new_uncached<'tcx>(
     fn_abi_adjust_for_abi(cx, &mut fn_abi, sig.abi, fn_def_id)?;
     debug!("fn_abi_new_uncached = {:?}", fn_abi);
     fn_abi_sanity_check(cx, &fn_abi, sig.abi);
-    Ok(cx.tcx.arena.alloc(fn_abi))
+    Ok(tcx.arena.alloc(fn_abi))
 }
 
 #[tracing::instrument(level = "trace", skip(cx))]
 fn fn_abi_adjust_for_abi<'tcx>(
-    cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
+    cx: &LayoutCx<'tcx>,
     fn_abi: &mut FnAbi<'tcx, Ty<'tcx>>,
     abi: SpecAbi,
     fn_def_id: Option<DefId>,
@@ -670,17 +673,18 @@ fn fn_abi_adjust_for_abi<'tcx>(
         return Ok(());
     }
 
+    let tcx = cx.tcx();
+
     if abi == SpecAbi::Rust || abi == SpecAbi::RustCall || abi == SpecAbi::RustIntrinsic {
         // Look up the deduced parameter attributes for this function, if we have its def ID and
         // we're optimizing in non-incremental mode. We'll tag its parameters with those attributes
         // as appropriate.
-        let deduced_param_attrs = if cx.tcx.sess.opts.optimize != OptLevel::No
-            && cx.tcx.sess.opts.incremental.is_none()
-        {
-            fn_def_id.map(|fn_def_id| cx.tcx.deduced_param_attrs(fn_def_id)).unwrap_or_default()
-        } else {
-            &[]
-        };
+        let deduced_param_attrs =
+            if tcx.sess.opts.optimize != OptLevel::No && tcx.sess.opts.incremental.is_none() {
+                fn_def_id.map(|fn_def_id| tcx.deduced_param_attrs(fn_def_id)).unwrap_or_default()
+            } else {
+                &[]
+            };
 
         let fixup = |arg: &mut ArgAbi<'tcx, Ty<'tcx>>, arg_idx: Option<usize>| {
             if arg.is_ignore() {
@@ -689,7 +693,7 @@ fn fn_abi_adjust_for_abi<'tcx>(
 
             // Avoid returning floats in x87 registers on x86 as loading and storing from x87
             // registers will quiet signalling NaNs.
-            if cx.tcx.sess.target.arch == "x86"
+            if tcx.sess.target.arch == "x86"
                 && arg_idx.is_none()
                 // Intrinsics themselves are not actual "real" functions, so there's no need to
                 // change their ABIs.
@@ -744,7 +748,7 @@ fn fn_abi_adjust_for_abi<'tcx>(
                 // that's how we connect up to LLVM and it's unstable
                 // anyway, we control all calls to it in libstd.
                 Abi::Vector { .. }
-                    if abi != SpecAbi::RustIntrinsic && cx.tcx.sess.target.simd_types_indirect =>
+                    if abi != SpecAbi::RustIntrinsic && tcx.sess.target.simd_types_indirect =>
                 {
                     arg.make_indirect();
                     return;
@@ -793,7 +797,7 @@ fn fn_abi_adjust_for_abi<'tcx>(
     } else {
         fn_abi
             .adjust_for_foreign_abi(cx, abi)
-            .map_err(|err| &*cx.tcx.arena.alloc(FnAbiError::AdjustForForeignAbi(err)))?;
+            .map_err(|err| &*tcx.arena.alloc(FnAbiError::AdjustForForeignAbi(err)))?;
     }
 
     Ok(())
diff --git a/compiler/rustc_ty_utils/src/layout.rs b/compiler/rustc_ty_utils/src/layout.rs
index c23a7fec5a5..c153fb64c33 100644
--- a/compiler/rustc_ty_utils/src/layout.rs
+++ b/compiler/rustc_ty_utils/src/layout.rs
@@ -9,12 +9,11 @@ use rustc_middle::bug;
 use rustc_middle::mir::{CoroutineLayout, CoroutineSavedLocal};
 use rustc_middle::query::Providers;
 use rustc_middle::ty::layout::{
-    FloatExt, IntegerExt, LayoutCx, LayoutError, LayoutOf, TyAndLayout, MAX_SIMD_LANES,
+    FloatExt, HasTyCtxt, IntegerExt, LayoutCx, LayoutError, LayoutOf, TyAndLayout, MAX_SIMD_LANES,
 };
 use rustc_middle::ty::print::with_no_trimmed_paths;
 use rustc_middle::ty::{
-    self, AdtDef, CoroutineArgsExt, EarlyBinder, FieldDef, GenericArgsRef, Ty, TyCtxt,
-    TypeVisitableExt,
+    self, AdtDef, CoroutineArgsExt, EarlyBinder, GenericArgsRef, Ty, TyCtxt, TypeVisitableExt,
 };
 use rustc_session::{DataTypeKind, FieldInfo, FieldKind, SizeKind, VariantInfo};
 use rustc_span::sym;
@@ -63,14 +62,14 @@ fn layout_of<'tcx>(
         return tcx.layout_of(param_env.and(ty));
     }
 
-    let cx = LayoutCx { tcx, param_env };
+    let cx = LayoutCx::new(tcx, param_env);
 
     let layout = layout_of_uncached(&cx, ty)?;
     let layout = TyAndLayout { ty, layout };
 
     // If we are running with `-Zprint-type-sizes`, maybe record layouts
     // for dumping later.
-    if cx.tcx.sess.opts.unstable_opts.print_type_sizes {
+    if cx.tcx().sess.opts.unstable_opts.print_type_sizes {
         record_layout_for_printing(&cx, layout);
     }
 
@@ -79,31 +78,58 @@ fn layout_of<'tcx>(
     Ok(layout)
 }
 
-fn error<'tcx>(
-    cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
-    err: LayoutError<'tcx>,
+fn error<'tcx>(cx: &LayoutCx<'tcx>, err: LayoutError<'tcx>) -> &'tcx LayoutError<'tcx> {
+    cx.tcx().arena.alloc(err)
+}
+
+fn map_error<'tcx>(
+    cx: &LayoutCx<'tcx>,
+    ty: Ty<'tcx>,
+    err: LayoutCalculatorError<TyAndLayout<'tcx>>,
 ) -> &'tcx LayoutError<'tcx> {
-    cx.tcx.arena.alloc(err)
+    let err = match err {
+        LayoutCalculatorError::SizeOverflow => {
+            // This is sometimes not a compile error in `check` builds.
+            // See `tests/ui/limits/huge-enum.rs` for an example.
+            LayoutError::SizeOverflow(ty)
+        }
+        LayoutCalculatorError::UnexpectedUnsized(field) => {
+            // This is sometimes not a compile error if there are trivially false where clauses.
+            // See `tests/ui/layout/trivial-bounds-sized.rs` for an example.
+            assert!(field.layout.is_unsized(), "invalid layout error {err:#?}");
+            if !field.ty.is_sized(cx.tcx(), cx.param_env) {
+                cx.tcx().dcx().delayed_bug(format!(
+                    "encountered unexpected unsized field in layout of {ty:?}: {field:#?}"
+                ));
+            }
+            LayoutError::Unknown(ty)
+        }
+        LayoutCalculatorError::EmptyUnion => {
+            // This is always a compile error.
+            cx.tcx().dcx().delayed_bug(format!("computed layout of empty union: {ty:?}"));
+            LayoutError::Unknown(ty)
+        }
+    };
+    error(cx, err)
 }
 
 fn univariant_uninterned<'tcx>(
-    cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
+    cx: &LayoutCx<'tcx>,
     ty: Ty<'tcx>,
-    fields: &IndexSlice<FieldIdx, Layout<'_>>,
+    fields: &IndexSlice<FieldIdx, TyAndLayout<'tcx>>,
     repr: &ReprOptions,
     kind: StructKind,
 ) -> Result<LayoutS<FieldIdx, VariantIdx>, &'tcx LayoutError<'tcx>> {
-    let dl = cx.data_layout();
     let pack = repr.pack;
     if pack.is_some() && repr.align.is_some() {
-        cx.tcx.dcx().bug("struct cannot be packed and aligned");
+        cx.tcx().dcx().bug("struct cannot be packed and aligned");
     }
 
-    cx.univariant(dl, fields, repr, kind).ok_or_else(|| error(cx, LayoutError::SizeOverflow(ty)))
+    cx.calc.univariant(fields, repr, kind).map_err(|err| map_error(cx, ty, err))
 }
 
 fn layout_of_uncached<'tcx>(
-    cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
+    cx: &LayoutCx<'tcx>,
     ty: Ty<'tcx>,
 ) -> Result<Layout<'tcx>, &'tcx LayoutError<'tcx>> {
     // Types that reference `ty::Error` pessimistically don't have a meaningful layout.
@@ -113,7 +139,7 @@ fn layout_of_uncached<'tcx>(
         return Err(error(cx, LayoutError::ReferencesError(guar)));
     }
 
-    let tcx = cx.tcx;
+    let tcx = cx.tcx();
     let param_env = cx.param_env;
     let dl = cx.data_layout();
     let scalar_unit = |value: Primitive| {
@@ -123,9 +149,10 @@ fn layout_of_uncached<'tcx>(
     };
     let scalar = |value: Primitive| tcx.mk_layout(LayoutS::scalar(cx, scalar_unit(value)));
 
-    let univariant = |fields: &IndexSlice<FieldIdx, Layout<'_>>, repr: &ReprOptions, kind| {
-        Ok(tcx.mk_layout(univariant_uninterned(cx, ty, fields, repr, kind)?))
-    };
+    let univariant =
+        |fields: &IndexSlice<FieldIdx, TyAndLayout<'tcx>>, repr: &ReprOptions, kind| {
+            Ok(tcx.mk_layout(univariant_uninterned(cx, ty, fields, repr, kind)?))
+        };
     debug_assert!(!ty.has_non_region_infer());
 
     Ok(match *ty.kind() {
@@ -191,7 +218,7 @@ fn layout_of_uncached<'tcx>(
         }
 
         // The never type.
-        ty::Never => tcx.mk_layout(cx.layout_of_never_type()),
+        ty::Never => tcx.mk_layout(cx.calc.layout_of_never_type()),
 
         // Potentially-wide pointers.
         ty::Ref(_, pointee, _) | ty::RawPtr(pointee, _) => {
@@ -267,7 +294,7 @@ fn layout_of_uncached<'tcx>(
             };
 
             // Effectively a (ptr, meta) tuple.
-            tcx.mk_layout(cx.scalar_pair(data_ptr, metadata))
+            tcx.mk_layout(cx.calc.scalar_pair(data_ptr, metadata))
         }
 
         ty::Dynamic(_, _, ty::DynStar) => {
@@ -275,7 +302,7 @@ fn layout_of_uncached<'tcx>(
             data.valid_range_mut().start = 0;
             let mut vtable = scalar_unit(Pointer(AddressSpace::DATA));
             vtable.valid_range_mut().start = 1;
-            tcx.mk_layout(cx.scalar_pair(data, vtable))
+            tcx.mk_layout(cx.calc.scalar_pair(data, vtable))
         }
 
         // Arrays and slices.
@@ -363,9 +390,7 @@ fn layout_of_uncached<'tcx>(
         ty::Closure(_, args) => {
             let tys = args.as_closure().upvar_tys();
             univariant(
-                &tys.iter()
-                    .map(|ty| Ok(cx.layout_of(ty)?.layout))
-                    .try_collect::<IndexVec<_, _>>()?,
+                &tys.iter().map(|ty| cx.layout_of(ty)).try_collect::<IndexVec<_, _>>()?,
                 &ReprOptions::default(),
                 StructKind::AlwaysSized,
             )?
@@ -374,9 +399,7 @@ fn layout_of_uncached<'tcx>(
         ty::CoroutineClosure(_, args) => {
             let tys = args.as_coroutine_closure().upvar_tys();
             univariant(
-                &tys.iter()
-                    .map(|ty| Ok(cx.layout_of(ty)?.layout))
-                    .try_collect::<IndexVec<_, _>>()?,
+                &tys.iter().map(|ty| cx.layout_of(ty)).try_collect::<IndexVec<_, _>>()?,
                 &ReprOptions::default(),
                 StructKind::AlwaysSized,
             )?
@@ -387,7 +410,7 @@ fn layout_of_uncached<'tcx>(
                 if tys.len() == 0 { StructKind::AlwaysSized } else { StructKind::MaybeUnsized };
 
             univariant(
-                &tys.iter().map(|k| Ok(cx.layout_of(k)?.layout)).try_collect::<IndexVec<_, _>>()?,
+                &tys.iter().map(|k| cx.layout_of(k)).try_collect::<IndexVec<_, _>>()?,
                 &ReprOptions::default(),
                 kind,
             )?
@@ -527,14 +550,14 @@ fn layout_of_uncached<'tcx>(
                 .map(|v| {
                     v.fields
                         .iter()
-                        .map(|field| Ok(cx.layout_of(field.ty(tcx, args))?.layout))
+                        .map(|field| cx.layout_of(field.ty(tcx, args)))
                         .try_collect::<IndexVec<_, _>>()
                 })
                 .try_collect::<IndexVec<VariantIdx, _>>()?;
 
             if def.is_union() {
                 if def.repr().pack.is_some() && def.repr().align.is_some() {
-                    cx.tcx.dcx().span_delayed_bug(
+                    tcx.dcx().span_delayed_bug(
                         tcx.def_span(def.did()),
                         "union cannot be packed and aligned",
                     );
@@ -542,45 +565,12 @@ fn layout_of_uncached<'tcx>(
                 }
 
                 return Ok(tcx.mk_layout(
-                    cx.layout_of_union(&def.repr(), &variants)
-                        .ok_or_else(|| error(cx, LayoutError::Unknown(ty)))?,
+                    cx.calc
+                        .layout_of_union(&def.repr(), &variants)
+                        .map_err(|err| map_error(cx, ty, err))?,
                 ));
             }
 
-            let err_if_unsized = |field: &FieldDef, err_msg: &str| {
-                let field_ty = tcx.type_of(field.did);
-                let is_unsized = tcx
-                    .try_instantiate_and_normalize_erasing_regions(args, cx.param_env, field_ty)
-                    .map(|f| !f.is_sized(tcx, cx.param_env))
-                    .map_err(|e| {
-                        error(
-                            cx,
-                            LayoutError::NormalizationFailure(field_ty.instantiate_identity(), e),
-                        )
-                    })?;
-
-                if is_unsized {
-                    cx.tcx.dcx().span_delayed_bug(tcx.def_span(def.did()), err_msg.to_owned());
-                    Err(error(cx, LayoutError::Unknown(ty)))
-                } else {
-                    Ok(())
-                }
-            };
-
-            if def.is_struct() {
-                if let Some((_, fields_except_last)) =
-                    def.non_enum_variant().fields.raw.split_last()
-                {
-                    for f in fields_except_last {
-                        err_if_unsized(f, "only the last field of a struct can be unsized")?;
-                    }
-                }
-            } else {
-                for f in def.all_fields() {
-                    err_if_unsized(f, &format!("{}s cannot have unsized fields", def.descr()))?;
-                }
-            }
-
             let get_discriminant_type =
                 |min, max| Integer::repr_discr(tcx, ty, &def.repr(), min, max);
 
@@ -603,19 +593,24 @@ fn layout_of_uncached<'tcx>(
                     !tcx.type_of(last_field.did).instantiate_identity().is_sized(tcx, param_env)
                 });
 
-            let Some(layout) = cx.layout_of_struct_or_enum(
-                &def.repr(),
-                &variants,
-                def.is_enum(),
-                def.is_unsafe_cell(),
-                tcx.layout_scalar_valid_range(def.did()),
-                get_discriminant_type,
-                discriminants_iter(),
-                dont_niche_optimize_enum,
-                !maybe_unsized,
-            ) else {
-                return Err(error(cx, LayoutError::SizeOverflow(ty)));
-            };
+            let layout = cx
+                .calc
+                .layout_of_struct_or_enum(
+                    &def.repr(),
+                    &variants,
+                    def.is_enum(),
+                    def.is_unsafe_cell(),
+                    tcx.layout_scalar_valid_range(def.did()),
+                    get_discriminant_type,
+                    discriminants_iter(),
+                    dont_niche_optimize_enum,
+                    !maybe_unsized,
+                )
+                .map_err(|err| map_error(cx, ty, err))?;
+
+            if !maybe_unsized && layout.is_unsized() {
+                bug!("got unsized layout for type that cannot be unsized {ty:?}: {layout:#?}");
+            }
 
             // If the struct tail is sized and can be unsized, check that unsizing doesn't move the fields around.
             if cfg!(debug_assertions)
@@ -624,9 +619,9 @@ fn layout_of_uncached<'tcx>(
             {
                 let mut variants = variants;
                 let tail_replacement = cx.layout_of(Ty::new_slice(tcx, tcx.types.u8)).unwrap();
-                *variants[FIRST_VARIANT].raw.last_mut().unwrap() = tail_replacement.layout;
+                *variants[FIRST_VARIANT].raw.last_mut().unwrap() = tail_replacement;
 
-                let Some(unsized_layout) = cx.layout_of_struct_or_enum(
+                let Ok(unsized_layout) = cx.calc.layout_of_struct_or_enum(
                     &def.repr(),
                     &variants,
                     def.is_enum(),
@@ -809,13 +804,13 @@ fn coroutine_saved_local_eligibility(
 
 /// Compute the full coroutine layout.
 fn coroutine_layout<'tcx>(
-    cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
+    cx: &LayoutCx<'tcx>,
     ty: Ty<'tcx>,
     def_id: hir::def_id::DefId,
     args: GenericArgsRef<'tcx>,
 ) -> Result<Layout<'tcx>, &'tcx LayoutError<'tcx>> {
     use SavedLocalEligibility::*;
-    let tcx = cx.tcx;
+    let tcx = cx.tcx();
     let instantiate_field = |ty: Ty<'tcx>| EarlyBinder::bind(ty).instantiate(tcx, args);
 
     let Some(info) = tcx.coroutine_layout(def_id, args.as_coroutine().kind_ty()) else {
@@ -832,21 +827,24 @@ fn coroutine_layout<'tcx>(
     let max_discr = (info.variant_fields.len() - 1) as u128;
     let discr_int = Integer::fit_unsigned(max_discr);
     let tag = Scalar::Initialized {
-        value: Primitive::Int(discr_int, false),
+        value: Primitive::Int(discr_int, /* signed = */ false),
         valid_range: WrappingRange { start: 0, end: max_discr },
     };
-    let tag_layout = cx.tcx.mk_layout(LayoutS::scalar(cx, tag));
+    let tag_layout = TyAndLayout {
+        ty: discr_int.to_ty(tcx, /* signed = */ false),
+        layout: tcx.mk_layout(LayoutS::scalar(cx, tag)),
+    };
 
     let promoted_layouts = ineligible_locals.iter().map(|local| {
         let field_ty = instantiate_field(info.field_tys[local].ty);
         let uninit_ty = Ty::new_maybe_uninit(tcx, field_ty);
-        Ok(cx.spanned_layout_of(uninit_ty, info.field_tys[local].source_info.span)?.layout)
+        cx.spanned_layout_of(uninit_ty, info.field_tys[local].source_info.span)
     });
     let prefix_layouts = args
         .as_coroutine()
         .prefix_tys()
         .iter()
-        .map(|ty| Ok(cx.layout_of(ty)?.layout))
+        .map(|ty| cx.layout_of(ty))
         .chain(iter::once(Ok(tag_layout)))
         .chain(promoted_layouts)
         .try_collect::<IndexVec<_, _>>()?;
@@ -920,9 +918,7 @@ fn coroutine_layout<'tcx>(
             let mut variant = univariant_uninterned(
                 cx,
                 ty,
-                &variant_only_tys
-                    .map(|ty| Ok(cx.layout_of(ty)?.layout))
-                    .try_collect::<IndexVec<_, _>>()?,
+                &variant_only_tys.map(|ty| cx.layout_of(ty)).try_collect::<IndexVec<_, _>>()?,
                 &ReprOptions::default(),
                 StructKind::Prefixed(prefix_size, prefix_align.abi),
             )?;
@@ -1017,7 +1013,7 @@ fn coroutine_layout<'tcx>(
     Ok(layout)
 }
 
-fn record_layout_for_printing<'tcx>(cx: &LayoutCx<'tcx, TyCtxt<'tcx>>, layout: TyAndLayout<'tcx>) {
+fn record_layout_for_printing<'tcx>(cx: &LayoutCx<'tcx>, layout: TyAndLayout<'tcx>) {
     // Ignore layouts that are done with non-empty environments or
     // non-monomorphic layouts, as the user only wants to see the stuff
     // resulting from the final codegen session.
@@ -1028,7 +1024,7 @@ fn record_layout_for_printing<'tcx>(cx: &LayoutCx<'tcx, TyCtxt<'tcx>>, layout: T
     // (delay format until we actually need it)
     let record = |kind, packed, opt_discr_size, variants| {
         let type_desc = with_no_trimmed_paths!(format!("{}", layout.ty));
-        cx.tcx.sess.code_stats.record_type_size(
+        cx.tcx().sess.code_stats.record_type_size(
             kind,
             type_desc,
             layout.align.abi,
@@ -1068,7 +1064,7 @@ fn record_layout_for_printing<'tcx>(cx: &LayoutCx<'tcx, TyCtxt<'tcx>>, layout: T
 }
 
 fn variant_info_for_adt<'tcx>(
-    cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
+    cx: &LayoutCx<'tcx>,
     layout: TyAndLayout<'tcx>,
     adt_def: AdtDef<'tcx>,
 ) -> (Vec<VariantInfo>, Option<Size>) {
@@ -1140,7 +1136,7 @@ fn variant_info_for_adt<'tcx>(
 }
 
 fn variant_info_for_coroutine<'tcx>(
-    cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
+    cx: &LayoutCx<'tcx>,
     layout: TyAndLayout<'tcx>,
     def_id: DefId,
     args: ty::GenericArgsRef<'tcx>,
@@ -1151,8 +1147,8 @@ fn variant_info_for_coroutine<'tcx>(
         return (vec![], None);
     };
 
-    let coroutine = cx.tcx.coroutine_layout(def_id, args.as_coroutine().kind_ty()).unwrap();
-    let upvar_names = cx.tcx.closure_saved_names_of_captured_variables(def_id);
+    let coroutine = cx.tcx().coroutine_layout(def_id, args.as_coroutine().kind_ty()).unwrap();
+    let upvar_names = cx.tcx().closure_saved_names_of_captured_variables(def_id);
 
     let mut upvars_size = Size::ZERO;
     let upvar_fields: Vec<_> = args
diff --git a/compiler/rustc_ty_utils/src/layout_sanity_check.rs b/compiler/rustc_ty_utils/src/layout_sanity_check.rs
index 8378237fe2f..be0a7c5ee89 100644
--- a/compiler/rustc_ty_utils/src/layout_sanity_check.rs
+++ b/compiler/rustc_ty_utils/src/layout_sanity_check.rs
@@ -1,24 +1,22 @@
 use std::assert_matches::assert_matches;
 
 use rustc_middle::bug;
-use rustc_middle::ty::layout::{LayoutCx, TyAndLayout};
-use rustc_middle::ty::TyCtxt;
+use rustc_middle::ty::layout::{HasTyCtxt, LayoutCx, TyAndLayout};
 use rustc_target::abi::*;
 
 /// Enforce some basic invariants on layouts.
-pub(super) fn sanity_check_layout<'tcx>(
-    cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
-    layout: &TyAndLayout<'tcx>,
-) {
+pub(super) fn sanity_check_layout<'tcx>(cx: &LayoutCx<'tcx>, layout: &TyAndLayout<'tcx>) {
+    let tcx = cx.tcx();
+
     // Type-level uninhabitedness should always imply ABI uninhabitedness.
-    if layout.ty.is_privately_uninhabited(cx.tcx, cx.param_env) {
+    if layout.ty.is_privately_uninhabited(tcx, cx.param_env) {
         assert!(layout.abi.is_uninhabited());
     }
 
     if layout.size.bytes() % layout.align.abi.bytes() != 0 {
         bug!("size is not a multiple of align, in the following layout:\n{layout:#?}");
     }
-    if layout.size.bytes() >= cx.tcx.data_layout.obj_size_bound() {
+    if layout.size.bytes() >= tcx.data_layout.obj_size_bound() {
         bug!("size is too large, in the following layout:\n{layout:#?}");
     }
 
@@ -28,8 +26,8 @@ pub(super) fn sanity_check_layout<'tcx>(
     }
 
     /// Yields non-ZST fields of the type
-    fn non_zst_fields<'a, 'tcx>(
-        cx: &'a LayoutCx<'tcx, TyCtxt<'tcx>>,
+    fn non_zst_fields<'tcx, 'a>(
+        cx: &'a LayoutCx<'tcx>,
         layout: &'a TyAndLayout<'tcx>,
     ) -> impl Iterator<Item = (Size, TyAndLayout<'tcx>)> + 'a {
         (0..layout.layout.fields().count()).filter_map(|i| {
@@ -43,10 +41,7 @@ pub(super) fn sanity_check_layout<'tcx>(
         })
     }
 
-    fn skip_newtypes<'tcx>(
-        cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
-        layout: &TyAndLayout<'tcx>,
-    ) -> TyAndLayout<'tcx> {
+    fn skip_newtypes<'tcx>(cx: &LayoutCx<'tcx>, layout: &TyAndLayout<'tcx>) -> TyAndLayout<'tcx> {
         if matches!(layout.layout.variants(), Variants::Multiple { .. }) {
             // Definitely not a newtype of anything.
             return *layout;
@@ -69,7 +64,7 @@ pub(super) fn sanity_check_layout<'tcx>(
         *layout
     }
 
-    fn check_layout_abi<'tcx>(cx: &LayoutCx<'tcx, TyCtxt<'tcx>>, layout: &TyAndLayout<'tcx>) {
+    fn check_layout_abi<'tcx>(cx: &LayoutCx<'tcx>, layout: &TyAndLayout<'tcx>) {
         // Verify the ABI mandated alignment and size.
         let align = layout.abi.inherent_align(cx).map(|align| align.abi);
         let size = layout.abi.inherent_size(cx);
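
The first invariant enforced above (size is a multiple of alignment) is also observable from
ordinary Rust code. A quick standalone check, not part of this change:

use std::mem::{align_of, size_of};

fn main() {
    // For any Rust type, the size is rounded up to a multiple of the alignment,
    // which is what the sanity check asserts for every computed layout.
    assert_eq!(size_of::<(u32, u8)>() % align_of::<(u32, u8)>(), 0);
    assert_eq!(size_of::<[u16; 3]>() % align_of::<[u16; 3]>(), 0);
}
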
diff --git a/library/core/src/fmt/mod.rs b/library/core/src/fmt/mod.rs
index 45c2b6a6a0f..8fc43cb1875 100644
--- a/library/core/src/fmt/mod.rs
+++ b/library/core/src/fmt/mod.rs
@@ -975,9 +975,17 @@ pub trait UpperHex {
 /// `p` formatting.
 ///
 /// The `Pointer` trait should format its output as a memory location. This is commonly presented
-/// as hexadecimal.
+/// as hexadecimal. For more information on formatters, see [the module-level documentation][module].
 ///
-/// For more information on formatters, see [the module-level documentation][module].
+/// Printing of pointers is not a reliable way to discover how Rust programs are implemented.
+/// The act of reading an address changes the program itself: it may change how the data is
+/// represented in memory and may affect which optimizations are applied to the code.
+///
+/// The printed pointer values are not guaranteed to be stable, nor to be unique identifiers of objects.
+/// Rust allows moving values to different memory locations, and may reuse the same memory locations
+/// for different purposes.
+///
+/// There is no guarantee that the printed value can be converted back to a pointer.
 ///
 /// [module]: ../../std/fmt/index.html
 ///
diff --git a/library/std/src/process.rs b/library/std/src/process.rs
index a155855029e..60d452465da 100644
--- a/library/std/src/process.rs
+++ b/library/std/src/process.rs
@@ -1910,10 +1910,14 @@ impl crate::error::Error for ExitStatusError {}
 /// to its parent under normal termination.
 ///
 /// `ExitCode` is intended to be consumed only by the standard library (via
-/// [`Termination::report()`]), and intentionally does not provide accessors like
-/// `PartialEq`, `Eq`, or `Hash`. Instead the standard library provides the
-/// canonical `SUCCESS` and `FAILURE` exit codes as well as `From<u8> for
-/// ExitCode` for constructing other arbitrary exit codes.
+/// [`Termination::report()`]). For forwards compatibility with potentially
+/// unusual targets, this type currently does not provide `Eq`, `Hash`, or
+/// access to the raw value. This type does provide `PartialEq` for
+/// comparison, but note that there may be multiple failure codes, some of
+/// which will _not_ compare equal to `ExitCode::FAILURE`.
+/// The standard library provides the canonical `SUCCESS` and `FAILURE`
+/// exit codes as well as `From<u8> for ExitCode` for constructing other
+/// arbitrary exit codes.
 ///
 /// # Portability
 ///
@@ -1952,7 +1956,7 @@ impl crate::error::Error for ExitStatusError {}
 ///     ExitCode::SUCCESS
 /// }
 /// ```
-#[derive(Clone, Copy, Debug)]
+#[derive(Clone, Copy, Debug, PartialEq)]
 #[stable(feature = "process_exitcode", since = "1.61.0")]
 pub struct ExitCode(imp::ExitCode);
 
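
A small usage sketch of the API described above, assuming the `PartialEq` impl derived in this
change together with the existing `From<u8>` constructor; it is not part of the diff itself:

use std::process::ExitCode;

fn main() -> ExitCode {
    // Arbitrary exit codes are constructed via `From<u8>`.
    let code = ExitCode::from(3);

    // With `PartialEq`, codes can be compared; note that a custom failure code
    // like this one does not have to compare equal to `ExitCode::FAILURE`.
    if code == ExitCode::SUCCESS { ExitCode::SUCCESS } else { code }
}
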
diff --git a/src/bootstrap/src/core/build_steps/doc.rs b/src/bootstrap/src/core/build_steps/doc.rs
index 73d9e3f6793..3755f4a33cb 100644
--- a/src/bootstrap/src/core/build_steps/doc.rs
+++ b/src/bootstrap/src/core/build_steps/doc.rs
@@ -1013,6 +1013,14 @@ macro_rules! tool_doc {
     }
 }
 
+// NOTE: make sure to register these in `Builder::get_step_description`.
+tool_doc!(
+    BuildHelper,
+    "src/tools/build_helper",
+    rustc_tool = false,
+    is_library = true,
+    crates = ["build_helper"]
+);
 tool_doc!(Rustdoc, "src/tools/rustdoc", crates = ["rustdoc", "rustdoc-json-types"]);
 tool_doc!(Rustfmt, "src/tools/rustfmt", crates = ["rustfmt-nightly", "rustfmt-config_proc_macro"]);
 tool_doc!(Clippy, "src/tools/clippy", crates = ["clippy_config", "clippy_utils"]);
diff --git a/src/bootstrap/src/core/builder.rs b/src/bootstrap/src/core/builder.rs
index f49be2b3c68..003b11ec7cf 100644
--- a/src/bootstrap/src/core/builder.rs
+++ b/src/bootstrap/src/core/builder.rs
@@ -944,6 +944,7 @@ impl<'a> Builder<'a> {
                 doc::Bootstrap,
                 doc::Releases,
                 doc::RunMakeSupport,
+                doc::BuildHelper,
             ),
             Kind::Dist => describe!(
                 dist::Docs,
diff --git a/src/bootstrap/src/lib.rs b/src/bootstrap/src/lib.rs
index df2acd8b76b..4ed2c72fd55 100644
--- a/src/bootstrap/src/lib.rs
+++ b/src/bootstrap/src/lib.rs
@@ -552,11 +552,7 @@ impl Build {
             // Look for `submodule.$name.path = $path`
             // Sample output: `submodule.src/rust-installer.path src/tools/rust-installer`
             let submodule = line.split_once(' ').unwrap().1;
-            let path = Path::new(submodule);
-            // Don't update the submodule unless it's already been cloned.
-            if GitInfo::new(false, path).is_managed_git_subrepository() {
-                self.config.update_submodule(submodule);
-            }
+            self.update_existing_submodule(submodule);
         }
     }
 
diff --git a/src/tools/miri/src/eval.rs b/src/tools/miri/src/eval.rs
index 0850a8f24d9..f95177684ae 100644
--- a/src/tools/miri/src/eval.rs
+++ b/src/tools/miri/src/eval.rs
@@ -277,7 +277,7 @@ pub fn create_ecx<'tcx>(
     config: &MiriConfig,
 ) -> InterpResult<'tcx, InterpCx<'tcx, MiriMachine<'tcx>>> {
     let param_env = ty::ParamEnv::reveal_all();
-    let layout_cx = LayoutCx { tcx, param_env };
+    let layout_cx = LayoutCx::new(tcx, param_env);
     let mut ecx =
         InterpCx::new(tcx, rustc_span::DUMMY_SP, param_env, MiriMachine::new(config, layout_cx));
 
diff --git a/src/tools/miri/src/machine.rs b/src/tools/miri/src/machine.rs
index df55902decd..c2b0aedbde1 100644
--- a/src/tools/miri/src/machine.rs
+++ b/src/tools/miri/src/machine.rs
@@ -21,7 +21,7 @@ use rustc_middle::{
     query::TyCtxtAt,
     ty::{
         self,
-        layout::{LayoutCx, LayoutError, LayoutOf, TyAndLayout},
+        layout::{HasTyCtxt, LayoutCx, LayoutError, LayoutOf, TyAndLayout},
         Instance, Ty, TyCtxt,
     },
 };
@@ -381,8 +381,8 @@ pub struct PrimitiveLayouts<'tcx> {
 }
 
 impl<'tcx> PrimitiveLayouts<'tcx> {
-    fn new(layout_cx: LayoutCx<'tcx, TyCtxt<'tcx>>) -> Result<Self, &'tcx LayoutError<'tcx>> {
-        let tcx = layout_cx.tcx;
+    fn new(layout_cx: LayoutCx<'tcx>) -> Result<Self, &'tcx LayoutError<'tcx>> {
+        let tcx = layout_cx.tcx();
         let mut_raw_ptr = Ty::new_mut_ptr(tcx, tcx.types.unit);
         let const_raw_ptr = Ty::new_imm_ptr(tcx, tcx.types.unit);
         Ok(Self {
@@ -596,14 +596,13 @@ pub struct MiriMachine<'tcx> {
 }
 
 impl<'tcx> MiriMachine<'tcx> {
-    pub(crate) fn new(config: &MiriConfig, layout_cx: LayoutCx<'tcx, TyCtxt<'tcx>>) -> Self {
-        let tcx = layout_cx.tcx;
+    pub(crate) fn new(config: &MiriConfig, layout_cx: LayoutCx<'tcx>) -> Self {
+        let tcx = layout_cx.tcx();
         let local_crates = helpers::get_local_crates(tcx);
         let layouts =
             PrimitiveLayouts::new(layout_cx).expect("Couldn't get layouts of primitive types");
         let profiler = config.measureme_out.as_ref().map(|out| {
-            let crate_name = layout_cx
-                .tcx
+            let crate_name = tcx
                 .sess
                 .opts
                 .crate_name
@@ -701,7 +700,7 @@ impl<'tcx> MiriMachine<'tcx> {
             clock: Clock::new(config.isolated_op == IsolatedOp::Allow),
             #[cfg(unix)]
             native_lib: config.native_lib.as_ref().map(|lib_file_path| {
-                let target_triple = layout_cx.tcx.sess.opts.target_triple.triple();
+                let target_triple = tcx.sess.opts.target_triple.triple();
                 // Check if host target == the session target.
                 if env!("TARGET") != target_triple {
                     panic!(
diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/layout.rs b/src/tools/rust-analyzer/crates/hir-ty/src/layout.rs
index 47cc2a2f1e6..25362d23d55 100644
--- a/src/tools/rust-analyzer/crates/hir-ty/src/layout.rs
+++ b/src/tools/rust-analyzer/crates/hir-ty/src/layout.rs
@@ -1,13 +1,13 @@
 //! Compute the binary representation of a type
 
-use std::{borrow::Cow, fmt};
+use std::fmt;
 
 use base_db::salsa::Cycle;
 use chalk_ir::{AdtId, FloatTy, IntTy, TyKind, UintTy};
 use hir_def::{
     layout::{
-        Abi, FieldsShape, Float, Integer, LayoutCalculator, LayoutS, Primitive, ReprOptions,
-        Scalar, Size, StructKind, TargetDataLayout, WrappingRange,
+        Abi, FieldsShape, Float, Integer, LayoutCalculator, LayoutCalculatorError, LayoutS,
+        Primitive, ReprOptions, Scalar, Size, StructKind, TargetDataLayout, WrappingRange,
     },
     LocalFieldId, StructId,
 };
@@ -15,7 +15,6 @@ use la_arena::{Idx, RawIdx};
 use rustc_abi::AddressSpace;
 use rustc_index::{IndexSlice, IndexVec};
 
-use stdx::never;
 use triomphe::Arc;
 
 use crate::{
@@ -107,19 +106,24 @@ impl fmt::Display for LayoutError {
     }
 }
 
-struct LayoutCx<'a> {
-    target: &'a TargetDataLayout,
+impl<F> From<LayoutCalculatorError<F>> for LayoutError {
+    fn from(err: LayoutCalculatorError<F>) -> Self {
+        match err {
+            LayoutCalculatorError::UnexpectedUnsized(_) | LayoutCalculatorError::EmptyUnion => {
+                LayoutError::Unknown
+            }
+            LayoutCalculatorError::SizeOverflow => LayoutError::SizeOverflow,
+        }
+    }
 }
 
-impl<'a> LayoutCalculator for LayoutCx<'a> {
-    type TargetDataLayoutRef = &'a TargetDataLayout;
-
-    fn delayed_bug(&self, txt: impl Into<Cow<'static, str>>) {
-        never!("{}", txt.into());
-    }
+struct LayoutCx<'a> {
+    calc: LayoutCalculator<&'a TargetDataLayout>,
+}
 
-    fn current_data_layout(&self) -> &'a TargetDataLayout {
-        self.target
+impl<'a> LayoutCx<'a> {
+    fn new(target: &'a TargetDataLayout) -> Self {
+        Self { calc: LayoutCalculator::new(target) }
     }
 }
 
@@ -205,8 +209,8 @@ pub fn layout_of_ty_query(
     let Ok(target) = db.target_data_layout(krate) else {
         return Err(LayoutError::TargetLayoutNotAvailable);
     };
-    let cx = LayoutCx { target: &target };
-    let dl = cx.current_data_layout();
+    let dl = &*target;
+    let cx = LayoutCx::new(dl);
     let ty = normalize(db, trait_env.clone(), ty);
     let result = match ty.kind(Interner) {
         TyKind::Adt(AdtId(def), subst) => {
@@ -281,7 +285,7 @@ pub fn layout_of_ty_query(
                 .collect::<Result<Vec<_>, _>>()?;
             let fields = fields.iter().map(|it| &**it).collect::<Vec<_>>();
             let fields = fields.iter().collect::<IndexVec<_, _>>();
-            cx.univariant(dl, &fields, &ReprOptions::default(), kind).ok_or(LayoutError::Unknown)?
+            cx.calc.univariant(&fields, &ReprOptions::default(), kind)?
         }
         TyKind::Array(element, count) => {
             let count = try_const_usize(db, count).ok_or(LayoutError::HasErrorConst)? as u64;
@@ -367,12 +371,12 @@ pub fn layout_of_ty_query(
             };
 
             // Effectively a (ptr, meta) tuple.
-            cx.scalar_pair(data_ptr, metadata)
+            cx.calc.scalar_pair(data_ptr, metadata)
         }
-        TyKind::FnDef(_, _) => layout_of_unit(&cx, dl)?,
-        TyKind::Never => cx.layout_of_never_type(),
+        TyKind::FnDef(_, _) => layout_of_unit(&cx)?,
+        TyKind::Never => cx.calc.layout_of_never_type(),
         TyKind::Dyn(_) | TyKind::Foreign(_) => {
-            let mut unit = layout_of_unit(&cx, dl)?;
+            let mut unit = layout_of_unit(&cx)?;
             match &mut unit.abi {
                 Abi::Aggregate { sized } => *sized = false,
                 _ => return Err(LayoutError::Unknown),
@@ -414,8 +418,7 @@ pub fn layout_of_ty_query(
                 .collect::<Result<Vec<_>, _>>()?;
             let fields = fields.iter().map(|it| &**it).collect::<Vec<_>>();
             let fields = fields.iter().collect::<IndexVec<_, _>>();
-            cx.univariant(dl, &fields, &ReprOptions::default(), StructKind::AlwaysSized)
-                .ok_or(LayoutError::Unknown)?
+            cx.calc.univariant(&fields, &ReprOptions::default(), StructKind::AlwaysSized)?
         }
         TyKind::Coroutine(_, _) | TyKind::CoroutineWitness(_, _) => {
             return Err(LayoutError::NotImplemented)
@@ -447,14 +450,14 @@ pub fn layout_of_ty_recover(
     Err(LayoutError::RecursiveTypeWithoutIndirection)
 }
 
-fn layout_of_unit(cx: &LayoutCx<'_>, dl: &TargetDataLayout) -> Result<Layout, LayoutError> {
-    cx.univariant::<RustcFieldIdx, RustcEnumVariantIdx, &&Layout>(
-        dl,
-        IndexSlice::empty(),
-        &ReprOptions::default(),
-        StructKind::AlwaysSized,
-    )
-    .ok_or(LayoutError::Unknown)
+fn layout_of_unit(cx: &LayoutCx<'_>) -> Result<Layout, LayoutError> {
+    cx.calc
+        .univariant::<RustcFieldIdx, RustcEnumVariantIdx, &&Layout>(
+            IndexSlice::empty(),
+            &ReprOptions::default(),
+            StructKind::AlwaysSized,
+        )
+        .map_err(Into::into)
 }
 
 fn struct_tail_erasing_lifetimes(db: &dyn HirDatabase, pointee: Ty) -> Ty {
diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/layout/adt.rs b/src/tools/rust-analyzer/crates/hir-ty/src/layout/adt.rs
index 3463e690972..a060ebfe6be 100644
--- a/src/tools/rust-analyzer/crates/hir-ty/src/layout/adt.rs
+++ b/src/tools/rust-analyzer/crates/hir-ty/src/layout/adt.rs
@@ -5,7 +5,7 @@ use std::{cmp, ops::Bound};
 use base_db::salsa::Cycle;
 use hir_def::{
     data::adt::VariantData,
-    layout::{Integer, LayoutCalculator, ReprOptions, TargetDataLayout},
+    layout::{Integer, ReprOptions, TargetDataLayout},
     AdtId, VariantId,
 };
 use intern::sym;
@@ -36,8 +36,8 @@ pub fn layout_of_adt_query(
     let Ok(target) = db.target_data_layout(krate) else {
         return Err(LayoutError::TargetLayoutNotAvailable);
     };
-    let cx = LayoutCx { target: &target };
-    let dl = cx.current_data_layout();
+    let dl = &*target;
+    let cx = LayoutCx::new(dl);
     let handle_variant = |def: VariantId, var: &VariantData| {
         var.fields()
             .iter()
@@ -73,9 +73,9 @@ pub fn layout_of_adt_query(
         .collect::<SmallVec<[_; 1]>>();
     let variants = variants.iter().map(|it| it.iter().collect()).collect::<IndexVec<_, _>>();
     let result = if matches!(def, AdtId::UnionId(..)) {
-        cx.layout_of_union(&repr, &variants).ok_or(LayoutError::Unknown)?
+        cx.calc.layout_of_union(&repr, &variants)?
     } else {
-        cx.layout_of_struct_or_enum(
+        cx.calc.layout_of_struct_or_enum(
             &repr,
             &variants,
             matches!(def, AdtId::EnumId(..)),
@@ -103,8 +103,7 @@ pub fn layout_of_adt_query(
                     .next()
                     .and_then(|it| it.iter().last().map(|it| !it.is_unsized()))
                     .unwrap_or(true),
-        )
-        .ok_or(LayoutError::SizeOverflow)?
+        )?
     };
     Ok(Arc::new(result))
 }
diff --git a/tests/assembly/simd/reduce-fadd-unordered.rs b/tests/assembly/simd/reduce-fadd-unordered.rs
new file mode 100644
index 00000000000..ade60ba184c
--- /dev/null
+++ b/tests/assembly/simd/reduce-fadd-unordered.rs
@@ -0,0 +1,30 @@
+//@ revisions: x86_64 aarch64
+//@ assembly-output: emit-asm
+//@ compile-flags: --crate-type=lib -O
+//@[aarch64] only-aarch64
+//@[x86_64] only-x86_64
+//@[x86_64] compile-flags: -Ctarget-feature=+sse3
+//@ ignore-sgx Test incompatible with LVI mitigations
+#![feature(portable_simd)]
+#![feature(core_intrinsics)]
+use std::intrinsics::simd as intrinsics;
+use std::simd::*;
+// Regression test for https://github.com/rust-lang/rust/issues/130028
+// This intrinsic produces much worse code if you use +0.0 instead of -0.0 because
+// +0.0 isn't as easy to algebraically reassociate, even using LLVM's reassoc attribute!
+// It would emit about an extra fadd, depending on the architecture.
+
+// CHECK-LABEL: reduce_fadd_negative_zero
+pub unsafe fn reduce_fadd_negative_zero(v: f32x4) -> f32 {
+    // x86_64: addps
+    // x86_64-NEXT: movshdup
+    // x86_64-NEXT: addss
+    // x86_64-NOT: xorps
+
+    // aarch64: faddp
+    // aarch64-NEXT: faddp
+
+    // CHECK-NOT: {{f?}}add{{p?s*}}
+    // CHECK: ret
+    intrinsics::simd_reduce_add_unordered(v)
+}
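
Why -0.0 specifically: it is the identity element of IEEE 754 addition (x + -0.0 == x for any
finite x), while +0.0 is not, since -0.0 + +0.0 rounds to +0.0. A quick standalone check, not
part of the test:

fn main() {
    // Adding -0.0 leaves every finite value unchanged, even -0.0 itself.
    assert_eq!((-0.0f32 + -0.0).to_bits(), (-0.0f32).to_bits());
    // Adding +0.0 is not a no-op: it flips the sign of negative zero.
    assert_eq!((-0.0f32 + 0.0).to_bits(), (0.0f32).to_bits());
}
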
diff --git a/tests/crashes/124182.rs b/tests/crashes/124182.rs
deleted file mode 100644
index 46948207df3..00000000000
--- a/tests/crashes/124182.rs
+++ /dev/null
@@ -1,22 +0,0 @@
-//@ known-bug: #124182
-struct LazyLock<T> {
-    data: (Copy, fn() -> T),
-}
-
-impl<T> LazyLock<T> {
-    pub const fn new(f: fn() -> T) -> LazyLock<T> {
-        LazyLock { data: (None, f) }
-    }
-}
-
-struct A<T = i32>(Option<T>);
-
-impl<T> Default for A<T> {
-    fn default() -> Self {
-        A(None)
-    }
-}
-
-static EMPTY_SET: LazyLock<A<i32>> = LazyLock::new(A::default);
-
-fn main() {}
diff --git a/tests/crashes/126939.rs b/tests/crashes/126939.rs
deleted file mode 100644
index 1edf7484606..00000000000
--- a/tests/crashes/126939.rs
+++ /dev/null
@@ -1,21 +0,0 @@
-//@ known-bug: rust-lang/rust#126939
-
-struct MySlice<T: Copy>(bool, T);
-type MySliceBool = MySlice<[bool]>;
-
-use std::mem;
-
-struct P2<T> {
-    a: T,
-    b: MySliceBool,
-}
-
-macro_rules! check {
-    ($t:ty, $align:expr) => ({
-        assert_eq!(mem::align_of::<$t>(), $align);
-    });
-}
-
-pub fn main() {
-    check!(P2<u8>, 1);
-}
diff --git a/tests/crashes/129850.rs b/tests/crashes/129850.rs
deleted file mode 100644
index 9c04805587a..00000000000
--- a/tests/crashes/129850.rs
+++ /dev/null
@@ -1,9 +0,0 @@
-//@ known-bug: rust-lang/rust#129850
-
-pub trait Foo2 {
-    fn bar<'a: 'a>(&'a mut self) -> impl Sized + use<'static>;
-}
-
-impl Foo2 for () {
-    fn bar<'a: 'a>(&'a mut self) -> impl Sized + 'a {}
-}
diff --git a/tests/crashes/130372-1.rs b/tests/crashes/130372-1.rs
deleted file mode 100644
index 5d58c14e7ab..00000000000
--- a/tests/crashes/130372-1.rs
+++ /dev/null
@@ -1,9 +0,0 @@
-//@ known-bug: rust-lang/rust#130372
-
-pub fn variadic_fn(n: usize, mut args: ...) {}
-
-reuse variadic_fn;
-
-fn main() {
-        variadic_fn();
-}
diff --git a/tests/crashes/130372-2.rs b/tests/crashes/130372-2.rs
deleted file mode 100644
index 46404191e32..00000000000
--- a/tests/crashes/130372-2.rs
+++ /dev/null
@@ -1,11 +0,0 @@
-//@ known-bug: rust-lang/rust#130372
-
-pub fn test_va_copy(_: u64, mut ap: ...) {}
-
-pub fn main() {
-    unsafe {
-        test_va_copy();
-
-        call(x);
-    }
-}
diff --git a/tests/crashes/130372-3.rs b/tests/crashes/130372-3.rs
deleted file mode 100644
index 6e1c57437c8..00000000000
--- a/tests/crashes/130372-3.rs
+++ /dev/null
@@ -1,7 +0,0 @@
-//@ known-bug: rust-lang/rust#130372
-
-fn bar() -> impl Fn() {
-    wrap()
-}
-
-fn wrap(...: impl ...) -> impl Fn() {}
diff --git a/tests/crashes/130399.rs b/tests/crashes/130399.rs
deleted file mode 100644
index 2248c8c0124..00000000000
--- a/tests/crashes/130399.rs
+++ /dev/null
@@ -1,5 +0,0 @@
-//@ known-bug: rust-lang/rust#130399
-
-fn elided(main: &()) -> impl Sized + use<main> {}
-
-fn main() {}
diff --git a/tests/crashes/129262.rs b/tests/ui/async-await/async-closures/closure-shim-borrowck-error.rs
index c430af35988..4cbbefb0f52 100644
--- a/tests/crashes/129262.rs
+++ b/tests/ui/async-await/async-closures/closure-shim-borrowck-error.rs
@@ -1,4 +1,3 @@
-//@ known-bug: rust-lang/rust#129262
 //@ compile-flags: -Zvalidate-mir --edition=2018 --crate-type=lib -Copt-level=3
 
 #![feature(async_closure)]
@@ -11,6 +10,7 @@ fn needs_fn_mut<T>(mut x: impl FnMut() -> T) {
 
 fn hello(x: Ty) {
     needs_fn_mut(async || {
+        //~^ ERROR cannot move out of `x`
         x.hello();
     });
 }
diff --git a/tests/ui/async-await/async-closures/closure-shim-borrowck-error.stderr b/tests/ui/async-await/async-closures/closure-shim-borrowck-error.stderr
new file mode 100644
index 00000000000..bab26c19482
--- /dev/null
+++ b/tests/ui/async-await/async-closures/closure-shim-borrowck-error.stderr
@@ -0,0 +1,24 @@
+error[E0507]: cannot move out of `x` which is behind a mutable reference
+  --> $DIR/closure-shim-borrowck-error.rs:12:18
+   |
+LL |     needs_fn_mut(async || {
+   |                  ^^^^^^^^ `x` is moved here
+LL |
+LL |         x.hello();
+   |         -
+   |         |
+   |         variable moved due to use in coroutine
+   |         move occurs because `x` has type `Ty`, which does not implement the `Copy` trait
+   |
+note: if `Ty` implemented `Clone`, you could clone the value
+  --> $DIR/closure-shim-borrowck-error.rs:18:1
+   |
+LL |         x.hello();
+   |         - you could clone this value
+...
+LL | struct Ty;
+   | ^^^^^^^^^ consider implementing `Clone` for this type
+
+error: aborting due to 1 previous error
+
+For more information about this error, try `rustc --explain E0507`.
diff --git a/tests/ui/async-await/async-closures/debuginfo-by-move-body.rs b/tests/ui/async-await/async-closures/debuginfo-by-move-body.rs
new file mode 100644
index 00000000000..6f339f0c8ef
--- /dev/null
+++ b/tests/ui/async-await/async-closures/debuginfo-by-move-body.rs
@@ -0,0 +1,19 @@
+//@ aux-build:block-on.rs
+//@ edition: 2021
+//@ build-pass
+//@ compile-flags: -Cdebuginfo=2
+
+#![feature(async_closure)]
+
+extern crate block_on;
+
+async fn call_once(f: impl async FnOnce()) {
+    f().await;
+}
+
+pub fn main() {
+    block_on::block_on(async {
+        let async_closure = async move || {};
+        call_once(async_closure).await;
+    });
+}
diff --git a/tests/ui/error-codes/E0799.rs b/tests/ui/error-codes/E0799.rs
new file mode 100644
index 00000000000..a1e5b532669
--- /dev/null
+++ b/tests/ui/error-codes/E0799.rs
@@ -0,0 +1,4 @@
+fn test() -> impl Sized + use<main> {}
+//~^ ERROR E0799
+
+fn main() {}
diff --git a/tests/ui/error-codes/E0799.stderr b/tests/ui/error-codes/E0799.stderr
new file mode 100644
index 00000000000..3639424e466
--- /dev/null
+++ b/tests/ui/error-codes/E0799.stderr
@@ -0,0 +1,9 @@
+error[E0799]: expected type or const parameter, found function `main`
+  --> $DIR/E0799.rs:1:31
+   |
+LL | fn test() -> impl Sized + use<main> {}
+   |                               ^^^^ not a type or const parameter
+
+error: aborting due to 1 previous error
+
+For more information about this error, try `rustc --explain E0799`.
diff --git a/tests/ui/error-codes/E0800.rs b/tests/ui/error-codes/E0800.rs
new file mode 100644
index 00000000000..6112157feca
--- /dev/null
+++ b/tests/ui/error-codes/E0800.rs
@@ -0,0 +1,4 @@
+fn test() -> impl Sized + use<Missing> {}
+//~^ ERROR E0800
+
+fn main() {}
diff --git a/tests/ui/error-codes/E0800.stderr b/tests/ui/error-codes/E0800.stderr
new file mode 100644
index 00000000000..282981a9173
--- /dev/null
+++ b/tests/ui/error-codes/E0800.stderr
@@ -0,0 +1,9 @@
+error[E0800]: cannot find type or const parameter `Missing` in this scope
+  --> $DIR/E0800.rs:1:31
+   |
+LL | fn test() -> impl Sized + use<Missing> {}
+   |                               ^^^^^^^ not found in this scope
+
+error: aborting due to 1 previous error
+
+For more information about this error, try `rustc --explain E0800`.
diff --git a/tests/ui/impl-trait/precise-capturing/bad-params.rs b/tests/ui/impl-trait/precise-capturing/bad-params.rs
index 17b517abd74..d1ec48df48c 100644
--- a/tests/ui/impl-trait/precise-capturing/bad-params.rs
+++ b/tests/ui/impl-trait/precise-capturing/bad-params.rs
@@ -1,8 +1,8 @@
 fn missing() -> impl Sized + use<T> {}
-//~^ ERROR cannot find type `T` in this scope
+//~^ ERROR cannot find type or const parameter `T` in this scope
 
 fn missing_self() -> impl Sized + use<Self> {}
-//~^ ERROR cannot find type `Self` in this scope
+//~^ ERROR cannot find type or const parameter `Self` in this scope
 
 struct MyType;
 impl MyType {
@@ -11,6 +11,9 @@ impl MyType {
 }
 
 fn hello() -> impl Sized + use<hello> {}
-//~^ ERROR expected type or const parameter in `use<...>` precise captures list, found function
+//~^ ERROR expected type or const parameter, found function `hello`
+
+fn arg(x: ()) -> impl Sized + use<x> {}
+//~^ ERROR expected type or const parameter, found local variable `x`
 
 fn main() {}
diff --git a/tests/ui/impl-trait/precise-capturing/bad-params.stderr b/tests/ui/impl-trait/precise-capturing/bad-params.stderr
index 06ccf356948..07ada8da300 100644
--- a/tests/ui/impl-trait/precise-capturing/bad-params.stderr
+++ b/tests/ui/impl-trait/precise-capturing/bad-params.stderr
@@ -1,4 +1,4 @@
-error[E0412]: cannot find type `T` in this scope
+error[E0800]: cannot find type or const parameter `T` in this scope
   --> $DIR/bad-params.rs:1:34
    |
 LL | fn missing() -> impl Sized + use<T> {}
@@ -9,7 +9,7 @@ help: you might be missing a type parameter
 LL | fn missing<T>() -> impl Sized + use<T> {}
    |           +++
 
-error[E0411]: cannot find type `Self` in this scope
+error[E0411]: cannot find type or const parameter `Self` in this scope
   --> $DIR/bad-params.rs:4:39
    |
 LL | fn missing_self() -> impl Sized + use<Self> {}
@@ -17,7 +17,19 @@ LL | fn missing_self() -> impl Sized + use<Self> {}
    |    |
    |    `Self` not allowed in a function
 
-error: `Self` can't be captured in `use<...>` precise captures list, since it is an alias
+error[E0799]: expected type or const parameter, found function `hello`
+  --> $DIR/bad-params.rs:13:32
+   |
+LL | fn hello() -> impl Sized + use<hello> {}
+   |                                ^^^^^ not a type or const parameter
+
+error[E0799]: expected type or const parameter, found local variable `x`
+  --> $DIR/bad-params.rs:16:35
+   |
+LL | fn arg(x: ()) -> impl Sized + use<x> {}
+   |                                   ^ not a type or const parameter
+
+error[E0799]: `Self` can't be captured in `use<...>` precise captures list, since it is an alias
   --> $DIR/bad-params.rs:9:48
    |
 LL | impl MyType {
@@ -25,13 +37,7 @@ LL | impl MyType {
 LL |     fn self_is_not_param() -> impl Sized + use<Self> {}
    |                                                ^^^^
 
-error: expected type or const parameter in `use<...>` precise captures list, found function
-  --> $DIR/bad-params.rs:13:32
-   |
-LL | fn hello() -> impl Sized + use<hello> {}
-   |                                ^^^^^
-
-error: aborting due to 4 previous errors
+error: aborting due to 5 previous errors
 
-Some errors have detailed explanations: E0411, E0412.
+Some errors have detailed explanations: E0411, E0799, E0800.
 For more information about an error, try `rustc --explain E0411`.
diff --git a/tests/ui/impl-trait/precise-capturing/rpitit-captures-more-method-lifetimes.rs b/tests/ui/impl-trait/precise-capturing/rpitit-captures-more-method-lifetimes.rs
new file mode 100644
index 00000000000..71a91fe319e
--- /dev/null
+++ b/tests/ui/impl-trait/precise-capturing/rpitit-captures-more-method-lifetimes.rs
@@ -0,0 +1,16 @@
+// Make sure we don't ICE when an RPITIT in an impl captures more of the method's
+// lifetimes than the trait definition does, which is not allowed. Due to the default
+// lifetime capture rules of RPITITs, this is only possible when using precise capturing.
+
+pub trait Foo {
+    fn bar<'tr: 'tr>(&'tr mut self) -> impl Sized + use<Self>;
+    //~^ ERROR `use<...>` precise capturing syntax is currently not allowed in return-position `impl Trait` in traits
+}
+
+impl Foo for () {
+    fn bar<'im: 'im>(&'im mut self) -> impl Sized + 'im {}
+    //~^ ERROR return type captures more lifetimes than trait definition
+    //~| WARN impl trait in impl method signature does not match trait method signature
+}
+
+fn main() {}
diff --git a/tests/ui/impl-trait/precise-capturing/rpitit-captures-more-method-lifetimes.stderr b/tests/ui/impl-trait/precise-capturing/rpitit-captures-more-method-lifetimes.stderr
new file mode 100644
index 00000000000..339e2e6335e
--- /dev/null
+++ b/tests/ui/impl-trait/precise-capturing/rpitit-captures-more-method-lifetimes.stderr
@@ -0,0 +1,42 @@
+error: `use<...>` precise capturing syntax is currently not allowed in return-position `impl Trait` in traits
+  --> $DIR/rpitit-captures-more-method-lifetimes.rs:6:53
+   |
+LL |     fn bar<'tr: 'tr>(&'tr mut self) -> impl Sized + use<Self>;
+   |                                                     ^^^^^^^^^
+   |
+   = note: currently, return-position `impl Trait` in traits and trait implementations capture all lifetimes in scope
+
+error: return type captures more lifetimes than trait definition
+  --> $DIR/rpitit-captures-more-method-lifetimes.rs:11:40
+   |
+LL |     fn bar<'im: 'im>(&'im mut self) -> impl Sized + 'im {}
+   |            ---                         ^^^^^^^^^^^^^^^^
+   |            |
+   |            this lifetime was captured
+   |
+note: hidden type must only reference lifetimes captured by this impl trait
+  --> $DIR/rpitit-captures-more-method-lifetimes.rs:6:40
+   |
+LL |     fn bar<'tr: 'tr>(&'tr mut self) -> impl Sized + use<Self>;
+   |                                        ^^^^^^^^^^^^^^^^^^^^^^
+   = note: hidden type inferred to be `impl Sized + 'im`
+
+warning: impl trait in impl method signature does not match trait method signature
+  --> $DIR/rpitit-captures-more-method-lifetimes.rs:11:40
+   |
+LL |     fn bar<'tr: 'tr>(&'tr mut self) -> impl Sized + use<Self>;
+   |                                        ---------------------- return type from trait method defined here
+...
+LL |     fn bar<'im: 'im>(&'im mut self) -> impl Sized + 'im {}
+   |                                        ^^^^^^^^^^^^^^^^
+   |
+   = note: add `#[allow(refining_impl_trait)]` if it is intended for this to be part of the public API of this crate
+   = note: we are soliciting feedback, see issue #121718 <https://github.com/rust-lang/rust/issues/121718> for more information
+   = note: `#[warn(refining_impl_trait_reachable)]` on by default
+help: replace the return type so that it matches the trait
+   |
+LL |     fn bar<'im: 'im>(&'im mut self) -> impl Sized {}
+   |                                        ~~~~~~~~~~
+
+error: aborting due to 2 previous errors; 1 warning emitted
+
diff --git a/tests/ui/layout/debug.rs b/tests/ui/layout/debug.rs
index 91e96d78ff5..166321798de 100644
--- a/tests/ui/layout/debug.rs
+++ b/tests/ui/layout/debug.rs
@@ -76,3 +76,8 @@ impl S {
 
 #[rustc_layout(debug)]
 type Impossible = (str, str); //~ ERROR: cannot be known at compilation time
+
+// Test that computing the layout of an empty union doesn't ICE.
+#[rustc_layout(debug)]
+union EmptyUnion {} //~ ERROR: has an unknown layout
+//~^ ERROR: unions cannot have zero fields
diff --git a/tests/ui/layout/debug.stderr b/tests/ui/layout/debug.stderr
index 5162a771b4d..c9715a8e146 100644
--- a/tests/ui/layout/debug.stderr
+++ b/tests/ui/layout/debug.stderr
@@ -1,3 +1,9 @@
+error: unions cannot have zero fields
+  --> $DIR/debug.rs:82:1
+   |
+LL | union EmptyUnion {}
+   | ^^^^^^^^^^^^^^^^^^^
+
 error: layout_of(E) = Layout {
            size: Size(12 bytes),
            align: AbiAndPrefAlign {
@@ -566,12 +572,18 @@ LL | type Impossible = (str, str);
    = help: the trait `Sized` is not implemented for `str`
    = note: only the last element of a tuple may have a dynamically sized type
 
+error: the type `EmptyUnion` has an unknown layout
+  --> $DIR/debug.rs:82:1
+   |
+LL | union EmptyUnion {}
+   | ^^^^^^^^^^^^^^^^
+
 error: `#[rustc_layout]` can only be applied to `struct`/`enum`/`union` declarations and type aliases
   --> $DIR/debug.rs:74:5
    |
 LL |     const C: () = ();
    |     ^^^^^^^^^^^
 
-error: aborting due to 17 previous errors
+error: aborting due to 19 previous errors
 
 For more information about this error, try `rustc --explain E0277`.
diff --git a/tests/ui/layout/invalid-unsized-const-eval.rs b/tests/ui/layout/invalid-unsized-const-eval.rs
new file mode 100644
index 00000000000..2dec0b0faac
--- /dev/null
+++ b/tests/ui/layout/invalid-unsized-const-eval.rs
@@ -0,0 +1,14 @@
+// issue: #124182
+
+//! This test used to trip an assertion in const eval, because `layout_of(LazyLock)`
+//! returned `Ok` with an unsized layout when a sized layout was expected.
+//! It was fixed by making `layout_of` always return `Err` for types that
+//! contain unsized fields in unexpected locations.
+
+struct LazyLock {
+    data: (dyn Sync, ()), //~ ERROR the size for values of type
+}
+
+static EMPTY_SET: LazyLock = todo!();
+
+fn main() {}
diff --git a/tests/ui/layout/invalid-unsized-const-eval.stderr b/tests/ui/layout/invalid-unsized-const-eval.stderr
new file mode 100644
index 00000000000..bf65782b7a8
--- /dev/null
+++ b/tests/ui/layout/invalid-unsized-const-eval.stderr
@@ -0,0 +1,12 @@
+error[E0277]: the size for values of type `(dyn Sync + 'static)` cannot be known at compilation time
+  --> $DIR/invalid-unsized-const-eval.rs:9:11
+   |
+LL |     data: (dyn Sync, ()),
+   |           ^^^^^^^^^^^^^^ doesn't have a size known at compile-time
+   |
+   = help: the trait `Sized` is not implemented for `(dyn Sync + 'static)`
+   = note: only the last element of a tuple may have a dynamically sized type
+
+error: aborting due to 1 previous error
+
+For more information about this error, try `rustc --explain E0277`.
diff --git a/tests/crashes/127737.rs b/tests/ui/layout/invalid-unsized-const-prop.rs
index 2ee8c769858..2f1c398c35a 100644
--- a/tests/crashes/127737.rs
+++ b/tests/ui/layout/invalid-unsized-const-prop.rs
@@ -1,6 +1,10 @@
-//@ known-bug: #127737
+// issue: #127737
+//@ check-pass
 //@ compile-flags: -Zmir-opt-level=5 --crate-type lib
 
+//! This test is very similar to `invalid-unsized-const-eval.rs`, but also requires
+//! checking for unsized types in the last field of each enum variant.
+
 pub trait TestTrait {
     type MyType;
     fn func() -> Option<Self>
diff --git a/tests/ui/layout/invalid-unsized-in-always-sized-tail.rs b/tests/ui/layout/invalid-unsized-in-always-sized-tail.rs
new file mode 100644
index 00000000000..5df97338710
--- /dev/null
+++ b/tests/ui/layout/invalid-unsized-in-always-sized-tail.rs
@@ -0,0 +1,17 @@
+// issue: rust-lang/rust#126939
+
+//! This used to ICE, because the layout algorithm did not check for unsized types
+//! in the struct tail of always-sized types (i.e. those that cannot be unsized)
+//! and incorrectly returned an unsized layout.
+
+struct MySlice<T>(T);
+type MySliceBool = MySlice<[bool]>;
+
+struct P2 {
+    b: MySliceBool,
+    //~^ ERROR: the size for values of type `[bool]` cannot be known at compilation time
+}
+
+static CHECK: () = assert!(align_of::<P2>() == 1);
+
+fn main() {}
diff --git a/tests/ui/layout/invalid-unsized-in-always-sized-tail.stderr b/tests/ui/layout/invalid-unsized-in-always-sized-tail.stderr
new file mode 100644
index 00000000000..3f565d6ee5b
--- /dev/null
+++ b/tests/ui/layout/invalid-unsized-in-always-sized-tail.stderr
@@ -0,0 +1,23 @@
+error[E0277]: the size for values of type `[bool]` cannot be known at compilation time
+  --> $DIR/invalid-unsized-in-always-sized-tail.rs:11:8
+   |
+LL |     b: MySliceBool,
+   |        ^^^^^^^^^^^ doesn't have a size known at compile-time
+   |
+   = help: the trait `Sized` is not implemented for `[bool]`
+note: required by an implicit `Sized` bound in `MySlice`
+  --> $DIR/invalid-unsized-in-always-sized-tail.rs:7:16
+   |
+LL | struct MySlice<T>(T);
+   |                ^ required by the implicit `Sized` requirement on this type parameter in `MySlice`
+help: you could relax the implicit `Sized` bound on `T` if it were used through indirection like `&T` or `Box<T>`
+  --> $DIR/invalid-unsized-in-always-sized-tail.rs:7:16
+   |
+LL | struct MySlice<T>(T);
+   |                ^  - ...if indirection were used here: `Box<T>`
+   |                |
+   |                this could be changed to `T: ?Sized`...
+
+error: aborting due to 1 previous error
+
+For more information about this error, try `rustc --explain E0277`.
diff --git a/tests/ui/layout/trivial-bounds-sized.rs b/tests/ui/layout/trivial-bounds-sized.rs
new file mode 100644
index 00000000000..a32539f80fa
--- /dev/null
+++ b/tests/ui/layout/trivial-bounds-sized.rs
@@ -0,0 +1,51 @@
+//@ check-pass
+
+//! With trivial bounds, it is possible to have ADTs with unsized fields
+//! in arbitrary places. Test that we do not ICE for such types.
+
+#![feature(trivial_bounds)]
+#![expect(trivial_bounds)]
+
+struct Struct
+where
+    [u8]: Sized,
+    [i16]: Sized,
+{
+    a: [u8],
+    b: [i16],
+    c: f32,
+}
+
+union Union
+where
+    [u8]: Copy,
+    [i16]: Copy,
+{
+    a: [u8],
+    b: [i16],
+    c: f32,
+}
+
+enum Enum
+where
+    [u8]: Sized,
+    [i16]: Sized,
+{
+    V1([u8], [i16]),
+    V2([i16], f32),
+}
+
+// This forces layout computation via the `variant_size_differences` lint.
+// FIXME: This could be made more robust, possibly with a variant of `rustc_layout`
+// that doesn't error.
+enum Check
+where
+    [u8]: Copy,
+    [i16]: Copy,
+{
+    Struct(Struct),
+    Union(Union),
+    Enum(Enum),
+}
+
+fn main() {}
diff --git a/tests/crashes/123134.rs b/tests/ui/layout/unsatisfiable-sized-ungated.rs
index 61c043db763..d9c1f739bdb 100644
--- a/tests/crashes/123134.rs
+++ b/tests/ui/layout/unsatisfiable-sized-ungated.rs
@@ -1,4 +1,9 @@
-//@ known-bug: #123134
+//@ check-pass
+// issue: #123134
+
+//! This is a variant of `trivial-bounds-sized.rs` that compiles without any
+//! feature gates and used to trigger a delayed bug.
+
 trait Api: Sized {
     type Device: ?Sized;
 }
@@ -7,7 +12,7 @@ struct OpenDevice<A: Api>
 where
     A::Device: Sized,
 {
-    device: A::Device,
+    device: A::Device, // <- this is the type that ends up being unsized.
     queue: (),
 }
 
@@ -31,6 +36,8 @@ impl<T> Adapter for T {
     fn open() -> OpenDevice<Self::A>
     where
         <Self::A as Api>::Device: Sized,
+        // ^ the bound expands to `<<T as Adapter>::A as Api>::Device: Sized`, which
+        // is not considered trivial because it contains the type parameter `T`
     {
         unreachable!()
     }
diff --git a/tests/ui/methods/probe-error-on-infinite-deref.rs b/tests/ui/methods/probe-error-on-infinite-deref.rs
new file mode 100644
index 00000000000..85c1c0c09c1
--- /dev/null
+++ b/tests/ui/methods/probe-error-on-infinite-deref.rs
@@ -0,0 +1,16 @@
+use std::ops::Deref;
+
+// Make sure that method probe error reporting doesn't get too tangled up
+// on this infinite deref impl. See #130224.
+
+struct Wrap<T>(T);
+impl<T> Deref for Wrap<T> {
+    type Target = Wrap<Wrap<T>>;
+    fn deref(&self) -> &Wrap<Wrap<T>> { todo!() }
+}
+
+fn main() {
+    Wrap(1).lmao();
+    //~^ ERROR reached the recursion limit
+    //~| ERROR no method named `lmao`
+}
diff --git a/tests/ui/methods/probe-error-on-infinite-deref.stderr b/tests/ui/methods/probe-error-on-infinite-deref.stderr
new file mode 100644
index 00000000000..57a9ca2eaa8
--- /dev/null
+++ b/tests/ui/methods/probe-error-on-infinite-deref.stderr
@@ -0,0 +1,21 @@
+error[E0055]: reached the recursion limit while auto-dereferencing `Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<Wrap<{integer}>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>`
+  --> $DIR/probe-error-on-infinite-deref.rs:13:13
+   |
+LL |     Wrap(1).lmao();
+   |             ^^^^ deref recursion limit reached
+   |
+   = help: consider increasing the recursion limit by adding a `#![recursion_limit = "256"]` attribute to your crate (`probe_error_on_infinite_deref`)
+
+error[E0599]: no method named `lmao` found for struct `Wrap<{integer}>` in the current scope
+  --> $DIR/probe-error-on-infinite-deref.rs:13:13
+   |
+LL | struct Wrap<T>(T);
+   | -------------- method `lmao` not found for this struct
+...
+LL |     Wrap(1).lmao();
+   |             ^^^^ method not found in `Wrap<{integer}>`
+
+error: aborting due to 2 previous errors
+
+Some errors have detailed explanations: E0055, E0599.
+For more information about an error, try `rustc --explain E0055`.
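
For contrast with the infinite-`Deref` setup in the test above, a small illustrative sketch (not from the patch) of the finite chain that method probing normally walks: one autoderef step from `Outer` to `Inner` finds the method, so the recursion limit never comes into play.

use std::ops::Deref;

struct Inner;

impl Inner {
    fn lmao(&self) {}
}

// `Outer` derefs to a *different* type, so the autoderef chain ends after a
// single step instead of growing without bound.
struct Outer(Inner);

impl Deref for Outer {
    type Target = Inner;
    fn deref(&self) -> &Inner {
        &self.0
    }
}

fn main() {
    Outer(Inner).lmao(); // resolved on `Inner` via one deref step
}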
diff --git a/tests/ui/mismatched_types/mismatch-args-vargs-issue-130372.rs b/tests/ui/mismatched_types/mismatch-args-vargs-issue-130372.rs
new file mode 100644
index 00000000000..60a3b47010e
--- /dev/null
+++ b/tests/ui/mismatched_types/mismatch-args-vargs-issue-130372.rs
@@ -0,0 +1,12 @@
+#![feature(c_variadic)]
+
+// Regression test that covers all 3 cases of https://github.com/rust-lang/rust/issues/130372
+
+unsafe extern "C" fn test_va_copy(_: u64, mut ap: ...) {}
+
+pub fn main() {
+    unsafe {
+        test_va_copy();
+        //~^ ERROR this function takes at least 1 argument but 0 arguments were supplied
+    }
+}
diff --git a/tests/ui/mismatched_types/mismatch-args-vargs-issue-130372.stderr b/tests/ui/mismatched_types/mismatch-args-vargs-issue-130372.stderr
new file mode 100644
index 00000000000..38f76970358
--- /dev/null
+++ b/tests/ui/mismatched_types/mismatch-args-vargs-issue-130372.stderr
@@ -0,0 +1,19 @@
+error[E0060]: this function takes at least 1 argument but 0 arguments were supplied
+  --> $DIR/mismatch-args-vargs-issue-130372.rs:9:9
+   |
+LL |         test_va_copy();
+   |         ^^^^^^^^^^^^-- argument #1 of type `u64` is missing
+   |
+note: function defined here
+  --> $DIR/mismatch-args-vargs-issue-130372.rs:5:22
+   |
+LL | unsafe extern "C" fn test_va_copy(_: u64, mut ap: ...) {}
+   |                      ^^^^^^^^^^^^ ------
+help: provide the argument
+   |
+LL |         test_va_copy(/* u64 */);
+   |                     ~~~~~~~~~~~
+
+error: aborting due to 1 previous error
+
+For more information about this error, try `rustc --explain E0060`.
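
To round out the variadic test above, a brief sketch (mine, not from the patch) of the call shapes E0060 is policing: the fixed `u64` parameter is mandatory, while the variadic tail may hold zero or more arguments.

#![feature(c_variadic)]

// Only arity matters here, so the body is empty; actually reading the list
// would go through `VaListImpl::arg`, e.g. `ap.arg::<i32>()`.
unsafe extern "C" fn test_va(_tag: u64, _ap: ...) {}

fn main() {
    unsafe {
        test_va(1);                // just the required fixed argument
        test_va(2, 10i32);         // one variadic argument after it
        test_va(3, 10i32, 2.5f64); // several variadic arguments
    }
}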
diff --git a/tests/ui/resolve/auxiliary/foreign-trait-with-assoc.rs b/tests/ui/resolve/auxiliary/foreign-trait-with-assoc.rs
new file mode 100644
index 00000000000..952957ec480
--- /dev/null
+++ b/tests/ui/resolve/auxiliary/foreign-trait-with-assoc.rs
@@ -0,0 +1,3 @@
+pub trait Foo {
+    type Bar;
+}
diff --git a/tests/ui/resolve/dont-compute-arg-names-for-non-fn.rs b/tests/ui/resolve/dont-compute-arg-names-for-non-fn.rs
new file mode 100644
index 00000000000..20bbbff8fd2
--- /dev/null
+++ b/tests/ui/resolve/dont-compute-arg-names-for-non-fn.rs
@@ -0,0 +1,11 @@
+//@ aux-build:foreign-trait-with-assoc.rs
+
+extern crate foreign_trait_with_assoc;
+use foreign_trait_with_assoc::Foo;
+
+// Make sure we don't try to call `fn_arg_names` on a non-fn item.
+
+impl Foo for Bar {}
+//~^ ERROR cannot find type `Bar` in this scope
+
+fn main() {}
diff --git a/tests/ui/resolve/dont-compute-arg-names-for-non-fn.stderr b/tests/ui/resolve/dont-compute-arg-names-for-non-fn.stderr
new file mode 100644
index 00000000000..a1a8bb575e1
--- /dev/null
+++ b/tests/ui/resolve/dont-compute-arg-names-for-non-fn.stderr
@@ -0,0 +1,14 @@
+error[E0412]: cannot find type `Bar` in this scope
+  --> $DIR/dont-compute-arg-names-for-non-fn.rs:8:14
+   |
+LL | impl Foo for Bar {}
+   |              ^^^
+   |
+help: you might have meant to use the associated type
+   |
+LL | impl Foo for Self::Bar {}
+   |              ++++++
+
+error: aborting due to 1 previous error
+
+For more information about this error, try `rustc --explain E0412`.