Diffstat (limited to 'compiler')
-rw-r--r--  compiler/rustc_abi/src/callconv/reg.rs | 22
-rw-r--r--  compiler/rustc_abi/src/layout.rs | 71
-rw-r--r--  compiler/rustc_abi/src/layout/simple.rs | 27
-rw-r--r--  compiler/rustc_abi/src/lib.rs | 87
-rw-r--r--  compiler/rustc_ast/src/ast.rs | 3
-rw-r--r--  compiler/rustc_ast/src/expand/autodiff_attrs.rs | 17
-rw-r--r--  compiler/rustc_ast/src/expand/typetree.rs | 1
-rw-r--r--  compiler/rustc_ast/src/lib.rs | 1
-rw-r--r--  compiler/rustc_ast/src/tokenstream.rs | 4
-rw-r--r--  compiler/rustc_ast_passes/src/feature_gate.rs | 2
-rw-r--r--  compiler/rustc_attr_parsing/messages.ftl | 10
-rw-r--r--  compiler/rustc_attr_parsing/src/context.rs | 7
-rw-r--r--  compiler/rustc_attr_parsing/src/lints.rs | 18
-rw-r--r--  compiler/rustc_attr_parsing/src/parser.rs | 2
-rw-r--r--  compiler/rustc_attr_parsing/src/session_diagnostics.rs | 3
-rw-r--r--  compiler/rustc_borrowck/src/borrowck_errors.rs | 4
-rw-r--r--  compiler/rustc_borrowck/src/diagnostics/conflict_errors.rs | 8
-rw-r--r--  compiler/rustc_borrowck/src/diagnostics/explain_borrow.rs | 20
-rw-r--r--  compiler/rustc_builtin_macros/src/autodiff.rs | 3
-rw-r--r--  compiler/rustc_builtin_macros/src/deriving/coerce_pointee.rs | 15
-rw-r--r--  compiler/rustc_builtin_macros/src/format.rs | 2
-rw-r--r--  compiler/rustc_builtin_macros/src/lib.rs | 1
-rw-r--r--  compiler/rustc_codegen_cranelift/src/abi/comments.rs | 2
-rw-r--r--  compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs | 2
-rw-r--r--  compiler/rustc_codegen_cranelift/src/base.rs | 2
-rw-r--r--  compiler/rustc_codegen_cranelift/src/debuginfo/mod.rs | 2
-rw-r--r--  compiler/rustc_codegen_cranelift/src/debuginfo/types.rs | 6
-rw-r--r--  compiler/rustc_codegen_cranelift/src/lib.rs | 4
-rw-r--r--  compiler/rustc_codegen_cranelift/src/unsize.rs | 6
-rw-r--r--  compiler/rustc_codegen_cranelift/src/value_and_place.rs | 6
-rw-r--r--  compiler/rustc_codegen_gcc/src/builder.rs | 1
-rw-r--r--  compiler/rustc_codegen_gcc/src/context.rs | 6
-rw-r--r--  compiler/rustc_codegen_gcc/src/intrinsic/mod.rs | 1
-rw-r--r--  compiler/rustc_codegen_gcc/src/lib.rs | 4
-rw-r--r--  compiler/rustc_codegen_llvm/src/abi.rs | 1
-rw-r--r--  compiler/rustc_codegen_llvm/src/allocator.rs | 30
-rw-r--r--  compiler/rustc_codegen_llvm/src/back/lto.rs | 2
-rw-r--r--  compiler/rustc_codegen_llvm/src/builder.rs | 15
-rw-r--r--  compiler/rustc_codegen_llvm/src/builder/autodiff.rs | 6
-rw-r--r--  compiler/rustc_codegen_llvm/src/consts.rs | 4
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs | 4
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs | 4
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/mod.rs | 81
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/utils.rs | 2
-rw-r--r--  compiler/rustc_codegen_llvm/src/intrinsic.rs | 8
-rw-r--r--  compiler/rustc_codegen_llvm/src/lib.rs | 14
-rw-r--r--  compiler/rustc_codegen_llvm/src/llvm/enzyme_ffi.rs | 206
-rw-r--r--  compiler/rustc_codegen_llvm/src/llvm/ffi.rs | 97
-rw-r--r--  compiler/rustc_codegen_llvm/src/typetree.rs | 122
-rw-r--r--  compiler/rustc_codegen_llvm/src/va_arg.rs | 13
-rw-r--r--  compiler/rustc_codegen_ssa/src/back/link.rs | 6
-rw-r--r--  compiler/rustc_codegen_ssa/src/back/linker.rs | 13
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/block.rs | 1
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/intrinsic.rs | 2
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/rvalue.rs | 2
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/statement.rs | 2
-rw-r--r--  compiler/rustc_codegen_ssa/src/size_of_val.rs | 8
-rw-r--r--  compiler/rustc_codegen_ssa/src/traits/backend.rs | 11
-rw-r--r--  compiler/rustc_codegen_ssa/src/traits/builder.rs | 3
-rw-r--r--  compiler/rustc_const_eval/src/interpret/operator.rs | 2
-rw-r--r--  compiler/rustc_const_eval/src/util/alignment.rs | 2
-rw-r--r--  compiler/rustc_const_eval/src/util/check_validity_requirement.rs | 2
-rw-r--r--  compiler/rustc_expand/src/mbe/macro_check.rs | 11
-rw-r--r--  compiler/rustc_feature/src/removed.rs | 4
-rw-r--r--  compiler/rustc_feature/src/unstable.rs | 6
-rw-r--r--  compiler/rustc_hir/src/hir.rs | 5
-rw-r--r--  compiler/rustc_hir/src/lang_items.rs | 1
-rw-r--r--  compiler/rustc_hir/src/lints.rs | 15
-rw-r--r--  compiler/rustc_hir_analysis/src/collect.rs | 4
-rw-r--r--  compiler/rustc_hir_analysis/src/constrained_generic_params.rs | 2
-rw-r--r--  compiler/rustc_hir_analysis/src/hir_ty_lowering/cmse.rs | 3
-rw-r--r--  compiler/rustc_hir_analysis/src/impl_wf_check/min_specialization.rs | 2
-rw-r--r--  compiler/rustc_hir_typeck/src/fn_ctxt/checks.rs | 4
-rw-r--r--  compiler/rustc_hir_typeck/src/lib.rs | 1
-rw-r--r--  compiler/rustc_hir_typeck/src/method/suggest.rs | 4
-rw-r--r--  compiler/rustc_interface/src/passes.rs | 26
-rw-r--r--  compiler/rustc_interface/src/tests.rs | 2
-rw-r--r--  compiler/rustc_interface/src/util.rs | 2
-rw-r--r--  compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp | 18
-rw-r--r--  compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp | 65
-rw-r--r--  compiler/rustc_metadata/src/rmeta/decoder.rs | 2
-rw-r--r--  compiler/rustc_metadata/src/rmeta/decoder/cstore_impl.rs | 4
-rw-r--r--  compiler/rustc_metadata/src/rmeta/def_path_hash_map.rs | 9
-rw-r--r--  compiler/rustc_middle/src/error.rs | 1
-rw-r--r--  compiler/rustc_middle/src/hooks/mod.rs | 2
-rw-r--r--  compiler/rustc_middle/src/ty/context.rs | 2
-rw-r--r--  compiler/rustc_middle/src/ty/mod.rs | 225
-rw-r--r--  compiler/rustc_middle/src/ty/vtable.rs | 2
-rw-r--r--  compiler/rustc_mir_build/src/check_unsafety.rs | 15
-rw-r--r--  compiler/rustc_mir_build/src/thir/pattern/check_match.rs | 8
-rw-r--r--  compiler/rustc_mir_transform/src/abort_unwinding_calls.rs | 37
-rw-r--r--  compiler/rustc_mir_transform/src/dataflow_const_prop.rs | 2
-rw-r--r--  compiler/rustc_mir_transform/src/gvn.rs | 2
-rw-r--r--  compiler/rustc_mir_transform/src/known_panics_lint.rs | 2
-rw-r--r--  compiler/rustc_mir_transform/src/patch.rs | 30
-rw-r--r--  compiler/rustc_mir_transform/src/simplify_branches.rs | 19
-rw-r--r--  compiler/rustc_next_trait_solver/src/solve/assembly/structural_traits.rs | 5
-rw-r--r--  compiler/rustc_next_trait_solver/src/solve/effect_goals.rs | 21
-rw-r--r--  compiler/rustc_next_trait_solver/src/solve/eval_ctxt/mod.rs | 19
-rw-r--r--  compiler/rustc_next_trait_solver/src/solve/normalizes_to/mod.rs | 135
-rw-r--r--  compiler/rustc_next_trait_solver/src/solve/search_graph.rs | 26
-rw-r--r--  compiler/rustc_next_trait_solver/src/solve/trait_goals.rs | 46
-rw-r--r--  compiler/rustc_parse/src/lib.rs | 1
-rw-r--r--  compiler/rustc_parse/src/parser/tokenstream/tests.rs | 2
-rw-r--r--  compiler/rustc_passes/messages.ftl | 13
-rw-r--r--  compiler/rustc_passes/src/check_attr.rs | 71
-rw-r--r--  compiler/rustc_passes/src/errors.rs | 20
-rw-r--r--  compiler/rustc_resolve/src/ident.rs | 33
-rw-r--r--  compiler/rustc_resolve/src/late/diagnostics.rs | 99
-rw-r--r--  compiler/rustc_resolve/src/lib.rs | 6
-rw-r--r--  compiler/rustc_session/src/config.rs | 14
-rw-r--r--  compiler/rustc_session/src/config/cfg.rs | 10
-rw-r--r--  compiler/rustc_session/src/options.rs | 5
-rw-r--r--  compiler/rustc_span/src/analyze_source_file.rs | 109
-rw-r--r--  compiler/rustc_span/src/lib.rs | 1
-rw-r--r--  compiler/rustc_span/src/symbol.rs | 6
-rw-r--r--  compiler/rustc_target/src/callconv/arm.rs | 2
-rw-r--r--  compiler/rustc_target/src/callconv/loongarch.rs | 2
-rw-r--r--  compiler/rustc_target/src/callconv/mips.rs | 2
-rw-r--r--  compiler/rustc_target/src/callconv/mips64.rs | 4
-rw-r--r--  compiler/rustc_target/src/callconv/mod.rs | 2
-rw-r--r--  compiler/rustc_target/src/callconv/nvptx64.rs | 4
-rw-r--r--  compiler/rustc_target/src/callconv/powerpc64.rs | 2
-rw-r--r--  compiler/rustc_target/src/callconv/riscv.rs | 2
-rw-r--r--  compiler/rustc_target/src/callconv/sparc.rs | 2
-rw-r--r--  compiler/rustc_target/src/callconv/sparc64.rs | 2
-rw-r--r--  compiler/rustc_target/src/callconv/xtensa.rs | 2
-rw-r--r--  compiler/rustc_trait_selection/src/error_reporting/traits/mod.rs | 45
-rw-r--r--  compiler/rustc_transmute/src/layout/tree.rs | 2
-rw-r--r--  compiler/rustc_ty_utils/src/layout.rs | 10
-rw-r--r--  compiler/rustc_ty_utils/src/layout/invariant.rs | 6
-rw-r--r--  compiler/rustc_type_ir/src/search_graph/mod.rs | 148
132 files changed, 1750 insertions, 699 deletions
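
The largest share of the hunks below is a mechanical refactor: `TargetDataLayout`'s per-type alignment fields (and helpers such as `llvmlike_vector_align`) change from `AbiAlign` to plain `Align`, so call sites drop the `.abi` projection (`layout.align.abi.bytes()` becomes `layout.align.bytes()`) and wrap with `AbiAlign::new` only at the few boundaries that still store an `AbiAlign`, such as `LayoutData::align`. The sketch below illustrates that call-site change only; these `Align`/`AbiAlign` definitions are simplified stand-ins for illustration, not the real `rustc_abi` types.

    // Simplified stand-ins for the rustc_abi types touched by this patch.
    #[derive(Clone, Copy, Debug, PartialEq, Eq)]
    struct Align {
        pow2: u8,
    }

    impl Align {
        fn from_bytes(bytes: u64) -> Align {
            assert!(bytes.is_power_of_two());
            Align { pow2: bytes.trailing_zeros() as u8 }
        }
        fn bytes(self) -> u64 {
            1 << self.pow2
        }
    }

    // Wrapper that the data-layout fields used to store; after the patch it
    // is constructed only where an `AbiAlign` is still required.
    #[derive(Clone, Copy, Debug, PartialEq, Eq)]
    struct AbiAlign {
        abi: Align,
    }

    impl AbiAlign {
        fn new(abi: Align) -> AbiAlign {
            AbiAlign { abi }
        }
    }

    fn main() {
        // Old call-site pattern: fields held `AbiAlign`, so uses projected
        // through `.abi` before reading the value.
        let old_field = AbiAlign::new(Align::from_bytes(8));
        assert_eq!(old_field.abi.bytes(), 8);

        // New call-site pattern: fields hold `Align` directly.
        let new_field = Align::from_bytes(8);
        assert_eq!(new_field.bytes(), 8);

        // Where an `AbiAlign` is still expected, the wrapper is applied at
        // the boundary instead of being stored everywhere.
        assert_eq!(AbiAlign::new(new_field).abi, new_field);
    }
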
diff --git a/compiler/rustc_abi/src/callconv/reg.rs b/compiler/rustc_abi/src/callconv/reg.rs
index 8cf140dbaad..66c8056d0c2 100644
--- a/compiler/rustc_abi/src/callconv/reg.rs
+++ b/compiler/rustc_abi/src/callconv/reg.rs
@@ -42,22 +42,22 @@ impl Reg {
         let dl = cx.data_layout();
         match self.kind {
             RegKind::Integer => match self.size.bits() {
-                1 => dl.i1_align.abi,
-                2..=8 => dl.i8_align.abi,
-                9..=16 => dl.i16_align.abi,
-                17..=32 => dl.i32_align.abi,
-                33..=64 => dl.i64_align.abi,
-                65..=128 => dl.i128_align.abi,
+                1 => dl.i1_align,
+                2..=8 => dl.i8_align,
+                9..=16 => dl.i16_align,
+                17..=32 => dl.i32_align,
+                33..=64 => dl.i64_align,
+                65..=128 => dl.i128_align,
                 _ => panic!("unsupported integer: {self:?}"),
             },
             RegKind::Float => match self.size.bits() {
-                16 => dl.f16_align.abi,
-                32 => dl.f32_align.abi,
-                64 => dl.f64_align.abi,
-                128 => dl.f128_align.abi,
+                16 => dl.f16_align,
+                32 => dl.f32_align,
+                64 => dl.f64_align,
+                128 => dl.f128_align,
                 _ => panic!("unsupported float: {self:?}"),
             },
-            RegKind::Vector => dl.llvmlike_vector_align(self.size).abi,
+            RegKind::Vector => dl.llvmlike_vector_align(self.size),
         }
     }
 }
diff --git a/compiler/rustc_abi/src/layout.rs b/compiler/rustc_abi/src/layout.rs
index 5004d0c8022..14356813b7b 100644
--- a/compiler/rustc_abi/src/layout.rs
+++ b/compiler/rustc_abi/src/layout.rs
@@ -174,11 +174,11 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
             // Non-power-of-two vectors have padding up to the next power-of-two.
             // If we're a packed repr, remove the padding while keeping the alignment as close
             // to a vector as possible.
-            (BackendRepr::Memory { sized: true }, AbiAlign { abi: Align::max_aligned_factor(size) })
+            (BackendRepr::Memory { sized: true }, Align::max_aligned_factor(size))
         } else {
             (BackendRepr::SimdVector { element: e_repr, count }, dl.llvmlike_vector_align(size))
         };
-        let size = size.align_to(align.abi);
+        let size = size.align_to(align);
 
         Ok(LayoutData {
             variants: Variants::Single { index: VariantIdx::new(0) },
@@ -190,7 +190,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
             largest_niche: elt.largest_niche,
             uninhabited: false,
             size,
-            align,
+            align: AbiAlign::new(align),
             max_repr_align: None,
             unadjusted_abi_align: elt.align.abi,
             randomization_seed: elt.randomization_seed.wrapping_add(Hash64::new(count)),
@@ -388,7 +388,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
                 return Err(LayoutCalculatorError::UnexpectedUnsized(*field));
             }
 
-            align = align.max(field.align);
+            align = align.max(field.align.abi);
             max_repr_align = max_repr_align.max(field.max_repr_align);
             size = cmp::max(size, field.size);
 
@@ -423,13 +423,13 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
         }
 
         if let Some(pack) = repr.pack {
-            align = align.min(AbiAlign::new(pack));
+            align = align.min(pack);
         }
         // The unadjusted ABI alignment does not include repr(align), but does include repr(pack).
         // See documentation on `LayoutData::unadjusted_abi_align`.
-        let unadjusted_abi_align = align.abi;
+        let unadjusted_abi_align = align;
         if let Some(repr_align) = repr.align {
-            align = align.max(AbiAlign::new(repr_align));
+            align = align.max(repr_align);
         }
         // `align` must not be modified after this, or `unadjusted_abi_align` could be inaccurate.
         let align = align;
@@ -441,14 +441,12 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
             Ok(Some((repr, _))) => match repr {
                 // Mismatched alignment (e.g. union is #[repr(packed)]): disable opt
                 BackendRepr::Scalar(_) | BackendRepr::ScalarPair(_, _)
-                    if repr.scalar_align(dl).unwrap() != align.abi =>
+                    if repr.scalar_align(dl).unwrap() != align =>
                 {
                     BackendRepr::Memory { sized: true }
                 }
                 // Vectors require at least element alignment, else disable the opt
-                BackendRepr::SimdVector { element, count: _ }
-                    if element.align(dl).abi > align.abi =>
-                {
+                BackendRepr::SimdVector { element, count: _ } if element.align(dl).abi > align => {
                     BackendRepr::Memory { sized: true }
                 }
                 // the alignment tests passed and we can use this
@@ -474,8 +472,8 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
             backend_repr,
             largest_niche: None,
             uninhabited: false,
-            align,
-            size: size.align_to(align.abi),
+            align: AbiAlign::new(align),
+            size: size.align_to(align),
             max_repr_align,
             unadjusted_abi_align,
             randomization_seed: combined_seed,
@@ -611,7 +609,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
 
             let mut align = dl.aggregate_align;
             let mut max_repr_align = repr.align;
-            let mut unadjusted_abi_align = align.abi;
+            let mut unadjusted_abi_align = align;
 
             let mut variant_layouts = variants
                 .iter_enumerated()
@@ -619,7 +617,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
                     let mut st = self.univariant(v, repr, StructKind::AlwaysSized).ok()?;
                     st.variants = Variants::Single { index: j };
 
-                    align = align.max(st.align);
+                    align = align.max(st.align.abi);
                     max_repr_align = max_repr_align.max(st.max_repr_align);
                     unadjusted_abi_align = unadjusted_abi_align.max(st.unadjusted_abi_align);
 
@@ -646,7 +644,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
             let (niche_start, niche_scalar) = niche.reserve(dl, count)?;
             let niche_offset = niche.offset;
             let niche_size = niche.value.size(dl);
-            let size = variant_layouts[largest_variant_index].size.align_to(align.abi);
+            let size = variant_layouts[largest_variant_index].size.align_to(align);
 
             let all_variants_fit = variant_layouts.iter_enumerated_mut().all(|(i, layout)| {
                 if i == largest_variant_index {
@@ -699,7 +697,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
                 .iter_enumerated()
                 .all(|(i, layout)| i == largest_variant_index || layout.size == Size::ZERO);
             let same_size = size == variant_layouts[largest_variant_index].size;
-            let same_align = align == variant_layouts[largest_variant_index].align;
+            let same_align = align == variant_layouts[largest_variant_index].align.abi;
 
             let uninhabited = variant_layouts.iter().all(|v| v.is_uninhabited());
             let abi = if same_size && same_align && others_zst {
@@ -746,7 +744,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
                 largest_niche,
                 uninhabited,
                 size,
-                align,
+                align: AbiAlign::new(align),
                 max_repr_align,
                 unadjusted_abi_align,
                 randomization_seed: combined_seed,
@@ -818,7 +816,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
 
         let mut align = dl.aggregate_align;
         let mut max_repr_align = repr.align;
-        let mut unadjusted_abi_align = align.abi;
+        let mut unadjusted_abi_align = align;
 
         let mut size = Size::ZERO;
 
@@ -860,7 +858,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
                     }
                 }
                 size = cmp::max(size, st.size);
-                align = align.max(st.align);
+                align = align.max(st.align.abi);
                 max_repr_align = max_repr_align.max(st.max_repr_align);
                 unadjusted_abi_align = unadjusted_abi_align.max(st.unadjusted_abi_align);
                 Ok(st)
@@ -868,7 +866,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
             .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
 
         // Align the maximum variant size to the largest alignment.
-        size = size.align_to(align.abi);
+        size = size.align_to(align);
 
         // FIXME(oli-obk): deduplicate and harden these checks
         if size.bytes() >= dl.obj_size_bound() {
@@ -1042,7 +1040,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
                 };
                 if pair_offsets[FieldIdx::new(0)] == Size::ZERO
                     && pair_offsets[FieldIdx::new(1)] == *offset
-                    && align == pair.align
+                    && align == pair.align.abi
                     && size == pair.size
                 {
                     // We can use `ScalarPair` only when it matches our
@@ -1066,7 +1064,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
                     // Also need to bump up the size and alignment, so that the entire value fits
                     // in here.
                     variant.size = cmp::max(variant.size, size);
-                    variant.align.abi = cmp::max(variant.align.abi, align.abi);
+                    variant.align.abi = cmp::max(variant.align.abi, align);
                 }
             }
         }
@@ -1092,7 +1090,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
             largest_niche,
             uninhabited,
             backend_repr: abi,
-            align,
+            align: AbiAlign::new(align),
             size,
             max_repr_align,
             unadjusted_abi_align,
@@ -1169,7 +1167,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
                 // To allow unsizing `&Foo<Type>` -> `&Foo<dyn Trait>`, the layout of the struct must
                 // not depend on the layout of the tail.
                 let max_field_align =
-                    fields_excluding_tail.iter().map(|f| f.align.abi.bytes()).max().unwrap_or(1);
+                    fields_excluding_tail.iter().map(|f| f.align.bytes()).max().unwrap_or(1);
                 let largest_niche_size = fields_excluding_tail
                     .iter()
                     .filter_map(|f| f.largest_niche)
@@ -1189,7 +1187,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
                     } else {
                         // Returns `log2(effective-align)`. The calculation assumes that size is an
                         // integer multiple of align, except for ZSTs.
-                        let align = layout.align.abi.bytes();
+                        let align = layout.align.bytes();
                         let size = layout.size.bytes();
                         let niche_size = layout.largest_niche.map(|n| n.available(dl)).unwrap_or(0);
                         // Group [u8; 4] with align-4 or [u8; 6] with align-2 fields.
@@ -1288,7 +1286,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
         if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
             let prefix_align =
                 if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
-            align = align.max(AbiAlign::new(prefix_align));
+            align = align.max(prefix_align);
             offset = prefix_size.align_to(prefix_align);
         }
         for &i in &inverse_memory_index {
@@ -1312,7 +1310,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
                 field.align
             };
             offset = offset.align_to(field_align.abi);
-            align = align.max(field_align);
+            align = align.max(field_align.abi);
             max_repr_align = max_repr_align.max(field.max_repr_align);
 
             debug!("univariant offset: {:?} field: {:#?}", offset, field);
@@ -1339,9 +1337,9 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
 
         // The unadjusted ABI alignment does not include repr(align), but does include repr(pack).
         // See documentation on `LayoutData::unadjusted_abi_align`.
-        let unadjusted_abi_align = align.abi;
+        let unadjusted_abi_align = align;
         if let Some(repr_align) = repr.align {
-            align = align.max(AbiAlign::new(repr_align));
+            align = align.max(repr_align);
         }
         // `align` must not be modified after this point, or `unadjusted_abi_align` could be inaccurate.
         let align = align;
@@ -1360,7 +1358,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
             debug_assert!(inverse_memory_index.iter().copied().eq(fields.indices()));
             inverse_memory_index.into_iter().map(|it| it.index() as u32).collect()
         };
-        let size = min_size.align_to(align.abi);
+        let size = min_size.align_to(align);
         // FIXME(oli-obk): deduplicate and harden these checks
         if size.bytes() >= dl.obj_size_bound() {
             return Err(LayoutCalculatorError::SizeOverflow);
@@ -1383,8 +1381,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
                     layout_of_single_non_zst_field = Some(field);
 
                     // Field fills the struct and it has a scalar or scalar pair ABI.
-                    if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size
-                    {
+                    if offsets[i].bytes() == 0 && align == field.align.abi && size == field.size {
                         match field.backend_repr {
                             // For plain scalars, or vectors of them, we can't unpack
                             // newtypes for `#[repr(C)]`, as that affects C ABIs.
@@ -1428,7 +1425,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
                             };
                             if offsets[i] == pair_offsets[FieldIdx::new(0)]
                                 && offsets[j] == pair_offsets[FieldIdx::new(1)]
-                                && align == pair.align
+                                && align == pair.align.abi
                                 && size == pair.size
                             {
                                 // We can use `ScalarPair` only when it matches our
@@ -1450,7 +1447,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
                 Some(l) => l.unadjusted_abi_align,
                 None => {
                     // `repr(transparent)` with all ZST fields.
-                    align.abi
+                    align
                 }
             }
         } else {
@@ -1465,7 +1462,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
             backend_repr: abi,
             largest_niche,
             uninhabited,
-            align,
+            align: AbiAlign::new(align),
             size,
             max_repr_align,
             unadjusted_abi_align,
@@ -1488,7 +1485,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
         for i in layout.fields.index_by_increasing_offset() {
             let offset = layout.fields.offset(i);
             let f = &fields[FieldIdx::new(i)];
-            write!(s, "[o{}a{}s{}", offset.bytes(), f.align.abi.bytes(), f.size.bytes()).unwrap();
+            write!(s, "[o{}a{}s{}", offset.bytes(), f.align.bytes(), f.size.bytes()).unwrap();
             if let Some(n) = f.largest_niche {
                 write!(
                     s,
diff --git a/compiler/rustc_abi/src/layout/simple.rs b/compiler/rustc_abi/src/layout/simple.rs
index 0d0706defc2..b3807c87273 100644
--- a/compiler/rustc_abi/src/layout/simple.rs
+++ b/compiler/rustc_abi/src/layout/simple.rs
@@ -4,7 +4,8 @@ use rustc_hashes::Hash64;
 use rustc_index::{Idx, IndexVec};
 
 use crate::{
-    BackendRepr, FieldsShape, HasDataLayout, LayoutData, Niche, Primitive, Scalar, Size, Variants,
+    AbiAlign, BackendRepr, FieldsShape, HasDataLayout, LayoutData, Niche, Primitive, Scalar, Size,
+    Variants,
 };
 
 /// "Simple" layout constructors that cannot fail.
@@ -20,10 +21,10 @@ impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> {
             backend_repr: BackendRepr::Memory { sized },
             largest_niche: None,
             uninhabited: false,
-            align: dl.i8_align,
+            align: AbiAlign::new(dl.i8_align),
             size: Size::ZERO,
             max_repr_align: None,
-            unadjusted_abi_align: dl.i8_align.abi,
+            unadjusted_abi_align: dl.i8_align,
             randomization_seed: Hash64::new(0),
         }
     }
@@ -37,10 +38,10 @@ impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> {
             backend_repr: BackendRepr::Memory { sized: true },
             largest_niche: None,
             uninhabited: true,
-            align: dl.i8_align,
+            align: AbiAlign::new(dl.i8_align),
             size: Size::ZERO,
             max_repr_align: None,
-            unadjusted_abi_align: dl.i8_align.abi,
+            unadjusted_abi_align: dl.i8_align,
             randomization_seed: Hash64::ZERO,
         }
     }
@@ -89,10 +90,10 @@ impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> {
 
     pub fn scalar_pair<C: HasDataLayout>(cx: &C, a: Scalar, b: Scalar) -> Self {
         let dl = cx.data_layout();
-        let b_align = b.align(dl);
-        let align = a.align(dl).max(b_align).max(dl.aggregate_align);
-        let b_offset = a.size(dl).align_to(b_align.abi);
-        let size = (b_offset + b.size(dl)).align_to(align.abi);
+        let b_align = b.align(dl).abi;
+        let align = a.align(dl).abi.max(b_align).max(dl.aggregate_align);
+        let b_offset = a.size(dl).align_to(b_align);
+        let size = (b_offset + b.size(dl)).align_to(align);
 
         // HACK(nox): We iter on `b` and then `a` because `max_by_key`
         // returns the last maximum.
@@ -112,10 +113,10 @@ impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> {
             backend_repr: BackendRepr::ScalarPair(a, b),
             largest_niche,
             uninhabited: false,
-            align,
+            align: AbiAlign::new(align),
             size,
             max_repr_align: None,
-            unadjusted_abi_align: align.abi,
+            unadjusted_abi_align: align,
             randomization_seed: Hash64::new(combined_seed),
         }
     }
@@ -138,10 +139,10 @@ impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> {
             backend_repr: BackendRepr::Memory { sized: true },
             largest_niche: None,
             uninhabited: true,
-            align: dl.i8_align,
+            align: AbiAlign::new(dl.i8_align),
             size: Size::ZERO,
             max_repr_align: None,
-            unadjusted_abi_align: dl.i8_align.abi,
+            unadjusted_abi_align: dl.i8_align,
             randomization_seed: Hash64::ZERO,
         }
     }
diff --git a/compiler/rustc_abi/src/lib.rs b/compiler/rustc_abi/src/lib.rs
index 369874521e5..de44c8755a0 100644
--- a/compiler/rustc_abi/src/lib.rs
+++ b/compiler/rustc_abi/src/lib.rs
@@ -229,7 +229,7 @@ pub struct PointerSpec {
     /// The size of the bitwise representation of the pointer.
     pointer_size: Size,
     /// The alignment of pointers for this address space
-    pointer_align: AbiAlign,
+    pointer_align: Align,
     /// The size of the value a pointer can be offset by in this address space.
     pointer_offset: Size,
     /// Pointers into this address space contain extra metadata
@@ -242,20 +242,20 @@ pub struct PointerSpec {
 #[derive(Debug, PartialEq, Eq)]
 pub struct TargetDataLayout {
     pub endian: Endian,
-    pub i1_align: AbiAlign,
-    pub i8_align: AbiAlign,
-    pub i16_align: AbiAlign,
-    pub i32_align: AbiAlign,
-    pub i64_align: AbiAlign,
-    pub i128_align: AbiAlign,
-    pub f16_align: AbiAlign,
-    pub f32_align: AbiAlign,
-    pub f64_align: AbiAlign,
-    pub f128_align: AbiAlign,
-    pub aggregate_align: AbiAlign,
+    pub i1_align: Align,
+    pub i8_align: Align,
+    pub i16_align: Align,
+    pub i32_align: Align,
+    pub i64_align: Align,
+    pub i128_align: Align,
+    pub f16_align: Align,
+    pub f32_align: Align,
+    pub f64_align: Align,
+    pub f128_align: Align,
+    pub aggregate_align: Align,
 
     /// Alignments for vector types.
-    pub vector_align: Vec<(Size, AbiAlign)>,
+    pub vector_align: Vec<(Size, Align)>,
 
     pub default_address_space: AddressSpace,
     pub default_address_space_pointer_spec: PointerSpec,
@@ -282,25 +282,25 @@ impl Default for TargetDataLayout {
         let align = |bits| Align::from_bits(bits).unwrap();
         TargetDataLayout {
             endian: Endian::Big,
-            i1_align: AbiAlign::new(align(8)),
-            i8_align: AbiAlign::new(align(8)),
-            i16_align: AbiAlign::new(align(16)),
-            i32_align: AbiAlign::new(align(32)),
-            i64_align: AbiAlign::new(align(32)),
-            i128_align: AbiAlign::new(align(32)),
-            f16_align: AbiAlign::new(align(16)),
-            f32_align: AbiAlign::new(align(32)),
-            f64_align: AbiAlign::new(align(64)),
-            f128_align: AbiAlign::new(align(128)),
-            aggregate_align: AbiAlign { abi: align(8) },
+            i1_align: align(8),
+            i8_align: align(8),
+            i16_align: align(16),
+            i32_align: align(32),
+            i64_align: align(32),
+            i128_align: align(32),
+            f16_align: align(16),
+            f32_align: align(32),
+            f64_align: align(64),
+            f128_align: align(128),
+            aggregate_align: align(8),
             vector_align: vec![
-                (Size::from_bits(64), AbiAlign::new(align(64))),
-                (Size::from_bits(128), AbiAlign::new(align(128))),
+                (Size::from_bits(64), align(64)),
+                (Size::from_bits(128), align(128)),
             ],
             default_address_space: AddressSpace::ZERO,
             default_address_space_pointer_spec: PointerSpec {
                 pointer_size: Size::from_bits(64),
-                pointer_align: AbiAlign::new(align(64)),
+                pointer_align: align(64),
                 pointer_offset: Size::from_bits(64),
                 _is_fat: false,
             },
@@ -360,7 +360,7 @@ impl TargetDataLayout {
                     .map_err(|err| TargetDataLayoutErrors::InvalidAlignment { cause, err })
             };
             let abi = parse_bits(s, "alignment", cause)?;
-            Ok(AbiAlign::new(align_from_bits(abi)?))
+            Ok(align_from_bits(abi)?)
         };
 
         // Parse an alignment sequence, possibly in the form `<align>[:<preferred_alignment>]`,
@@ -596,7 +596,7 @@ impl TargetDataLayout {
 
     /// psABI-mandated alignment for a vector type, if any
     #[inline]
-    fn cabi_vector_align(&self, vec_size: Size) -> Option<AbiAlign> {
+    fn cabi_vector_align(&self, vec_size: Size) -> Option<Align> {
         self.vector_align
             .iter()
             .find(|(size, _align)| *size == vec_size)
@@ -605,10 +605,9 @@ impl TargetDataLayout {
 
     /// an alignment resembling the one LLVM would pick for a vector
     #[inline]
-    pub fn llvmlike_vector_align(&self, vec_size: Size) -> AbiAlign {
-        self.cabi_vector_align(vec_size).unwrap_or(AbiAlign::new(
-            Align::from_bytes(vec_size.bytes().next_power_of_two()).unwrap(),
-        ))
+    pub fn llvmlike_vector_align(&self, vec_size: Size) -> Align {
+        self.cabi_vector_align(vec_size)
+            .unwrap_or(Align::from_bytes(vec_size.bytes().next_power_of_two()).unwrap())
     }
 
     /// Get the pointer size in the default data address space.
@@ -654,21 +653,19 @@ impl TargetDataLayout {
     /// Get the pointer alignment in the default data address space.
     #[inline]
     pub fn pointer_align(&self) -> AbiAlign {
-        self.default_address_space_pointer_spec.pointer_align
+        AbiAlign::new(self.default_address_space_pointer_spec.pointer_align)
     }
 
     /// Get the pointer alignment in a specific address space.
     #[inline]
     pub fn pointer_align_in(&self, c: AddressSpace) -> AbiAlign {
-        if c == self.default_address_space {
-            return self.default_address_space_pointer_spec.pointer_align;
-        }
-
-        if let Some(e) = self.address_space_info.iter().find(|(a, _)| a == &c) {
+        AbiAlign::new(if c == self.default_address_space {
+            self.default_address_space_pointer_spec.pointer_align
+        } else if let Some(e) = self.address_space_info.iter().find(|(a, _)| a == &c) {
             e.1.pointer_align
         } else {
             panic!("Use of unknown address space {c:?}");
-        }
+        })
     }
 }
 
@@ -1185,13 +1182,13 @@ impl Integer {
         use Integer::*;
         let dl = cx.data_layout();
 
-        match self {
+        AbiAlign::new(match self {
             I8 => dl.i8_align,
             I16 => dl.i16_align,
             I32 => dl.i32_align,
             I64 => dl.i64_align,
             I128 => dl.i128_align,
-        }
+        })
     }
 
     /// Returns the largest signed value that can be represented by this Integer.
@@ -1311,12 +1308,12 @@ impl Float {
         use Float::*;
         let dl = cx.data_layout();
 
-        match self {
+        AbiAlign::new(match self {
             F16 => dl.f16_align,
             F32 => dl.f32_align,
             F64 => dl.f64_align,
             F128 => dl.f128_align,
-        }
+        })
     }
 }
 
@@ -2159,7 +2156,7 @@ impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> {
 
     /// Returns `true` if the type is sized and a 1-ZST (meaning it has size 0 and alignment 1).
     pub fn is_1zst(&self) -> bool {
-        self.is_sized() && self.size.bytes() == 0 && self.align.abi.bytes() == 1
+        self.is_sized() && self.size.bytes() == 0 && self.align.bytes() == 1
     }
 
     /// Returns `true` if the type is a ZST and not unsized.
diff --git a/compiler/rustc_ast/src/ast.rs b/compiler/rustc_ast/src/ast.rs
index 3e8fddd9954..082d5e88ac7 100644
--- a/compiler/rustc_ast/src/ast.rs
+++ b/compiler/rustc_ast/src/ast.rs
@@ -114,8 +114,7 @@ impl PartialEq<Symbol> for Path {
 impl PartialEq<&[Symbol]> for Path {
     #[inline]
     fn eq(&self, names: &&[Symbol]) -> bool {
-        self.segments.len() == names.len()
-            && self.segments.iter().zip(names.iter()).all(|(s1, s2)| s1 == s2)
+        self.segments.iter().eq(*names)
     }
 }
 
diff --git a/compiler/rustc_ast/src/expand/autodiff_attrs.rs b/compiler/rustc_ast/src/expand/autodiff_attrs.rs
index 33451f99748..90f15753e99 100644
--- a/compiler/rustc_ast/src/expand/autodiff_attrs.rs
+++ b/compiler/rustc_ast/src/expand/autodiff_attrs.rs
@@ -6,6 +6,7 @@
 use std::fmt::{self, Display, Formatter};
 use std::str::FromStr;
 
+use crate::expand::typetree::TypeTree;
 use crate::expand::{Decodable, Encodable, HashStable_Generic};
 use crate::{Ty, TyKind};
 
@@ -84,6 +85,8 @@ pub struct AutoDiffItem {
     /// The name of the function being generated
     pub target: String,
     pub attrs: AutoDiffAttrs,
+    pub inputs: Vec<TypeTree>,
+    pub output: TypeTree,
 }
 
 #[derive(Clone, Eq, PartialEq, Encodable, Decodable, Debug, HashStable_Generic)]
@@ -275,14 +278,22 @@ impl AutoDiffAttrs {
         !matches!(self.mode, DiffMode::Error | DiffMode::Source)
     }
 
-    pub fn into_item(self, source: String, target: String) -> AutoDiffItem {
-        AutoDiffItem { source, target, attrs: self }
+    pub fn into_item(
+        self,
+        source: String,
+        target: String,
+        inputs: Vec<TypeTree>,
+        output: TypeTree,
+    ) -> AutoDiffItem {
+        AutoDiffItem { source, target, inputs, output, attrs: self }
     }
 }
 
 impl fmt::Display for AutoDiffItem {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         write!(f, "Differentiating {} -> {}", self.source, self.target)?;
-        write!(f, " with attributes: {:?}", self.attrs)
+        write!(f, " with attributes: {:?}", self.attrs)?;
+        write!(f, " with inputs: {:?}", self.inputs)?;
+        write!(f, " with output: {:?}", self.output)
     }
 }
diff --git a/compiler/rustc_ast/src/expand/typetree.rs b/compiler/rustc_ast/src/expand/typetree.rs
index 9a2dd2e85e0..e7b4f3aff41 100644
--- a/compiler/rustc_ast/src/expand/typetree.rs
+++ b/compiler/rustc_ast/src/expand/typetree.rs
@@ -31,6 +31,7 @@ pub enum Kind {
     Half,
     Float,
     Double,
+    F128,
     Unknown,
 }
 
diff --git a/compiler/rustc_ast/src/lib.rs b/compiler/rustc_ast/src/lib.rs
index f1951049b47..5fe218776e5 100644
--- a/compiler/rustc_ast/src/lib.rs
+++ b/compiler/rustc_ast/src/lib.rs
@@ -15,6 +15,7 @@
 #![feature(associated_type_defaults)]
 #![feature(box_patterns)]
 #![feature(if_let_guard)]
+#![feature(iter_order_by)]
 #![feature(macro_metavar_expr)]
 #![feature(rustdoc_internals)]
 #![recursion_limit = "256"]
diff --git a/compiler/rustc_ast/src/tokenstream.rs b/compiler/rustc_ast/src/tokenstream.rs
index a5d8fbfac61..4111182c3b7 100644
--- a/compiler/rustc_ast/src/tokenstream.rs
+++ b/compiler/rustc_ast/src/tokenstream.rs
@@ -48,9 +48,7 @@ impl TokenTree {
         match (self, other) {
             (TokenTree::Token(token, _), TokenTree::Token(token2, _)) => token.kind == token2.kind,
             (TokenTree::Delimited(.., delim, tts), TokenTree::Delimited(.., delim2, tts2)) => {
-                delim == delim2
-                    && tts.len() == tts2.len()
-                    && tts.iter().zip(tts2.iter()).all(|(a, b)| a.eq_unspanned(b))
+                delim == delim2 && tts.iter().eq_by(tts2.iter(), |a, b| a.eq_unspanned(b))
             }
             _ => false,
         }
diff --git a/compiler/rustc_ast_passes/src/feature_gate.rs b/compiler/rustc_ast_passes/src/feature_gate.rs
index 9ab5b0b3547..608ccfefeb6 100644
--- a/compiler/rustc_ast_passes/src/feature_gate.rs
+++ b/compiler/rustc_ast_passes/src/feature_gate.rs
@@ -183,7 +183,7 @@ impl<'a> Visitor<'a> for PostExpansionVisitor<'a> {
                 gate_doc!(
                     "experimental" {
                         cfg => doc_cfg
-                        cfg_hide => doc_cfg_hide
+                        auto_cfg => doc_cfg
                         masked => doc_masked
                         notable_trait => doc_notable_trait
                     }
diff --git a/compiler/rustc_attr_parsing/messages.ftl b/compiler/rustc_attr_parsing/messages.ftl
index 81ec17077c1..6c5346e8355 100644
--- a/compiler/rustc_attr_parsing/messages.ftl
+++ b/compiler/rustc_attr_parsing/messages.ftl
@@ -8,7 +8,15 @@ attr_parsing_deprecated_item_suggestion =
 
 attr_parsing_empty_attribute =
     unused attribute
-    .suggestion = remove this attribute
+    .suggestion = {$valid_without_list ->
+        [true] remove these parentheses
+        *[other] remove this attribute
+    }
+    .note = {$valid_without_list ->
+        [true] using `{$attr_path}` with an empty list is equivalent to not using a list at all
+        *[other] using `{$attr_path}` with an empty list has no effect
+    }
+
 
 attr_parsing_invalid_target = `#[{$name}]` attribute cannot be used on {$target}
     .help = `#[{$name}]` can {$only}be applied to {$applied}
diff --git a/compiler/rustc_attr_parsing/src/context.rs b/compiler/rustc_attr_parsing/src/context.rs
index d7ccf3c7806..e8bb4caa416 100644
--- a/compiler/rustc_attr_parsing/src/context.rs
+++ b/compiler/rustc_attr_parsing/src/context.rs
@@ -597,7 +597,12 @@ impl<'f, 'sess: 'f, S: Stage> AcceptContext<'f, 'sess, S> {
     }
 
     pub(crate) fn warn_empty_attribute(&mut self, span: Span) {
-        self.emit_lint(AttributeLintKind::EmptyAttribute { first_span: span }, span);
+        let attr_path = self.attr_path.clone();
+        let valid_without_list = self.template.word;
+        self.emit_lint(
+            AttributeLintKind::EmptyAttribute { first_span: span, attr_path, valid_without_list },
+            span,
+        );
     }
 }
 
diff --git a/compiler/rustc_attr_parsing/src/lints.rs b/compiler/rustc_attr_parsing/src/lints.rs
index ab8ba0daf1f..3a2a3704669 100644
--- a/compiler/rustc_attr_parsing/src/lints.rs
+++ b/compiler/rustc_attr_parsing/src/lints.rs
@@ -43,12 +43,18 @@ pub fn emit_attribute_lint<L: LintEmitter>(lint: &AttributeLint<L::Id>, lint_emi
                     ),
                 },
             ),
-        AttributeLintKind::EmptyAttribute { first_span } => lint_emitter.emit_node_span_lint(
-            rustc_session::lint::builtin::UNUSED_ATTRIBUTES,
-            *id,
-            *first_span,
-            session_diagnostics::EmptyAttributeList { attr_span: *first_span },
-        ),
+        AttributeLintKind::EmptyAttribute { first_span, attr_path, valid_without_list } => {
+            lint_emitter.emit_node_span_lint(
+                rustc_session::lint::builtin::UNUSED_ATTRIBUTES,
+                *id,
+                *first_span,
+                session_diagnostics::EmptyAttributeList {
+                    attr_span: *first_span,
+                    attr_path: attr_path.clone(),
+                    valid_without_list: *valid_without_list,
+                },
+            )
+        }
         AttributeLintKind::InvalidTarget { name, target, applied, only } => lint_emitter
             .emit_node_span_lint(
                 // This check is here because `deprecated` had its own lint group and removing this would be a breaking change
diff --git a/compiler/rustc_attr_parsing/src/parser.rs b/compiler/rustc_attr_parsing/src/parser.rs
index 4f903594225..3f4f5679015 100644
--- a/compiler/rustc_attr_parsing/src/parser.rs
+++ b/compiler/rustc_attr_parsing/src/parser.rs
@@ -49,7 +49,7 @@ impl<'a> PathParser<'a> {
     }
 
     pub fn segments_is(&self, segments: &[Symbol]) -> bool {
-        self.len() == segments.len() && self.segments().zip(segments).all(|(a, b)| a.name == *b)
+        self.segments().map(|segment| &segment.name).eq(segments)
     }
 
     pub fn word(&self) -> Option<Ident> {
diff --git a/compiler/rustc_attr_parsing/src/session_diagnostics.rs b/compiler/rustc_attr_parsing/src/session_diagnostics.rs
index 2c2b14c8a68..1194ac5872c 100644
--- a/compiler/rustc_attr_parsing/src/session_diagnostics.rs
+++ b/compiler/rustc_attr_parsing/src/session_diagnostics.rs
@@ -503,9 +503,12 @@ pub(crate) struct EmptyConfusables {
 
 #[derive(LintDiagnostic)]
 #[diag(attr_parsing_empty_attribute)]
+#[note]
 pub(crate) struct EmptyAttributeList {
     #[suggestion(code = "", applicability = "machine-applicable")]
     pub attr_span: Span,
+    pub attr_path: AttrPath,
+    pub valid_without_list: bool,
 }
 
 #[derive(LintDiagnostic)]
diff --git a/compiler/rustc_borrowck/src/borrowck_errors.rs b/compiler/rustc_borrowck/src/borrowck_errors.rs
index c9be5575da5..7c9011505d6 100644
--- a/compiler/rustc_borrowck/src/borrowck_errors.rs
+++ b/compiler/rustc_borrowck/src/borrowck_errors.rs
@@ -426,7 +426,7 @@ impl<'infcx, 'tcx> crate::MirBorrowckCtxt<'_, 'infcx, 'tcx> {
     }
 
     pub(crate) fn path_does_not_live_long_enough(&self, span: Span, path: &str) -> Diag<'infcx> {
-        struct_span_code_err!(self.dcx(), span, E0597, "{} does not live long enough", path,)
+        struct_span_code_err!(self.dcx(), span, E0597, "{} does not live long enough", path)
     }
 
     pub(crate) fn cannot_return_reference_to_local(
@@ -480,7 +480,7 @@ impl<'infcx, 'tcx> crate::MirBorrowckCtxt<'_, 'infcx, 'tcx> {
     }
 
     pub(crate) fn temporary_value_borrowed_for_too_long(&self, span: Span) -> Diag<'infcx> {
-        struct_span_code_err!(self.dcx(), span, E0716, "temporary value dropped while borrowed",)
+        struct_span_code_err!(self.dcx(), span, E0716, "temporary value dropped while borrowed")
     }
 }
 
diff --git a/compiler/rustc_borrowck/src/diagnostics/conflict_errors.rs b/compiler/rustc_borrowck/src/diagnostics/conflict_errors.rs
index 7e20a5133e0..efb622e2155 100644
--- a/compiler/rustc_borrowck/src/diagnostics/conflict_errors.rs
+++ b/compiler/rustc_borrowck/src/diagnostics/conflict_errors.rs
@@ -2992,6 +2992,7 @@ impl<'infcx, 'tcx> MirBorrowckCtxt<'_, 'infcx, 'tcx> {
         self.buffer_error(err);
     }
 
+    #[tracing::instrument(level = "debug", skip(self, explanation))]
     fn report_local_value_does_not_live_long_enough(
         &self,
         location: Location,
@@ -3001,13 +3002,6 @@ impl<'infcx, 'tcx> MirBorrowckCtxt<'_, 'infcx, 'tcx> {
         borrow_spans: UseSpans<'tcx>,
         explanation: BorrowExplanation<'tcx>,
     ) -> Diag<'infcx> {
-        debug!(
-            "report_local_value_does_not_live_long_enough(\
-             {:?}, {:?}, {:?}, {:?}, {:?}\
-             )",
-            location, name, borrow, drop_span, borrow_spans
-        );
-
         let borrow_span = borrow_spans.var_or_use_path_span();
         if let BorrowExplanation::MustBeValidFor {
             category,
diff --git a/compiler/rustc_borrowck/src/diagnostics/explain_borrow.rs b/compiler/rustc_borrowck/src/diagnostics/explain_borrow.rs
index 7ca07bb9b43..638d89f5bcb 100644
--- a/compiler/rustc_borrowck/src/diagnostics/explain_borrow.rs
+++ b/compiler/rustc_borrowck/src/diagnostics/explain_borrow.rs
@@ -416,6 +416,26 @@ impl<'tcx> BorrowExplanation<'tcx> {
                 {
                     self.add_object_lifetime_default_note(tcx, err, unsize_ty);
                 }
+
+                let mut preds = path
+                    .iter()
+                    .filter_map(|constraint| match constraint.category {
+                        ConstraintCategory::Predicate(pred) if !pred.is_dummy() => Some(pred),
+                        _ => None,
+                    })
+                    .collect::<Vec<Span>>();
+                preds.sort();
+                preds.dedup();
+                if !preds.is_empty() {
+                    let s = if preds.len() == 1 { "" } else { "s" };
+                    err.span_note(
+                        preds,
+                        format!(
+                            "requirement{s} that the value outlives `{region_name}` introduced here"
+                        ),
+                    );
+                }
+
                 self.add_lifetime_bound_suggestion_to_diagnostic(err, &category, span, region_name);
             }
             _ => {}
diff --git a/compiler/rustc_builtin_macros/src/autodiff.rs b/compiler/rustc_builtin_macros/src/autodiff.rs
index f4a923797e2..ddc59bfe141 100644
--- a/compiler/rustc_builtin_macros/src/autodiff.rs
+++ b/compiler/rustc_builtin_macros/src/autodiff.rs
@@ -377,8 +377,7 @@ mod llvm_enzyme {
                 (ast::AttrKind::Normal(a), ast::AttrKind::Normal(b)) => {
                     let a = &a.item.path;
                     let b = &b.item.path;
-                    a.segments.len() == b.segments.len()
-                        && a.segments.iter().zip(b.segments.iter()).all(|(a, b)| a.ident == b.ident)
+                    a.segments.iter().eq_by(&b.segments, |a, b| a.ident == b.ident)
                 }
                 _ => false,
             }
diff --git a/compiler/rustc_builtin_macros/src/deriving/coerce_pointee.rs b/compiler/rustc_builtin_macros/src/deriving/coerce_pointee.rs
index 75db5d77783..5b378de8bbd 100644
--- a/compiler/rustc_builtin_macros/src/deriving/coerce_pointee.rs
+++ b/compiler/rustc_builtin_macros/src/deriving/coerce_pointee.rs
@@ -356,21 +356,14 @@ fn contains_maybe_sized_bound(bounds: &[GenericBound]) -> bool {
     bounds.iter().any(is_maybe_sized_bound)
 }
 
-fn path_segment_is_exact_match(path_segments: &[ast::PathSegment], syms: &[Symbol]) -> bool {
-    path_segments.iter().zip(syms).all(|(segment, &symbol)| segment.ident.name == symbol)
-}
-
 fn is_sized_marker(path: &ast::Path) -> bool {
     const CORE_UNSIZE: [Symbol; 3] = [sym::core, sym::marker, sym::Sized];
     const STD_UNSIZE: [Symbol; 3] = [sym::std, sym::marker, sym::Sized];
-    if path.segments.len() == 4 && path.is_global() {
-        path_segment_is_exact_match(&path.segments[1..], &CORE_UNSIZE)
-            || path_segment_is_exact_match(&path.segments[1..], &STD_UNSIZE)
-    } else if path.segments.len() == 3 {
-        path_segment_is_exact_match(&path.segments, &CORE_UNSIZE)
-            || path_segment_is_exact_match(&path.segments, &STD_UNSIZE)
+    let segments = || path.segments.iter().map(|segment| segment.ident.name);
+    if path.is_global() {
+        segments().skip(1).eq(CORE_UNSIZE) || segments().skip(1).eq(STD_UNSIZE)
     } else {
-        *path == sym::Sized
+        segments().eq(CORE_UNSIZE) || segments().eq(STD_UNSIZE) || *path == sym::Sized
     }
 }
 
diff --git a/compiler/rustc_builtin_macros/src/format.rs b/compiler/rustc_builtin_macros/src/format.rs
index d70888205a5..bffc0407e81 100644
--- a/compiler/rustc_builtin_macros/src/format.rs
+++ b/compiler/rustc_builtin_macros/src/format.rs
@@ -768,7 +768,7 @@ fn report_missing_placeholders(
 
     if !found_foreign && invalid_refs.is_empty() {
         // Show example if user didn't use any format specifiers
-        let show_example = used.iter().all(|used| !used);
+        let show_example = !used.contains(&true);
 
         if !show_example {
             if unused.len() > 1 {
diff --git a/compiler/rustc_builtin_macros/src/lib.rs b/compiler/rustc_builtin_macros/src/lib.rs
index 4541e2cd3b4..57cf62ea612 100644
--- a/compiler/rustc_builtin_macros/src/lib.rs
+++ b/compiler/rustc_builtin_macros/src/lib.rs
@@ -11,6 +11,7 @@
 #![feature(box_patterns)]
 #![feature(decl_macro)]
 #![feature(if_let_guard)]
+#![feature(iter_order_by)]
 #![feature(proc_macro_internals)]
 #![feature(proc_macro_quote)]
 #![feature(rustdoc_internals)]
diff --git a/compiler/rustc_codegen_cranelift/src/abi/comments.rs b/compiler/rustc_codegen_cranelift/src/abi/comments.rs
index c74efeb59f3..d1b2b9a502a 100644
--- a/compiler/rustc_codegen_cranelift/src/abi/comments.rs
+++ b/compiler/rustc_codegen_cranelift/src/abi/comments.rs
@@ -89,7 +89,7 @@ pub(super) fn add_local_place_comments<'tcx>(
         format!("{:?}", local),
         format!("{:?}", ty),
         size.bytes(),
-        align.abi.bytes(),
+        align.bytes(),
         if extra.is_empty() { "" } else { "                " },
         extra,
     ));
diff --git a/compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs b/compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs
index 2031842062d..7a909a740b0 100644
--- a/compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs
+++ b/compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs
@@ -233,7 +233,7 @@ pub(super) fn from_casted_value<'tcx>(
         // It may also be smaller for example when the type is a wrapper around an integer with a
         // larger alignment than the integer.
         std::cmp::max(abi_param_size, layout_size),
-        u32::try_from(layout.align.abi.bytes()).unwrap(),
+        u32::try_from(layout.align.bytes()).unwrap(),
     );
     let mut block_params_iter = block_params.iter().copied();
     for (offset, _) in abi_params {
diff --git a/compiler/rustc_codegen_cranelift/src/base.rs b/compiler/rustc_codegen_cranelift/src/base.rs
index 41e11e1de61..2cc5b82ddd3 100644
--- a/compiler/rustc_codegen_cranelift/src/base.rs
+++ b/compiler/rustc_codegen_cranelift/src/base.rs
@@ -846,7 +846,7 @@ fn codegen_stmt<'tcx>(fx: &mut FunctionCx<'_, '_, 'tcx>, cur_block: Block, stmt:
                     let layout = fx.layout_of(fx.monomorphize(ty));
                     let val = match null_op {
                         NullOp::SizeOf => layout.size.bytes(),
-                        NullOp::AlignOf => layout.align.abi.bytes(),
+                        NullOp::AlignOf => layout.align.bytes(),
                         NullOp::OffsetOf(fields) => fx
                             .tcx
                             .offset_of_subfield(
diff --git a/compiler/rustc_codegen_cranelift/src/debuginfo/mod.rs b/compiler/rustc_codegen_cranelift/src/debuginfo/mod.rs
index 286e02b986b..4c438742f3d 100644
--- a/compiler/rustc_codegen_cranelift/src/debuginfo/mod.rs
+++ b/compiler/rustc_codegen_cranelift/src/debuginfo/mod.rs
@@ -304,7 +304,7 @@ impl DebugContext {
         entry.set(gimli::DW_AT_decl_file, AttributeValue::FileIndex(Some(file_id)));
         entry.set(gimli::DW_AT_decl_line, AttributeValue::Udata(line));
 
-        entry.set(gimli::DW_AT_alignment, AttributeValue::Udata(static_layout.align.abi.bytes()));
+        entry.set(gimli::DW_AT_alignment, AttributeValue::Udata(static_layout.align.bytes()));
 
         let mut expr = Expression::new();
         expr.op_addr(address_for_data(data_id));
diff --git a/compiler/rustc_codegen_cranelift/src/debuginfo/types.rs b/compiler/rustc_codegen_cranelift/src/debuginfo/types.rs
index 25b922c8be4..0d49f32373c 100644
--- a/compiler/rustc_codegen_cranelift/src/debuginfo/types.rs
+++ b/compiler/rustc_codegen_cranelift/src/debuginfo/types.rs
@@ -166,7 +166,7 @@ impl DebugContext {
         let tuple_entry = self.dwarf.unit.get_mut(tuple_type_id);
         tuple_entry.set(gimli::DW_AT_name, AttributeValue::StringRef(self.dwarf.strings.add(name)));
         tuple_entry.set(gimli::DW_AT_byte_size, AttributeValue::Udata(layout.size.bytes()));
-        tuple_entry.set(gimli::DW_AT_alignment, AttributeValue::Udata(layout.align.abi.bytes()));
+        tuple_entry.set(gimli::DW_AT_alignment, AttributeValue::Udata(layout.align.bytes()));
 
         for (i, (ty, dw_ty)) in components.into_iter().enumerate() {
             let member_id = self.dwarf.unit.add(tuple_type_id, gimli::DW_TAG_member);
@@ -178,9 +178,7 @@ impl DebugContext {
             member_entry.set(gimli::DW_AT_type, AttributeValue::UnitRef(dw_ty));
             member_entry.set(
                 gimli::DW_AT_alignment,
-                AttributeValue::Udata(
-                    FullyMonomorphizedLayoutCx(tcx).layout_of(ty).align.abi.bytes(),
-                ),
+                AttributeValue::Udata(FullyMonomorphizedLayoutCx(tcx).layout_of(ty).align.bytes()),
             );
             member_entry.set(
                 gimli::DW_AT_data_member_location,
diff --git a/compiler/rustc_codegen_cranelift/src/lib.rs b/compiler/rustc_codegen_cranelift/src/lib.rs
index 8e34436fb5e..5fd7c4d4f41 100644
--- a/compiler/rustc_codegen_cranelift/src/lib.rs
+++ b/compiler/rustc_codegen_cranelift/src/lib.rs
@@ -165,6 +165,10 @@ impl CodegenBackend for CraneliftCodegenBackend {
         ""
     }
 
+    fn name(&self) -> &'static str {
+        "cranelift"
+    }
+
     fn init(&self, sess: &Session) {
         use rustc_session::config::{InstrumentCoverage, Lto};
         match sess.lto() {
diff --git a/compiler/rustc_codegen_cranelift/src/unsize.rs b/compiler/rustc_codegen_cranelift/src/unsize.rs
index 643c7feb89a..d994f3e32ec 100644
--- a/compiler/rustc_codegen_cranelift/src/unsize.rs
+++ b/compiler/rustc_codegen_cranelift/src/unsize.rs
@@ -167,7 +167,7 @@ pub(crate) fn size_and_align_of<'tcx>(
     if layout.is_sized() {
         return (
             fx.bcx.ins().iconst(fx.pointer_type, layout.size.bytes() as i64),
-            fx.bcx.ins().iconst(fx.pointer_type, layout.align.abi.bytes() as i64),
+            fx.bcx.ins().iconst(fx.pointer_type, layout.align.bytes() as i64),
         );
     }
 
@@ -186,7 +186,7 @@ pub(crate) fn size_and_align_of<'tcx>(
             // times the unit size.
             (
                 fx.bcx.ins().imul_imm(info.unwrap(), unit.size.bytes() as i64),
-                fx.bcx.ins().iconst(fx.pointer_type, unit.align.abi.bytes() as i64),
+                fx.bcx.ins().iconst(fx.pointer_type, unit.align.bytes() as i64),
             )
         }
         ty::Foreign(_) => {
@@ -224,7 +224,7 @@ pub(crate) fn size_and_align_of<'tcx>(
             let unsized_offset_unadjusted = layout.fields.offset(i).bytes();
             let unsized_offset_unadjusted =
                 fx.bcx.ins().iconst(fx.pointer_type, unsized_offset_unadjusted as i64);
-            let sized_align = layout.align.abi.bytes();
+            let sized_align = layout.align.bytes();
             let sized_align = fx.bcx.ins().iconst(fx.pointer_type, sized_align as i64);
 
             // Recurse to get the size of the dynamically sized field (must be
diff --git a/compiler/rustc_codegen_cranelift/src/value_and_place.rs b/compiler/rustc_codegen_cranelift/src/value_and_place.rs
index 4519fa1a270..04e10cf1708 100644
--- a/compiler/rustc_codegen_cranelift/src/value_and_place.rs
+++ b/compiler/rustc_codegen_cranelift/src/value_and_place.rs
@@ -383,7 +383,7 @@ impl<'tcx> CPlace<'tcx> {
 
         let stack_slot = fx.create_stack_slot(
             u32::try_from(layout.size.bytes()).unwrap(),
-            u32::try_from(layout.align.abi.bytes()).unwrap(),
+            u32::try_from(layout.align.bytes()).unwrap(),
         );
         CPlace { inner: CPlaceInner::Addr(stack_slot, None), layout }
     }
@@ -641,8 +641,8 @@ impl<'tcx> CPlace<'tcx> {
                         let size = dst_layout.size.bytes();
                         // `emit_small_memory_copy` uses `u8` for alignments, just use the maximum
                         // alignment that fits in a `u8` if the actual alignment is larger.
-                        let src_align = src_layout.align.abi.bytes().try_into().unwrap_or(128);
-                        let dst_align = dst_layout.align.abi.bytes().try_into().unwrap_or(128);
+                        let src_align = src_layout.align.bytes().try_into().unwrap_or(128);
+                        let dst_align = dst_layout.align.bytes().try_into().unwrap_or(128);
                         fx.bcx.emit_small_memory_copy(
                             fx.target_config,
                             to_addr,
diff --git a/compiler/rustc_codegen_gcc/src/builder.rs b/compiler/rustc_codegen_gcc/src/builder.rs
index f7a7a3f8c7e..5657620879c 100644
--- a/compiler/rustc_codegen_gcc/src/builder.rs
+++ b/compiler/rustc_codegen_gcc/src/builder.rs
@@ -1383,6 +1383,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
         _src_align: Align,
         size: RValue<'gcc>,
         flags: MemFlags,
+        _tt: Option<rustc_ast::expand::typetree::FncTree>, // Autodiff TypeTrees are LLVM-only, ignored in GCC backend
     ) {
         assert!(!flags.contains(MemFlags::NONTEMPORAL), "non-temporal memcpy not supported");
         let size = self.intcast(size, self.type_size_t(), false);
diff --git a/compiler/rustc_codegen_gcc/src/context.rs b/compiler/rustc_codegen_gcc/src/context.rs
index 9815fb07eaa..c9ae96777de 100644
--- a/compiler/rustc_codegen_gcc/src/context.rs
+++ b/compiler/rustc_codegen_gcc/src/context.rs
@@ -147,7 +147,7 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
             let layout = tcx
                 .layout_of(ty::TypingEnv::fully_monomorphized().as_query_input(rust_type))
                 .unwrap();
-            let align = layout.align.abi.bytes();
+            let align = layout.align.bytes();
             // For types with size 1, the alignment can be 1 and only 1
             // So, we can skip the call to `get_aligned`.
             // In the future, we can add a GCC API to query the type align,
@@ -186,9 +186,9 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
             (i128_type, u128_type)
         } else {
             /*let layout = tcx.layout_of(ParamEnv::reveal_all().and(tcx.types.i128)).unwrap();
-            let i128_align = layout.align.abi.bytes();
+            let i128_align = layout.align.bytes();
             let layout = tcx.layout_of(ParamEnv::reveal_all().and(tcx.types.u128)).unwrap();
-            let u128_align = layout.align.abi.bytes();*/
+            let u128_align = layout.align.bytes();*/
 
             // TODO(antoyo): re-enable the alignment when libgccjit fixed the issue in
             // gcc_jit_context_new_array_constructor (it should not use reinterpret_cast).
diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs
index a915f5d6418..99a4f9b9f7e 100644
--- a/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs
+++ b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs
@@ -770,6 +770,7 @@ impl<'gcc, 'tcx> ArgAbiExt<'gcc, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
                     scratch_align,
                     bx.const_usize(self.layout.size.bytes()),
                     MemFlags::empty(),
+                    None,
                 );
 
                 bx.lifetime_end(scratch, scratch_size);
diff --git a/compiler/rustc_codegen_gcc/src/lib.rs b/compiler/rustc_codegen_gcc/src/lib.rs
index f76f933cad4..ec7eab8489a 100644
--- a/compiler/rustc_codegen_gcc/src/lib.rs
+++ b/compiler/rustc_codegen_gcc/src/lib.rs
@@ -184,6 +184,10 @@ impl CodegenBackend for GccCodegenBackend {
         crate::DEFAULT_LOCALE_RESOURCE
     }
 
+    fn name(&self) -> &'static str {
+        "gcc"
+    }
+
     fn init(&self, _sess: &Session) {
         #[cfg(feature = "master")]
         {
diff --git a/compiler/rustc_codegen_llvm/src/abi.rs b/compiler/rustc_codegen_llvm/src/abi.rs
index 861227f7c2a..1703cab942b 100644
--- a/compiler/rustc_codegen_llvm/src/abi.rs
+++ b/compiler/rustc_codegen_llvm/src/abi.rs
@@ -246,6 +246,7 @@ impl<'ll, 'tcx> ArgAbiExt<'ll, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
                     scratch_align,
                     bx.const_usize(copy_bytes),
                     MemFlags::empty(),
+                    None,
                 );
                 bx.lifetime_end(llscratch, scratch_size);
             }
diff --git a/compiler/rustc_codegen_llvm/src/allocator.rs b/compiler/rustc_codegen_llvm/src/allocator.rs
index abd63120397..896d6755c75 100644
--- a/compiler/rustc_codegen_llvm/src/allocator.rs
+++ b/compiler/rustc_codegen_llvm/src/allocator.rs
@@ -5,9 +5,10 @@ use rustc_ast::expand::allocator::{
 };
 use rustc_codegen_ssa::traits::BaseTypeCodegenMethods as _;
 use rustc_middle::bug;
-use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrs;
+use rustc_middle::middle::codegen_fn_attrs::{CodegenFnAttrFlags, CodegenFnAttrs};
 use rustc_middle::ty::TyCtxt;
 use rustc_session::config::{DebugInfo, OomStrategy};
+use rustc_span::sym;
 use rustc_symbol_mangling::mangle_internal_symbol;
 
 use crate::attributes::llfn_attrs_from_instance;
@@ -59,7 +60,26 @@ pub(crate) unsafe fn codegen(
             let from_name = mangle_internal_symbol(tcx, &global_fn_name(method.name));
             let to_name = mangle_internal_symbol(tcx, &default_fn_name(method.name));
 
-            create_wrapper_function(tcx, &cx, &from_name, Some(&to_name), &args, output, false);
+            let alloc_attr_flag = match method.name {
+                sym::alloc => CodegenFnAttrFlags::ALLOCATOR,
+                sym::dealloc => CodegenFnAttrFlags::DEALLOCATOR,
+                sym::realloc => CodegenFnAttrFlags::REALLOCATOR,
+                sym::alloc_zeroed => CodegenFnAttrFlags::ALLOCATOR_ZEROED,
+                _ => unreachable!("Unknown allocator method!"),
+            };
+
+            let mut attrs = CodegenFnAttrs::new();
+            attrs.flags |= alloc_attr_flag;
+            create_wrapper_function(
+                tcx,
+                &cx,
+                &from_name,
+                Some(&to_name),
+                &args,
+                output,
+                false,
+                &attrs,
+            );
         }
     }
 
@@ -72,6 +92,7 @@ pub(crate) unsafe fn codegen(
         &[usize, usize], // size, align
         None,
         true,
+        &CodegenFnAttrs::new(),
     );
 
     unsafe {
@@ -93,6 +114,7 @@ pub(crate) unsafe fn codegen(
             &[],
             None,
             false,
+            &CodegenFnAttrs::new(),
         );
     }
 
@@ -139,6 +161,7 @@ fn create_wrapper_function(
     args: &[&Type],
     output: Option<&Type>,
     no_return: bool,
+    attrs: &CodegenFnAttrs,
 ) {
     let ty = cx.type_func(args, output.unwrap_or_else(|| cx.type_void()));
     let llfn = declare_simple_fn(
@@ -150,8 +173,7 @@ fn create_wrapper_function(
         ty,
     );
 
-    let attrs = CodegenFnAttrs::new();
-    llfn_attrs_from_instance(cx, tcx, llfn, &attrs, None);
+    llfn_attrs_from_instance(cx, tcx, llfn, attrs, None);
 
     let no_return = if no_return {
         // -> ! DIFlagNoReturn
diff --git a/compiler/rustc_codegen_llvm/src/back/lto.rs b/compiler/rustc_codegen_llvm/src/back/lto.rs
index 78107d95e5a..5ac3a87c158 100644
--- a/compiler/rustc_codegen_llvm/src/back/lto.rs
+++ b/compiler/rustc_codegen_llvm/src/back/lto.rs
@@ -563,6 +563,8 @@ fn enable_autodiff_settings(ad: &[config::AutoDiff]) {
             config::AutoDiff::Enable => {}
             // We handle this below
             config::AutoDiff::NoPostopt => {}
+            // Disables TypeTree generation
+            config::AutoDiff::NoTT => {}
         }
     }
     // This helps with handling enums for now.
diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs
index a4dc4eb532f..5271d0b4bb8 100644
--- a/compiler/rustc_codegen_llvm/src/builder.rs
+++ b/compiler/rustc_codegen_llvm/src/builder.rs
@@ -2,6 +2,7 @@ use std::borrow::{Borrow, Cow};
 use std::ops::Deref;
 use std::{iter, ptr};
 
+use rustc_ast::expand::typetree::FncTree;
 pub(crate) mod autodiff;
 pub(crate) mod gpu_offload;
 
@@ -1107,11 +1108,12 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         src_align: Align,
         size: &'ll Value,
         flags: MemFlags,
+        tt: Option<FncTree>,
     ) {
         assert!(!flags.contains(MemFlags::NONTEMPORAL), "non-temporal memcpy not supported");
         let size = self.intcast(size, self.type_isize(), false);
         let is_volatile = flags.contains(MemFlags::VOLATILE);
-        unsafe {
+        let memcpy = unsafe {
             llvm::LLVMRustBuildMemCpy(
                 self.llbuilder,
                 dst,
@@ -1120,7 +1122,16 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
                 src_align.bytes() as c_uint,
                 size,
                 is_volatile,
-            );
+            )
+        };
+
+        // TypeTree metadata for memcpy is especially important: when Enzyme encounters
+        // a memcpy during autodiff, it needs to know the structure of the data being
+        // copied to properly track derivatives. For example, copying an array of floats
+        // vs. copying a struct with mixed types requires different derivative handling.
+        // The TypeTree tells Enzyme exactly what memory layout to expect.
+        if let Some(tt) = tt {
+            crate::typetree::add_tt(self.cx().llmod, self.cx().llcx, memcpy, tt);
         }
     }
 
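
The comment above is the heart of this change: an LLVM memcpy is an untyped byte copy, so without extra metadata an autodiff tool cannot tell which of the copied bytes carry differentiable floats. A standalone sketch (ordinary Rust, not rustc-internal code) of why the byte copy alone records no type information:

fn main() {
    // Two values with identical size but different "derivative structure":
    // two floats vs. one float plus one integer.
    let floats: [f32; 2] = [1.0, 2.0];
    let mixed: (f32, i32) = (1.0, 2);

    let mut a = [0u8; 8];
    let mut b = [0u8; 8];
    unsafe {
        // Byte-for-byte copies, exactly what an LLVM memcpy does.
        std::ptr::copy_nonoverlapping(floats.as_ptr().cast::<u8>(), a.as_mut_ptr(), 8);
        std::ptr::copy_nonoverlapping(&mixed as *const (f32, i32) as *const u8, b.as_mut_ptr(), 8);
    }
    // Nothing in either copy says which bytes hold floats; that is the
    // information the attached TypeTree supplies to Enzyme.
    println!("{a:?} {b:?}");
}
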
diff --git a/compiler/rustc_codegen_llvm/src/builder/autodiff.rs b/compiler/rustc_codegen_llvm/src/builder/autodiff.rs
index b66e3dfdeec..c3485f56391 100644
--- a/compiler/rustc_codegen_llvm/src/builder/autodiff.rs
+++ b/compiler/rustc_codegen_llvm/src/builder/autodiff.rs
@@ -1,6 +1,7 @@
 use std::ptr;
 
 use rustc_ast::expand::autodiff_attrs::{AutoDiffAttrs, DiffActivity, DiffMode};
+use rustc_ast::expand::typetree::FncTree;
 use rustc_codegen_ssa::common::TypeKind;
 use rustc_codegen_ssa::traits::{BaseTypeCodegenMethods, BuilderMethods};
 use rustc_middle::ty::{Instance, PseudoCanonicalInput, TyCtxt, TypingEnv};
@@ -294,6 +295,7 @@ pub(crate) fn generate_enzyme_call<'ll, 'tcx>(
     fn_args: &[&'ll Value],
     attrs: AutoDiffAttrs,
     dest: PlaceRef<'tcx, &'ll Value>,
+    fnc_tree: FncTree,
 ) {
     // We have to pick the name depending on whether we want forward or reverse mode autodiff.
     let mut ad_name: String = match attrs.mode {
@@ -370,6 +372,10 @@ pub(crate) fn generate_enzyme_call<'ll, 'tcx>(
         fn_args,
     );
 
+    if !fnc_tree.args.is_empty() || !fnc_tree.ret.0.is_empty() {
+        crate::typetree::add_tt(cx.llmod, cx.llcx, fn_to_diff, fnc_tree);
+    }
+
     let call = builder.call(enzyme_ty, None, None, ad_fn, &args, None, None);
 
     builder.store_to_place(call, dest.val);
diff --git a/compiler/rustc_codegen_llvm/src/consts.rs b/compiler/rustc_codegen_llvm/src/consts.rs
index a110ecbb75d..40375ef6510 100644
--- a/compiler/rustc_codegen_llvm/src/consts.rs
+++ b/compiler/rustc_codegen_llvm/src/consts.rs
@@ -564,7 +564,7 @@ impl<'ll> CodegenCx<'ll, '_> {
         let g = self.define_global(&sym, llty).unwrap_or_else(|| {
             bug!("symbol `{}` is already defined", sym);
         });
-        set_global_alignment(self, g, self.tcx.data_layout.i8_align.abi);
+        set_global_alignment(self, g, self.tcx.data_layout.i8_align);
         llvm::set_initializer(g, llval);
         llvm::set_linkage(g, llvm::Linkage::PrivateLinkage);
         llvm::set_section(g, c"__TEXT,__cstring,cstring_literals");
@@ -680,7 +680,7 @@ impl<'ll> CodegenCx<'ll, '_> {
         let methname_g = self.define_global(&methname_sym, methname_llty).unwrap_or_else(|| {
             bug!("symbol `{}` is already defined", methname_sym);
         });
-        set_global_alignment(self, methname_g, self.tcx.data_layout.i8_align.abi);
+        set_global_alignment(self, methname_g, self.tcx.data_layout.i8_align);
         llvm::set_initializer(methname_g, methname_llval);
         llvm::set_linkage(methname_g, llvm::Linkage::PrivateLinkage);
         llvm::set_section(
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
index 4ba72cd61a0..1e4ace4ca92 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
@@ -117,7 +117,7 @@ fn build_fixed_size_array_di_node<'ll, 'tcx>(
         .try_to_target_usize(cx.tcx)
         .expect("expected monomorphic const in codegen") as c_longlong;
 
-    let subrange = unsafe { llvm::LLVMRustDIBuilderGetOrCreateSubrange(DIB(cx), 0, upper_bound) };
+    let subrange = unsafe { llvm::LLVMDIBuilderGetOrCreateSubrange(DIB(cx), 0, upper_bound) };
     let subscripts = &[subrange];
 
     let di_node = unsafe {
@@ -1043,7 +1043,7 @@ fn create_member_type<'ll, 'tcx>(
             file_metadata,
             line_number,
             layout.size.bits(),
-            layout.align.abi.bits() as u32,
+            layout.align.bits() as u32,
             offset.bits(),
             flags,
             type_di_node,
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs
index 62d38d463ab..1ae6e6e5eec 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs
@@ -289,7 +289,7 @@ fn build_enum_variant_part_di_node<'ll, 'tcx>(
                 file_metadata,
                 line_number,
                 enum_type_and_layout.size.bits(),
-                enum_type_and_layout.align.abi.bits() as u32,
+                enum_type_and_layout.align.bits() as u32,
                 DIFlags::FlagZero,
                 tag_member_di_node,
                 create_DIArray(DIB(cx), &[]),
@@ -449,7 +449,7 @@ fn build_enum_variant_member_di_node<'ll, 'tcx>(
             file_di_node,
             line_number,
             enum_type_and_layout.size.bits(),
-            enum_type_and_layout.align.abi.bits() as u32,
+            enum_type_and_layout.align.bits() as u32,
             Size::ZERO.bits(),
             discr,
             DIFlags::FlagZero,
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs b/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs
index 126082aa3aa..af64e4ebed0 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs
@@ -52,15 +52,6 @@ mod utils;
 use self::create_scope_map::compute_mir_scopes;
 pub(crate) use self::metadata::build_global_var_di_node;
 
-// FIXME(Zalathar): These `DW_TAG_*` constants are fake values that were
-// removed from LLVM in 2015, and are only used by our own `RustWrapper.cpp`
-// to decide which C++ API to call. Instead, we should just have two separate
-// FFI functions and choose the correct one on the Rust side.
-#[allow(non_upper_case_globals)]
-const DW_TAG_auto_variable: c_uint = 0x100;
-#[allow(non_upper_case_globals)]
-const DW_TAG_arg_variable: c_uint = 0x101;
-
 /// A context object for maintaining all state needed by the debuginfo module.
 pub(crate) struct CodegenUnitDebugContext<'ll, 'tcx> {
     llmod: &'ll llvm::Module,
@@ -174,35 +165,38 @@ impl<'ll> DebugInfoBuilderMethods for Builder<'_, 'll, '_> {
 
         if direct_offset.bytes() > 0 {
             addr_ops.push(DW_OP_plus_uconst);
-            addr_ops.push(direct_offset.bytes() as u64);
+            addr_ops.push(direct_offset.bytes());
         }
         for &offset in indirect_offsets {
             addr_ops.push(DW_OP_deref);
             if offset.bytes() > 0 {
                 addr_ops.push(DW_OP_plus_uconst);
-                addr_ops.push(offset.bytes() as u64);
+                addr_ops.push(offset.bytes());
             }
         }
         if let Some(fragment) = fragment {
             // `DW_OP_LLVM_fragment` takes as arguments the fragment's
             // offset and size, both of them in bits.
             addr_ops.push(DW_OP_LLVM_fragment);
-            addr_ops.push(fragment.start.bits() as u64);
-            addr_ops.push((fragment.end - fragment.start).bits() as u64);
+            addr_ops.push(fragment.start.bits());
+            addr_ops.push((fragment.end - fragment.start).bits());
         }
 
+        let di_builder = DIB(self.cx());
+        let addr_expr = unsafe {
+            llvm::LLVMDIBuilderCreateExpression(di_builder, addr_ops.as_ptr(), addr_ops.len())
+        };
         unsafe {
             // FIXME(eddyb) replace `llvm.dbg.declare` with `llvm.dbg.addr`.
-            llvm::LLVMRustDIBuilderInsertDeclareAtEnd(
-                DIB(self.cx()),
+            llvm::LLVMDIBuilderInsertDeclareRecordAtEnd(
+                di_builder,
                 variable_alloca,
                 dbg_var,
-                addr_ops.as_ptr(),
-                addr_ops.len() as c_uint,
+                addr_expr,
                 dbg_loc,
                 self.llbb(),
-            );
-        }
+            )
+        };
     }
 
     fn set_dbg_loc(&mut self, dbg_loc: &'ll DILocation) {
@@ -630,28 +624,39 @@ impl<'ll, 'tcx> DebugInfoCodegenMethods<'tcx> for CodegenCx<'ll, 'tcx> {
 
         let type_metadata = spanned_type_di_node(self, variable_type, span);
 
-        let (argument_index, dwarf_tag) = match variable_kind {
-            ArgumentVariable(index) => (index as c_uint, DW_TAG_arg_variable),
-            LocalVariable => (0, DW_TAG_auto_variable),
-        };
         let align = self.align_of(variable_type);
 
         let name = variable_name.as_str();
-        unsafe {
-            llvm::LLVMRustDIBuilderCreateVariable(
-                DIB(self),
-                dwarf_tag,
-                scope_metadata,
-                name.as_c_char_ptr(),
-                name.len(),
-                file_metadata,
-                loc.line,
-                type_metadata,
-                true,
-                DIFlags::FlagZero,
-                argument_index,
-                align.bits() as u32,
-            )
+
+        match variable_kind {
+            ArgumentVariable(arg_index) => unsafe {
+                llvm::LLVMDIBuilderCreateParameterVariable(
+                    DIB(self),
+                    scope_metadata,
+                    name.as_ptr(),
+                    name.len(),
+                    arg_index as c_uint,
+                    file_metadata,
+                    loc.line,
+                    type_metadata,
+                    llvm::Bool::TRUE, // (preserve descriptor during optimizations)
+                    DIFlags::FlagZero,
+                )
+            },
+            LocalVariable => unsafe {
+                llvm::LLVMDIBuilderCreateAutoVariable(
+                    DIB(self),
+                    scope_metadata,
+                    name.as_ptr(),
+                    name.len(),
+                    file_metadata,
+                    loc.line,
+                    type_metadata,
+                    llvm::Bool::TRUE, // (preserve descriptor during optimizations)
+                    DIFlags::FlagZero,
+                    align.bits() as u32,
+                )
+            },
         }
     }
 }
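
For reference, the address-operation list that now gets wrapped in LLVMDIBuilderCreateExpression is built the same way as before; below is a standalone sketch of that construction. The helper name and the concrete operator values (0x23 for DW_OP_plus_uconst, 0x06 for DW_OP_deref, 0x1000 for DW_OP_LLVM_fragment) are assumptions written out for illustration, rustc takes them from its own llvm bindings.

const DW_OP_DEREF: u64 = 0x06;
const DW_OP_PLUS_UCONST: u64 = 0x23;
const DW_OP_LLVM_FRAGMENT: u64 = 0x1000;

fn build_addr_ops(direct_offset: u64, indirect_offsets: &[u64], fragment: Option<(u64, u64)>) -> Vec<u64> {
    let mut addr_ops = Vec::new();
    if direct_offset > 0 {
        addr_ops.extend([DW_OP_PLUS_UCONST, direct_offset]);
    }
    for &offset in indirect_offsets {
        addr_ops.push(DW_OP_DEREF);
        if offset > 0 {
            addr_ops.extend([DW_OP_PLUS_UCONST, offset]);
        }
    }
    if let Some((start_bits, size_bits)) = fragment {
        // `DW_OP_LLVM_fragment` takes the fragment's offset and size in bits.
        addr_ops.extend([DW_OP_LLVM_FRAGMENT, start_bits, size_bits]);
    }
    addr_ops
}

fn main() {
    // A variable 8 bytes into an alloca, reached through one pointer hop.
    assert_eq!(build_addr_ops(8, &[0], None), vec![0x23, 8, 0x06]);
}
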
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/utils.rs b/compiler/rustc_codegen_llvm/src/debuginfo/utils.rs
index cc1d504b430..7e1e49310f6 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/utils.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/utils.rs
@@ -28,7 +28,7 @@ pub(crate) fn create_DIArray<'ll>(
     builder: &DIBuilder<'ll>,
     arr: &[Option<&'ll DIDescriptor>],
 ) -> &'ll DIArray {
-    unsafe { llvm::LLVMRustDIBuilderGetOrCreateArray(builder, arr.as_ptr(), arr.len() as u32) }
+    unsafe { llvm::LLVMDIBuilderGetOrCreateArray(builder, arr.as_ptr(), arr.len()) }
 }
 
 #[inline]
diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs
index 50398a32142..467655b0bfc 100644
--- a/compiler/rustc_codegen_llvm/src/intrinsic.rs
+++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs
@@ -297,7 +297,7 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                 let align = if name == sym::unaligned_volatile_load {
                     1
                 } else {
-                    result.layout.align.abi.bytes() as u32
+                    result.layout.align.bytes() as u32
                 };
                 unsafe {
                     llvm::LLVMSetAlignment(load, align);
@@ -1047,7 +1047,7 @@ fn codegen_emcc_try<'ll, 'tcx>(
         // create an alloca and pass a pointer to that.
         let ptr_size = bx.tcx().data_layout.pointer_size();
         let ptr_align = bx.tcx().data_layout.pointer_align().abi;
-        let i8_align = bx.tcx().data_layout.i8_align.abi;
+        let i8_align = bx.tcx().data_layout.i8_align;
         // Required in order for there to be no padding between the fields.
         assert!(i8_align <= ptr_align);
         let catch_data = bx.alloca(2 * ptr_size, ptr_align);
@@ -1212,6 +1212,9 @@ fn codegen_autodiff<'ll, 'tcx>(
         &mut diff_attrs.input_activity,
     );
 
+    let fnc_tree =
+        rustc_middle::ty::fnc_typetrees(tcx, fn_source.ty(tcx, TypingEnv::fully_monomorphized()));
+
     // Build body
     generate_enzyme_call(
         bx,
@@ -1222,6 +1225,7 @@ fn codegen_autodiff<'ll, 'tcx>(
         &val_arr,
         diff_attrs.clone(),
         result,
+        fnc_tree,
     );
 }
 
diff --git a/compiler/rustc_codegen_llvm/src/lib.rs b/compiler/rustc_codegen_llvm/src/lib.rs
index 13bdb7cb1a2..2405a25c702 100644
--- a/compiler/rustc_codegen_llvm/src/lib.rs
+++ b/compiler/rustc_codegen_llvm/src/lib.rs
@@ -68,6 +68,7 @@ mod llvm_util;
 mod mono_item;
 mod type_;
 mod type_of;
+mod typetree;
 mod va_arg;
 mod value;
 
@@ -231,6 +232,10 @@ impl CodegenBackend for LlvmCodegenBackend {
         crate::DEFAULT_LOCALE_RESOURCE
     }
 
+    fn name(&self) -> &'static str {
+        "llvm"
+    }
+
     fn init(&self, sess: &Session) {
         llvm_util::init(sess); // Make sure llvm is inited
     }
@@ -349,7 +354,14 @@ impl CodegenBackend for LlvmCodegenBackend {
 
         // Run the linker on any artifacts that resulted from the LLVM run.
         // This should produce either a finished executable or library.
-        link_binary(sess, &LlvmArchiveBuilderBuilder, codegen_results, metadata, outputs);
+        link_binary(
+            sess,
+            &LlvmArchiveBuilderBuilder,
+            codegen_results,
+            metadata,
+            outputs,
+            self.name(),
+        );
     }
 }
 
diff --git a/compiler/rustc_codegen_llvm/src/llvm/enzyme_ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/enzyme_ffi.rs
index 695435eb6da..e63043b2122 100644
--- a/compiler/rustc_codegen_llvm/src/llvm/enzyme_ffi.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm/enzyme_ffi.rs
@@ -3,9 +3,36 @@
 use libc::{c_char, c_uint};
 
 use super::MetadataKindId;
-use super::ffi::{AttributeKind, BasicBlock, Metadata, Module, Type, Value};
+use super::ffi::{AttributeKind, BasicBlock, Context, Metadata, Module, Type, Value};
 use crate::llvm::{Bool, Builder};
 
+// TypeTree types
+pub(crate) type CTypeTreeRef = *mut EnzymeTypeTree;
+
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub(crate) struct EnzymeTypeTree {
+    _unused: [u8; 0],
+}
+
+#[repr(u32)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+#[allow(non_camel_case_types)]
+pub(crate) enum CConcreteType {
+    DT_Anything = 0,
+    DT_Integer = 1,
+    DT_Pointer = 2,
+    DT_Half = 3,
+    DT_Float = 4,
+    DT_Double = 5,
+    DT_Unknown = 6,
+    DT_FP128 = 9,
+}
+
+pub(crate) struct TypeTree {
+    pub(crate) inner: CTypeTreeRef,
+}
+
 #[link(name = "llvm-wrapper", kind = "static")]
 unsafe extern "C" {
     // Enzyme
@@ -68,10 +95,40 @@ pub(crate) mod Enzyme_AD {
 
     use libc::c_void;
 
+    use super::{CConcreteType, CTypeTreeRef, Context};
+
     unsafe extern "C" {
         pub(crate) fn EnzymeSetCLBool(arg1: *mut ::std::os::raw::c_void, arg2: u8);
         pub(crate) fn EnzymeSetCLString(arg1: *mut ::std::os::raw::c_void, arg2: *const c_char);
     }
+
+    // TypeTree functions
+    unsafe extern "C" {
+        pub(crate) fn EnzymeNewTypeTree() -> CTypeTreeRef;
+        pub(crate) fn EnzymeNewTypeTreeCT(arg1: CConcreteType, ctx: &Context) -> CTypeTreeRef;
+        pub(crate) fn EnzymeNewTypeTreeTR(arg1: CTypeTreeRef) -> CTypeTreeRef;
+        pub(crate) fn EnzymeFreeTypeTree(CTT: CTypeTreeRef);
+        pub(crate) fn EnzymeMergeTypeTree(arg1: CTypeTreeRef, arg2: CTypeTreeRef) -> bool;
+        pub(crate) fn EnzymeTypeTreeOnlyEq(arg1: CTypeTreeRef, pos: i64);
+        pub(crate) fn EnzymeTypeTreeData0Eq(arg1: CTypeTreeRef);
+        pub(crate) fn EnzymeTypeTreeShiftIndiciesEq(
+            arg1: CTypeTreeRef,
+            data_layout: *const c_char,
+            offset: i64,
+            max_size: i64,
+            add_offset: u64,
+        );
+        pub(crate) fn EnzymeTypeTreeInsertEq(
+            CTT: CTypeTreeRef,
+            indices: *const i64,
+            len: usize,
+            ct: CConcreteType,
+            ctx: &Context,
+        );
+        pub(crate) fn EnzymeTypeTreeToString(arg1: CTypeTreeRef) -> *const c_char;
+        pub(crate) fn EnzymeTypeTreeToStringFree(arg1: *const c_char);
+    }
+
     unsafe extern "C" {
         static mut EnzymePrintPerf: c_void;
         static mut EnzymePrintActivity: c_void;
@@ -141,6 +198,67 @@ pub(crate) use self::Fallback_AD::*;
 pub(crate) mod Fallback_AD {
     #![allow(unused_variables)]
 
+    use libc::c_char;
+
+    use super::{CConcreteType, CTypeTreeRef, Context};
+
+    // TypeTree function fallbacks
+    pub(crate) unsafe fn EnzymeNewTypeTree() -> CTypeTreeRef {
+        unimplemented!()
+    }
+
+    pub(crate) unsafe fn EnzymeNewTypeTreeCT(arg1: CConcreteType, ctx: &Context) -> CTypeTreeRef {
+        unimplemented!()
+    }
+
+    pub(crate) unsafe fn EnzymeNewTypeTreeTR(arg1: CTypeTreeRef) -> CTypeTreeRef {
+        unimplemented!()
+    }
+
+    pub(crate) unsafe fn EnzymeFreeTypeTree(CTT: CTypeTreeRef) {
+        unimplemented!()
+    }
+
+    pub(crate) unsafe fn EnzymeMergeTypeTree(arg1: CTypeTreeRef, arg2: CTypeTreeRef) -> bool {
+        unimplemented!()
+    }
+
+    pub(crate) unsafe fn EnzymeTypeTreeOnlyEq(arg1: CTypeTreeRef, pos: i64) {
+        unimplemented!()
+    }
+
+    pub(crate) unsafe fn EnzymeTypeTreeData0Eq(arg1: CTypeTreeRef) {
+        unimplemented!()
+    }
+
+    pub(crate) unsafe fn EnzymeTypeTreeShiftIndiciesEq(
+        arg1: CTypeTreeRef,
+        data_layout: *const c_char,
+        offset: i64,
+        max_size: i64,
+        add_offset: u64,
+    ) {
+        unimplemented!()
+    }
+
+    pub(crate) unsafe fn EnzymeTypeTreeInsertEq(
+        CTT: CTypeTreeRef,
+        indices: *const i64,
+        len: usize,
+        ct: CConcreteType,
+        ctx: &Context,
+    ) {
+        unimplemented!()
+    }
+
+    pub(crate) unsafe fn EnzymeTypeTreeToString(arg1: CTypeTreeRef) -> *const c_char {
+        unimplemented!()
+    }
+
+    pub(crate) unsafe fn EnzymeTypeTreeToStringFree(arg1: *const c_char) {
+        unimplemented!()
+    }
+
     pub(crate) fn set_inline(val: bool) {
         unimplemented!()
     }
@@ -169,3 +287,89 @@ pub(crate) mod Fallback_AD {
         unimplemented!()
     }
 }
+
+impl TypeTree {
+    pub(crate) fn new() -> TypeTree {
+        let inner = unsafe { EnzymeNewTypeTree() };
+        TypeTree { inner }
+    }
+
+    pub(crate) fn from_type(t: CConcreteType, ctx: &Context) -> TypeTree {
+        let inner = unsafe { EnzymeNewTypeTreeCT(t, ctx) };
+        TypeTree { inner }
+    }
+
+    pub(crate) fn merge(self, other: Self) -> Self {
+        unsafe {
+            EnzymeMergeTypeTree(self.inner, other.inner);
+        }
+        drop(other);
+        self
+    }
+
+    #[must_use]
+    pub(crate) fn shift(
+        self,
+        layout: &str,
+        offset: isize,
+        max_size: isize,
+        add_offset: usize,
+    ) -> Self {
+        let layout = std::ffi::CString::new(layout).unwrap();
+
+        unsafe {
+            EnzymeTypeTreeShiftIndiciesEq(
+                self.inner,
+                layout.as_ptr(),
+                offset as i64,
+                max_size as i64,
+                add_offset as u64,
+            );
+        }
+
+        self
+    }
+
+    pub(crate) fn insert(&mut self, indices: &[i64], ct: CConcreteType, ctx: &Context) {
+        unsafe {
+            EnzymeTypeTreeInsertEq(self.inner, indices.as_ptr(), indices.len(), ct, ctx);
+        }
+    }
+}
+
+impl Clone for TypeTree {
+    fn clone(&self) -> Self {
+        let inner = unsafe { EnzymeNewTypeTreeTR(self.inner) };
+        TypeTree { inner }
+    }
+}
+
+impl std::fmt::Display for TypeTree {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        let ptr = unsafe { EnzymeTypeTreeToString(self.inner) };
+        let cstr = unsafe { std::ffi::CStr::from_ptr(ptr) };
+        match cstr.to_str() {
+            Ok(x) => write!(f, "{}", x)?,
+            Err(err) => write!(f, "could not parse: {}", err)?,
+        }
+
+        // delete C string pointer
+        unsafe {
+            EnzymeTypeTreeToStringFree(ptr);
+        }
+
+        Ok(())
+    }
+}
+
+impl std::fmt::Debug for TypeTree {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        <Self as std::fmt::Display>::fmt(self, f)
+    }
+}
+
+impl Drop for TypeTree {
+    fn drop(&mut self) {
+        unsafe { EnzymeFreeTypeTree(self.inner) }
+    }
+}
diff --git a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
index 38a6a311954..afd2991a09c 100644
--- a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
@@ -25,8 +25,8 @@ use rustc_target::spec::SymbolVisibility;
 use super::RustString;
 use super::debuginfo::{
     DIArray, DIBuilder, DIDerivedType, DIDescriptor, DIEnumerator, DIFile, DIFlags,
-    DIGlobalVariableExpression, DILocation, DISPFlags, DIScope, DISubprogram, DISubrange,
-    DITemplateTypeParameter, DIType, DIVariable, DebugEmissionKind, DebugNameTableKind,
+    DIGlobalVariableExpression, DILocation, DISPFlags, DIScope, DISubprogram,
+    DITemplateTypeParameter, DIType, DebugEmissionKind, DebugNameTableKind,
 };
 use crate::llvm;
 
@@ -807,6 +807,8 @@ unsafe extern "C" {
     pub(crate) type Metadata;
     pub(crate) type BasicBlock;
     pub(crate) type Comdat;
+    /// `&'ll DbgRecord` represents `LLVMDbgRecordRef`.
+    pub(crate) type DbgRecord;
 }
 #[repr(C)]
 pub(crate) struct Builder<'a>(InvariantOpaque<'a>);
@@ -891,7 +893,6 @@ pub(crate) mod debuginfo {
     pub(crate) type DIVariable = DIDescriptor;
     pub(crate) type DIGlobalVariableExpression = DIDescriptor;
     pub(crate) type DIArray = DIDescriptor;
-    pub(crate) type DISubrange = DIDescriptor;
     pub(crate) type DIEnumerator = DIDescriptor;
     pub(crate) type DITemplateTypeParameter = DIDescriptor;
 
@@ -1992,6 +1993,59 @@ unsafe extern "C" {
         Scope: Option<&'ll Metadata>,
         AlignInBits: u32, // (optional; default is 0)
     ) -> &'ll Metadata;
+
+    pub(crate) fn LLVMDIBuilderGetOrCreateSubrange<'ll>(
+        Builder: &DIBuilder<'ll>,
+        LowerBound: i64,
+        Count: i64,
+    ) -> &'ll Metadata;
+
+    pub(crate) fn LLVMDIBuilderGetOrCreateArray<'ll>(
+        Builder: &DIBuilder<'ll>,
+        Data: *const Option<&'ll Metadata>,
+        NumElements: size_t,
+    ) -> &'ll Metadata;
+
+    pub(crate) fn LLVMDIBuilderCreateExpression<'ll>(
+        Builder: &DIBuilder<'ll>,
+        Addr: *const u64,
+        Length: size_t,
+    ) -> &'ll Metadata;
+
+    pub(crate) fn LLVMDIBuilderInsertDeclareRecordAtEnd<'ll>(
+        Builder: &DIBuilder<'ll>,
+        Storage: &'ll Value,
+        VarInfo: &'ll Metadata,
+        Expr: &'ll Metadata,
+        DebugLoc: &'ll Metadata,
+        Block: &'ll BasicBlock,
+    ) -> &'ll DbgRecord;
+
+    pub(crate) fn LLVMDIBuilderCreateAutoVariable<'ll>(
+        Builder: &DIBuilder<'ll>,
+        Scope: &'ll Metadata,
+        Name: *const c_uchar, // See "PTR_LEN_STR".
+        NameLen: size_t,
+        File: &'ll Metadata,
+        LineNo: c_uint,
+        Ty: &'ll Metadata,
+        AlwaysPreserve: llvm::Bool, // "If true, this descriptor will survive optimizations."
+        Flags: DIFlags,
+        AlignInBits: u32,
+    ) -> &'ll Metadata;
+
+    pub(crate) fn LLVMDIBuilderCreateParameterVariable<'ll>(
+        Builder: &DIBuilder<'ll>,
+        Scope: &'ll Metadata,
+        Name: *const c_uchar, // See "PTR_LEN_STR".
+        NameLen: size_t,
+        ArgNo: c_uint,
+        File: &'ll Metadata,
+        LineNo: c_uint,
+        Ty: &'ll Metadata,
+        AlwaysPreserve: llvm::Bool, // "If true, this descriptor will survive optimizations."
+        Flags: DIFlags,
+    ) -> &'ll Metadata;
 }
 
 #[link(name = "llvm-wrapper", kind = "static")]
@@ -2358,43 +2412,6 @@ unsafe extern "C" {
         AlignInBits: u32,
     ) -> &'a DIGlobalVariableExpression;
 
-    pub(crate) fn LLVMRustDIBuilderCreateVariable<'a>(
-        Builder: &DIBuilder<'a>,
-        Tag: c_uint,
-        Scope: &'a DIDescriptor,
-        Name: *const c_char,
-        NameLen: size_t,
-        File: &'a DIFile,
-        LineNo: c_uint,
-        Ty: &'a DIType,
-        AlwaysPreserve: bool,
-        Flags: DIFlags,
-        ArgNo: c_uint,
-        AlignInBits: u32,
-    ) -> &'a DIVariable;
-
-    pub(crate) fn LLVMRustDIBuilderGetOrCreateSubrange<'a>(
-        Builder: &DIBuilder<'a>,
-        Lo: i64,
-        Count: i64,
-    ) -> &'a DISubrange;
-
-    pub(crate) fn LLVMRustDIBuilderGetOrCreateArray<'a>(
-        Builder: &DIBuilder<'a>,
-        Ptr: *const Option<&'a DIDescriptor>,
-        Count: c_uint,
-    ) -> &'a DIArray;
-
-    pub(crate) fn LLVMRustDIBuilderInsertDeclareAtEnd<'a>(
-        Builder: &DIBuilder<'a>,
-        Val: &'a Value,
-        VarInfo: &'a DIVariable,
-        AddrOps: *const u64,
-        AddrOpsCount: c_uint,
-        DL: &'a DILocation,
-        InsertAtEnd: &'a BasicBlock,
-    );
-
     pub(crate) fn LLVMRustDIBuilderCreateEnumerator<'a>(
         Builder: &DIBuilder<'a>,
         Name: *const c_char,
diff --git a/compiler/rustc_codegen_llvm/src/typetree.rs b/compiler/rustc_codegen_llvm/src/typetree.rs
new file mode 100644
index 00000000000..7e263503700
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/typetree.rs
@@ -0,0 +1,122 @@
+use rustc_ast::expand::typetree::FncTree;
+#[cfg(feature = "llvm_enzyme")]
+use {
+    crate::attributes,
+    rustc_ast::expand::typetree::TypeTree as RustTypeTree,
+    std::ffi::{CString, c_char, c_uint},
+};
+
+use crate::llvm::{self, Value};
+
+#[cfg(feature = "llvm_enzyme")]
+fn to_enzyme_typetree(
+    rust_typetree: RustTypeTree,
+    _data_layout: &str,
+    llcx: &llvm::Context,
+) -> llvm::TypeTree {
+    let mut enzyme_tt = llvm::TypeTree::new();
+    process_typetree_recursive(&mut enzyme_tt, &rust_typetree, &[], llcx);
+    enzyme_tt
+}
+#[cfg(feature = "llvm_enzyme")]
+fn process_typetree_recursive(
+    enzyme_tt: &mut llvm::TypeTree,
+    rust_typetree: &RustTypeTree,
+    parent_indices: &[i64],
+    llcx: &llvm::Context,
+) {
+    for rust_type in &rust_typetree.0 {
+        let concrete_type = match rust_type.kind {
+            rustc_ast::expand::typetree::Kind::Anything => llvm::CConcreteType::DT_Anything,
+            rustc_ast::expand::typetree::Kind::Integer => llvm::CConcreteType::DT_Integer,
+            rustc_ast::expand::typetree::Kind::Pointer => llvm::CConcreteType::DT_Pointer,
+            rustc_ast::expand::typetree::Kind::Half => llvm::CConcreteType::DT_Half,
+            rustc_ast::expand::typetree::Kind::Float => llvm::CConcreteType::DT_Float,
+            rustc_ast::expand::typetree::Kind::Double => llvm::CConcreteType::DT_Double,
+            rustc_ast::expand::typetree::Kind::F128 => llvm::CConcreteType::DT_FP128,
+            rustc_ast::expand::typetree::Kind::Unknown => llvm::CConcreteType::DT_Unknown,
+        };
+
+        let mut indices = parent_indices.to_vec();
+        if !parent_indices.is_empty() {
+            indices.push(rust_type.offset as i64);
+        } else if rust_type.offset == -1 {
+            indices.push(-1);
+        } else {
+            indices.push(rust_type.offset as i64);
+        }
+
+        enzyme_tt.insert(&indices, concrete_type, llcx);
+
+        if rust_type.kind == rustc_ast::expand::typetree::Kind::Pointer
+            && !rust_type.child.0.is_empty()
+        {
+            process_typetree_recursive(enzyme_tt, &rust_type.child, &indices, llcx);
+        }
+    }
+}
+
+#[cfg(feature = "llvm_enzyme")]
+pub(crate) fn add_tt<'ll>(
+    llmod: &'ll llvm::Module,
+    llcx: &'ll llvm::Context,
+    fn_def: &'ll Value,
+    tt: FncTree,
+) {
+    let inputs = tt.args;
+    let ret_tt: RustTypeTree = tt.ret;
+
+    let llvm_data_layout: *const c_char = unsafe { llvm::LLVMGetDataLayoutStr(&*llmod) };
+    let llvm_data_layout =
+        std::str::from_utf8(unsafe { std::ffi::CStr::from_ptr(llvm_data_layout) }.to_bytes())
+            .expect("got a non-UTF8 data-layout from LLVM");
+
+    let attr_name = "enzyme_type";
+    let c_attr_name = CString::new(attr_name).unwrap();
+
+    for (i, input) in inputs.iter().enumerate() {
+        unsafe {
+            let enzyme_tt = to_enzyme_typetree(input.clone(), llvm_data_layout, llcx);
+            let c_str = llvm::EnzymeTypeTreeToString(enzyme_tt.inner);
+            let c_str = std::ffi::CStr::from_ptr(c_str);
+
+            let attr = llvm::LLVMCreateStringAttribute(
+                llcx,
+                c_attr_name.as_ptr(),
+                c_attr_name.as_bytes().len() as c_uint,
+                c_str.as_ptr(),
+                c_str.to_bytes().len() as c_uint,
+            );
+
+            attributes::apply_to_llfn(fn_def, llvm::AttributePlace::Argument(i as u32), &[attr]);
+            llvm::EnzymeTypeTreeToStringFree(c_str.as_ptr());
+        }
+    }
+
+    unsafe {
+        let enzyme_tt = to_enzyme_typetree(ret_tt, llvm_data_layout, llcx);
+        let c_str = llvm::EnzymeTypeTreeToString(enzyme_tt.inner);
+        let c_str = std::ffi::CStr::from_ptr(c_str);
+
+        let ret_attr = llvm::LLVMCreateStringAttribute(
+            llcx,
+            c_attr_name.as_ptr(),
+            c_attr_name.as_bytes().len() as c_uint,
+            c_str.as_ptr(),
+            c_str.to_bytes().len() as c_uint,
+        );
+
+        attributes::apply_to_llfn(fn_def, llvm::AttributePlace::ReturnValue, &[ret_attr]);
+        llvm::EnzymeTypeTreeToStringFree(c_str.as_ptr());
+    }
+}
+
+#[cfg(not(feature = "llvm_enzyme"))]
+pub(crate) fn add_tt<'ll>(
+    _llmod: &'ll llvm::Module,
+    _llcx: &'ll llvm::Context,
+    _fn_def: &'ll Value,
+    _tt: FncTree,
+) {
+    unimplemented!()
+}
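
To make the recursion above concrete, here is a hedged sketch of the kind of FncTree it consumes, for a hypothetical fn square(x: &f32) -> f32. Only the offset, kind, and child fields are used in the code above; the size field and the exact values chosen here are assumptions, and real trees are produced by rustc_middle::ty::fnc_typetrees rather than written by hand.

use rustc_ast::expand::typetree::{FncTree, Kind, Type, TypeTree};

fn example_fnc_tree() -> FncTree {
    // The argument is a pointer; at every offset of its pointee (-1) there is an f32.
    let pointee = TypeTree(vec![Type { offset: -1, size: 4, kind: Kind::Float, child: TypeTree(vec![]) }]);
    let arg = TypeTree(vec![Type { offset: 0, size: 8, kind: Kind::Pointer, child: pointee }]);
    // The return value is a plain f32.
    let ret = TypeTree(vec![Type { offset: -1, size: 4, kind: Kind::Float, child: TypeTree(vec![]) }]);
    FncTree { args: vec![arg], ret }
}
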
diff --git a/compiler/rustc_codegen_llvm/src/va_arg.rs b/compiler/rustc_codegen_llvm/src/va_arg.rs
index ab08125217f..234366e491c 100644
--- a/compiler/rustc_codegen_llvm/src/va_arg.rs
+++ b/compiler/rustc_codegen_llvm/src/va_arg.rs
@@ -193,7 +193,7 @@ fn emit_aapcs_va_arg<'ll, 'tcx>(
     // the offset again.
 
     bx.switch_to_block(maybe_reg);
-    if gr_type && layout.align.abi.bytes() > 8 {
+    if gr_type && layout.align.bytes() > 8 {
         reg_off_v = bx.add(reg_off_v, bx.const_i32(15));
         reg_off_v = bx.and(reg_off_v, bx.const_i32(-16));
     }
@@ -291,7 +291,7 @@ fn emit_powerpc_va_arg<'ll, 'tcx>(
         bx.inbounds_ptradd(va_list_addr, bx.const_usize(1)) // fpr
     };
 
-    let mut num_regs = bx.load(bx.type_i8(), num_regs_addr, dl.i8_align.abi);
+    let mut num_regs = bx.load(bx.type_i8(), num_regs_addr, dl.i8_align);
 
     // "Align" the register count when the type is passed as `i64`.
     if is_i64 || (is_f64 && is_soft_float_abi) {
@@ -329,7 +329,7 @@ fn emit_powerpc_va_arg<'ll, 'tcx>(
         // Increase the used-register count.
         let reg_incr = if is_i64 || (is_f64 && is_soft_float_abi) { 2 } else { 1 };
         let new_num_regs = bx.add(num_regs, bx.cx.const_u8(reg_incr));
-        bx.store(new_num_regs, num_regs_addr, dl.i8_align.abi);
+        bx.store(new_num_regs, num_regs_addr, dl.i8_align);
 
         bx.br(end);
 
@@ -339,7 +339,7 @@ fn emit_powerpc_va_arg<'ll, 'tcx>(
     let mem_addr = {
         bx.switch_to_block(in_mem);
 
-        bx.store(bx.const_u8(max_regs), num_regs_addr, dl.i8_align.abi);
+        bx.store(bx.const_u8(max_regs), num_regs_addr, dl.i8_align);
 
         // Everything in the overflow area is rounded up to a size of at least 4.
         let overflow_area_align = Align::from_bytes(4).unwrap();
@@ -738,6 +738,7 @@ fn copy_to_temporary_if_more_aligned<'ll, 'tcx>(
             src_align,
             bx.const_u32(layout.layout.size().bytes() as u32),
             MemFlags::empty(),
+            None,
         );
         tmp
     } else {
@@ -760,7 +761,7 @@ fn x86_64_sysv64_va_arg_from_memory<'ll, 'tcx>(
     // byte boundary if alignment needed by type exceeds 8 byte boundary.
     // It isn't stated explicitly in the standard, but in practice we use
     // alignment greater than 16 where necessary.
-    if layout.layout.align.abi.bytes() > 8 {
+    if layout.layout.align.bytes() > 8 {
         unreachable!("all instances of VaArgSafe have an alignment <= 8");
     }
 
@@ -813,7 +814,7 @@ fn emit_xtensa_va_arg<'ll, 'tcx>(
     let va_ndx_offset = va_reg_offset + 4;
     let offset_ptr = bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(va_ndx_offset));
 
-    let offset = bx.load(bx.type_i32(), offset_ptr, bx.tcx().data_layout.i32_align.abi);
+    let offset = bx.load(bx.type_i32(), offset_ptr, bx.tcx().data_layout.i32_align);
     let offset = round_up_to_alignment(bx, offset, layout.align.abi);
 
     let slot_size = layout.size.align_to(Align::from_bytes(4).unwrap()).bytes() as i32;
diff --git a/compiler/rustc_codegen_ssa/src/back/link.rs b/compiler/rustc_codegen_ssa/src/back/link.rs
index d6c304c1b14..db2f2dd65b0 100644
--- a/compiler/rustc_codegen_ssa/src/back/link.rs
+++ b/compiler/rustc_codegen_ssa/src/back/link.rs
@@ -79,6 +79,7 @@ pub fn link_binary(
     codegen_results: CodegenResults,
     metadata: EncodedMetadata,
     outputs: &OutputFilenames,
+    codegen_backend: &'static str,
 ) {
     let _timer = sess.timer("link_binary");
     let output_metadata = sess.opts.output_types.contains_key(&OutputType::Metadata);
@@ -154,6 +155,7 @@ pub fn link_binary(
                         &codegen_results,
                         &metadata,
                         path.as_ref(),
+                        codegen_backend,
                     );
                 }
             }
@@ -680,6 +682,7 @@ fn link_natively(
     codegen_results: &CodegenResults,
     metadata: &EncodedMetadata,
     tmpdir: &Path,
+    codegen_backend: &'static str,
 ) {
     info!("preparing {:?} to {:?}", crate_type, out_filename);
     let (linker_path, flavor) = linker_and_flavor(sess);
@@ -705,6 +708,7 @@ fn link_natively(
         codegen_results,
         metadata,
         self_contained_components,
+        codegen_backend,
     );
 
     linker::disable_localization(&mut cmd);
@@ -2208,6 +2212,7 @@ fn linker_with_args(
     codegen_results: &CodegenResults,
     metadata: &EncodedMetadata,
     self_contained_components: LinkSelfContainedComponents,
+    codegen_backend: &'static str,
 ) -> Command {
     let self_contained_crt_objects = self_contained_components.is_crt_objects_enabled();
     let cmd = &mut *super::linker::get_linker(
@@ -2216,6 +2221,7 @@ fn linker_with_args(
         flavor,
         self_contained_components.are_any_components_enabled(),
         &codegen_results.crate_info.target_cpu,
+        codegen_backend,
     );
     let link_output_kind = link_output_kind(sess, crate_type);
 
diff --git a/compiler/rustc_codegen_ssa/src/back/linker.rs b/compiler/rustc_codegen_ssa/src/back/linker.rs
index 624ab1b5084..e644a43f883 100644
--- a/compiler/rustc_codegen_ssa/src/back/linker.rs
+++ b/compiler/rustc_codegen_ssa/src/back/linker.rs
@@ -52,6 +52,7 @@ pub(crate) fn get_linker<'a>(
     flavor: LinkerFlavor,
     self_contained: bool,
     target_cpu: &'a str,
+    codegen_backend: &'static str,
 ) -> Box<dyn Linker + 'a> {
     let msvc_tool = find_msvc_tools::find_tool(&sess.target.arch, "link.exe");
 
@@ -154,6 +155,7 @@ pub(crate) fn get_linker<'a>(
             is_ld: cc == Cc::No,
             is_gnu: flavor.is_gnu(),
             uses_lld: flavor.uses_lld(),
+            codegen_backend,
         }) as Box<dyn Linker>,
         LinkerFlavor::Msvc(..) => Box::new(MsvcLinker { cmd, sess }) as Box<dyn Linker>,
         LinkerFlavor::EmCc => Box::new(EmLinker { cmd, sess }) as Box<dyn Linker>,
@@ -367,6 +369,7 @@ struct GccLinker<'a> {
     is_ld: bool,
     is_gnu: bool,
     uses_lld: bool,
+    codegen_backend: &'static str,
 }
 
 impl<'a> GccLinker<'a> {
@@ -423,9 +426,15 @@ impl<'a> GccLinker<'a> {
         if let Some(path) = &self.sess.opts.unstable_opts.profile_sample_use {
             self.link_arg(&format!("-plugin-opt=sample-profile={}", path.display()));
         };
+        let prefix = if self.codegen_backend == "gcc" {
+            // The GCC linker plugin requires a leading dash.
+            "-"
+        } else {
+            ""
+        };
         self.link_args(&[
-            &format!("-plugin-opt={opt_level}"),
-            &format!("-plugin-opt=mcpu={}", self.target_cpu),
+            &format!("-plugin-opt={prefix}{opt_level}"),
+            &format!("-plugin-opt={prefix}mcpu={}", self.target_cpu),
         ]);
     }
 
diff --git a/compiler/rustc_codegen_ssa/src/mir/block.rs b/compiler/rustc_codegen_ssa/src/mir/block.rs
index 1b218a0d339..b2dc4fe32b0 100644
--- a/compiler/rustc_codegen_ssa/src/mir/block.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/block.rs
@@ -1626,6 +1626,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     align,
                     bx.const_usize(copy_bytes),
                     MemFlags::empty(),
+                    None,
                 );
                 // ...and then load it with the ABI type.
                 llval = load_cast(bx, cast, llscratch, scratch_align);
diff --git a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
index 3c667b8e882..befa00c6861 100644
--- a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
@@ -30,7 +30,7 @@ fn copy_intrinsic<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
     if allow_overlap {
         bx.memmove(dst, align, src, align, size, flags);
     } else {
-        bx.memcpy(dst, align, src, align, size, flags);
+        bx.memcpy(dst, align, src, align, size, flags, None);
     }
 }
 
diff --git a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
index 2602bf82095..0a4b0f8d494 100644
--- a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
@@ -617,7 +617,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     }
                     mir::NullOp::AlignOf => {
                         assert!(bx.cx().type_is_sized(ty));
-                        let val = layout.align.abi.bytes();
+                        let val = layout.align.bytes();
                         bx.cx().const_usize(val)
                     }
                     mir::NullOp::OffsetOf(fields) => {
diff --git a/compiler/rustc_codegen_ssa/src/mir/statement.rs b/compiler/rustc_codegen_ssa/src/mir/statement.rs
index f164e0f9123..0a50d7f18db 100644
--- a/compiler/rustc_codegen_ssa/src/mir/statement.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/statement.rs
@@ -90,7 +90,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 let align = pointee_layout.align;
                 let dst = dst_val.immediate();
                 let src = src_val.immediate();
-                bx.memcpy(dst, align, src, align, bytes, crate::MemFlags::empty());
+                bx.memcpy(dst, align, src, align, bytes, crate::MemFlags::empty(), None);
             }
             mir::StatementKind::FakeRead(..)
             | mir::StatementKind::Retag { .. }
diff --git a/compiler/rustc_codegen_ssa/src/size_of_val.rs b/compiler/rustc_codegen_ssa/src/size_of_val.rs
index 577012151e4..e1bd8014d7a 100644
--- a/compiler/rustc_codegen_ssa/src/size_of_val.rs
+++ b/compiler/rustc_codegen_ssa/src/size_of_val.rs
@@ -21,7 +21,7 @@ pub fn size_and_align_of_dst<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
     trace!("size_and_align_of_dst(ty={}, info={:?}): layout: {:?}", t, info, layout);
     if layout.is_sized() {
         let size = bx.const_usize(layout.size.bytes());
-        let align = bx.const_usize(layout.align.abi.bytes());
+        let align = bx.const_usize(layout.align.bytes());
         return (size, align);
     }
     match t.kind() {
@@ -49,7 +49,7 @@ pub fn size_and_align_of_dst<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
                 // All slice sizes must fit into `isize`, so this multiplication cannot
                 // wrap -- neither signed nor unsigned.
                 bx.unchecked_sumul(info.unwrap(), bx.const_usize(unit.size.bytes())),
-                bx.const_usize(unit.align.abi.bytes()),
+                bx.const_usize(unit.align.bytes()),
             )
         }
         ty::Foreign(_) => {
@@ -82,7 +82,7 @@ pub fn size_and_align_of_dst<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
 
             // This function does not return so we can now return whatever we want.
             let size = bx.const_usize(layout.size.bytes());
-            let align = bx.const_usize(layout.align.abi.bytes());
+            let align = bx.const_usize(layout.align.bytes());
             (size, align)
         }
         ty::Adt(..) | ty::Tuple(..) => {
@@ -94,7 +94,7 @@ pub fn size_and_align_of_dst<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
 
             let i = layout.fields.count() - 1;
             let unsized_offset_unadjusted = layout.fields.offset(i).bytes();
-            let sized_align = layout.align.abi.bytes();
+            let sized_align = layout.align.bytes();
             debug!(
                 "DST {} offset of dyn field: {}, statically sized align: {}",
                 t, unsized_offset_unadjusted, sized_align
diff --git a/compiler/rustc_codegen_ssa/src/traits/backend.rs b/compiler/rustc_codegen_ssa/src/traits/backend.rs
index 29ec7eb1da3..2400160075e 100644
--- a/compiler/rustc_codegen_ssa/src/traits/backend.rs
+++ b/compiler/rustc_codegen_ssa/src/traits/backend.rs
@@ -41,6 +41,8 @@ pub trait CodegenBackend {
     /// Called before `init` so that all other functions are able to emit translatable diagnostics.
     fn locale_resource(&self) -> &'static str;
 
+    fn name(&self) -> &'static str;
+
     fn init(&self, _sess: &Session) {}
 
     fn print(&self, _req: &PrintRequest, _out: &mut String, _sess: &Session) {}
@@ -96,7 +98,14 @@ pub trait CodegenBackend {
         metadata: EncodedMetadata,
         outputs: &OutputFilenames,
     ) {
-        link_binary(sess, &ArArchiveBuilderBuilder, codegen_results, metadata, outputs);
+        link_binary(
+            sess,
+            &ArArchiveBuilderBuilder,
+            codegen_results,
+            metadata,
+            outputs,
+            self.name(),
+        );
     }
 }
 
diff --git a/compiler/rustc_codegen_ssa/src/traits/builder.rs b/compiler/rustc_codegen_ssa/src/traits/builder.rs
index 4a5694e97fa..60296e36e0c 100644
--- a/compiler/rustc_codegen_ssa/src/traits/builder.rs
+++ b/compiler/rustc_codegen_ssa/src/traits/builder.rs
@@ -451,6 +451,7 @@ pub trait BuilderMethods<'a, 'tcx>:
         src_align: Align,
         size: Self::Value,
         flags: MemFlags,
+        tt: Option<rustc_ast::expand::typetree::FncTree>,
     );
     fn memmove(
         &mut self,
@@ -507,7 +508,7 @@ pub trait BuilderMethods<'a, 'tcx>:
             temp.val.store_with_flags(self, dst.with_type(layout), flags);
         } else if !layout.is_zst() {
             let bytes = self.const_usize(layout.size.bytes());
-            self.memcpy(dst.llval, dst.align, src.llval, src.align, bytes, flags);
+            self.memcpy(dst.llval, dst.align, src.llval, src.align, bytes, flags, None);
         }
     }
 
diff --git a/compiler/rustc_const_eval/src/interpret/operator.rs b/compiler/rustc_const_eval/src/interpret/operator.rs
index 74f8a0a7b09..f0819423aa0 100644
--- a/compiler/rustc_const_eval/src/interpret/operator.rs
+++ b/compiler/rustc_const_eval/src/interpret/operator.rs
@@ -528,7 +528,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
                 if !layout.is_sized() {
                     span_bug!(self.cur_span(), "unsized type for `NullaryOp::AlignOf`");
                 }
-                let val = layout.align.abi.bytes();
+                let val = layout.align.bytes();
                 ImmTy::from_uint(val, usize_layout())
             }
             OffsetOf(fields) => {
diff --git a/compiler/rustc_const_eval/src/util/alignment.rs b/compiler/rustc_const_eval/src/util/alignment.rs
index 9507b24f603..9aafc7efd8a 100644
--- a/compiler/rustc_const_eval/src/util/alignment.rs
+++ b/compiler/rustc_const_eval/src/util/alignment.rs
@@ -37,7 +37,7 @@ where
             debug!(
                 "is_disaligned({:?}) - align = {}, packed = {}; not disaligned",
                 place,
-                layout.align.abi.bytes(),
+                layout.align.bytes(),
                 pack.bytes()
             );
             false
diff --git a/compiler/rustc_const_eval/src/util/check_validity_requirement.rs b/compiler/rustc_const_eval/src/util/check_validity_requirement.rs
index b1f29598750..1dea7e4252d 100644
--- a/compiler/rustc_const_eval/src/util/check_validity_requirement.rs
+++ b/compiler/rustc_const_eval/src/util/check_validity_requirement.rs
@@ -129,7 +129,7 @@ fn check_validity_requirement_lax<'tcx>(
     if let Some(pointee) = this.ty.builtin_deref(false) {
         let pointee = cx.layout_of(pointee)?;
         // We need to ensure that the LLVM attributes `aligned` and `dereferenceable(size)` are satisfied.
-        if pointee.align.abi.bytes() > 1 {
+        if pointee.align.bytes() > 1 {
             // 0x01-filling is not aligned.
             return Ok(false);
         }
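
A quick standalone check of the comment's claim that a 0x01-filled pointer cannot be aligned: the resulting address value is odd, so it fails every alignment greater than 1.

fn main() {
    let addr: u64 = u64::from_ne_bytes([0x01; 8]); // 0x0101_0101_0101_0101
    assert_eq!(addr % 2, 1); // odd, so not even 2-aligned
    for align in [2u64, 4, 8, 16] {
        assert_ne!(addr % align, 0);
    }
}
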
diff --git a/compiler/rustc_expand/src/mbe/macro_check.rs b/compiler/rustc_expand/src/mbe/macro_check.rs
index ebd6e887f7d..0eae44a05e7 100644
--- a/compiler/rustc_expand/src/mbe/macro_check.rs
+++ b/compiler/rustc_expand/src/mbe/macro_check.rs
@@ -210,8 +210,7 @@ pub(super) fn check_meta_variables(
     guar.map_or(Ok(()), Err)
 }
 
-/// Checks `lhs` as part of the LHS of a macro definition, extends `binders` with new binders, and
-/// sets `valid` to false in case of errors.
+/// Checks `lhs` as part of the LHS of a macro definition.
 ///
 /// Arguments:
 /// - `psess` is used to emit diagnostics and lints
@@ -306,8 +305,7 @@ fn get_binder_info<'a>(
     binders.get(&name).or_else(|| macros.find_map(|state| state.binders.get(&name)))
 }
 
-/// Checks `rhs` as part of the RHS of a macro definition and sets `valid` to false in case of
-/// errors.
+/// Checks `rhs` as part of the RHS of a macro definition.
 ///
 /// Arguments:
 /// - `psess` is used to emit diagnostics and lints
@@ -372,7 +370,7 @@ enum NestedMacroState {
 }
 
 /// Checks `tts` as part of the RHS of a macro definition, tries to recognize nested macro
-/// definitions, and sets `valid` to false in case of errors.
+/// definitions.
 ///
 /// Arguments:
 /// - `psess` is used to emit diagnostics and lints
@@ -491,8 +489,7 @@ fn check_nested_occurrences(
     }
 }
 
-/// Checks the body of nested macro, returns where the check stopped, and sets `valid` to false in
-/// case of errors.
+/// Checks the body of a nested macro and returns where the check stopped.
 ///
 /// The token trees are checked as long as they look like a list of (LHS) => {RHS} token trees. This
 /// check is a best-effort to detect a macro definition. It returns the position in `tts` where we
diff --git a/compiler/rustc_feature/src/removed.rs b/compiler/rustc_feature/src/removed.rs
index 32115535e99..539d67e0b6b 100644
--- a/compiler/rustc_feature/src/removed.rs
+++ b/compiler/rustc_feature/src/removed.rs
@@ -101,6 +101,10 @@ declare_features! (
      Some("never properly implemented; requires significant design work"), 127655),
     /// Allows deriving traits as per `SmartPointer` specification
     (removed, derive_smart_pointer, "1.84.0", Some(123430), Some("replaced by `CoercePointee`"), 131284),
+    /// Tells rustdoc to automatically generate `#[doc(cfg(...))]`.
+    (removed, doc_auto_cfg, "CURRENT_RUSTC_VERSION", Some(43781), Some("merged into `doc_cfg`"), 138907),
+    /// Allows `#[doc(cfg_hide(...))]`.
+    (removed, doc_cfg_hide, "CURRENT_RUSTC_VERSION", Some(43781), Some("merged into `doc_cfg`"), 138907),
     /// Allows using `#[doc(keyword = "...")]`.
     (removed, doc_keyword, "1.58.0", Some(51315),
      Some("merged into `#![feature(rustdoc_internals)]`"), 90420),
diff --git a/compiler/rustc_feature/src/unstable.rs b/compiler/rustc_feature/src/unstable.rs
index 6ef0df72365..e63f29a9570 100644
--- a/compiler/rustc_feature/src/unstable.rs
+++ b/compiler/rustc_feature/src/unstable.rs
@@ -472,12 +472,8 @@ declare_features! (
     (incomplete, deref_patterns, "1.79.0", Some(87121)),
     /// Allows deriving the From trait on single-field structs.
     (unstable, derive_from, "1.91.0", Some(144889)),
-    /// Tells rustdoc to automatically generate `#[doc(cfg(...))]`.
-    (unstable, doc_auto_cfg, "1.58.0", Some(43781)),
     /// Allows `#[doc(cfg(...))]`.
     (unstable, doc_cfg, "1.21.0", Some(43781)),
-    /// Allows `#[doc(cfg_hide(...))]`.
-    (unstable, doc_cfg_hide, "1.57.0", Some(43781)),
     /// Allows `#[doc(masked)]`.
     (unstable, doc_masked, "1.21.0", Some(44027)),
     /// Allows features to allow target_feature to better interact with traits.
@@ -554,7 +550,7 @@ declare_features! (
     /// Allows fused `loop`/`match` for direct intraprocedural jumps.
     (incomplete, loop_match, "1.90.0", Some(132306)),
     /// Allow `macro_rules!` attribute rules
-    (unstable, macro_attr, "1.91.0", Some(83527)),
+    (unstable, macro_attr, "1.91.0", Some(143547)),
     /// Allow `macro_rules!` derive rules
     (unstable, macro_derive, "1.91.0", Some(143549)),
     /// Give access to additional metadata about declarative macro meta-variables.
diff --git a/compiler/rustc_hir/src/hir.rs b/compiler/rustc_hir/src/hir.rs
index 493236718a8..bc1c47e95c3 100644
--- a/compiler/rustc_hir/src/hir.rs
+++ b/compiler/rustc_hir/src/hir.rs
@@ -1298,10 +1298,7 @@ impl AttributeExt for Attribute {
     #[inline]
     fn path_matches(&self, name: &[Symbol]) -> bool {
         match &self {
-            Attribute::Unparsed(n) => {
-                n.path.segments.len() == name.len()
-                    && n.path.segments.iter().zip(name).all(|(s, n)| s.name == *n)
-            }
+            Attribute::Unparsed(n) => n.path.segments.iter().map(|ident| &ident.name).eq(name),
             _ => false,
         }
     }
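
The `path_matches` rewrite above drops the explicit length comparison because `Iterator::eq` already returns `false` when one iterator runs out of items before the other; the `eq_by` rewrites further down rely on the same property with a custom comparison closure. A minimal stable-Rust sketch of that behaviour (an illustration of the iterator semantics, not compiler code):

    fn main() {
        let segments = ["core", "marker", "Send"];
        let name = ["core", "marker"];

        // A zip-based comparison silently ignores the trailing "Send" segment,
        // which is why the old code also compared lengths explicitly ...
        let zip_all = segments.iter().zip(name.iter()).all(|(s, n)| s == n);

        // ... whereas `Iterator::eq` compares element-wise *and* fails on a
        // length mismatch, so no separate length check is needed.
        let iter_eq = segments.iter().eq(name.iter());

        assert!(zip_all);
        assert!(!iter_eq);
    }
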
diff --git a/compiler/rustc_hir/src/lang_items.rs b/compiler/rustc_hir/src/lang_items.rs
index 2e099a97b65..311cf8f995c 100644
--- a/compiler/rustc_hir/src/lang_items.rs
+++ b/compiler/rustc_hir/src/lang_items.rs
@@ -440,6 +440,7 @@ language_item_table! {
 
     // Reborrowing related lang-items
     Reborrow,                sym::reborrow,            reborrow,                   Target::Trait,          GenericRequirement::Exact(0);
+    CoerceShared,            sym::coerce_shared,       coerce_shared,              Target::Trait,          GenericRequirement::Exact(0);
 }
 
 /// The requirement imposed on the generics of a lang item
diff --git a/compiler/rustc_hir/src/lints.rs b/compiler/rustc_hir/src/lints.rs
index b7a0a6a0c19..c9de6f6b5d5 100644
--- a/compiler/rustc_hir/src/lints.rs
+++ b/compiler/rustc_hir/src/lints.rs
@@ -31,6 +31,12 @@ pub struct AttributeLint<Id> {
 
 #[derive(Clone, Debug, HashStable_Generic)]
 pub enum AttributeLintKind {
+    /// Copy of `IllFormedAttributeInput`
+    /// specifically for the `invalid_macro_export_arguments` lint until that is removed,
+    /// see <https://github.com/rust-lang/rust/pull/143857#issuecomment-3079175663>
+    InvalidMacroExportArguments {
+        suggestions: Vec<String>,
+    },
     UnusedDuplicate {
         this: Span,
         other: Span,
@@ -41,13 +47,8 @@ pub enum AttributeLintKind {
     },
     EmptyAttribute {
         first_span: Span,
-    },
-
-    /// Copy of `IllFormedAttributeInput`
-    /// specifically for the `invalid_macro_export_arguments` lint until that is removed,
-    /// see <https://github.com/rust-lang/rust/pull/143857#issuecomment-3079175663>
-    InvalidMacroExportArguments {
-        suggestions: Vec<String>,
+        attr_path: AttrPath,
+        valid_without_list: bool,
     },
     InvalidTarget {
         name: AttrPath,
diff --git a/compiler/rustc_hir_analysis/src/collect.rs b/compiler/rustc_hir_analysis/src/collect.rs
index b72e743f95b..02baaec3713 100644
--- a/compiler/rustc_hir_analysis/src/collect.rs
+++ b/compiler/rustc_hir_analysis/src/collect.rs
@@ -1140,7 +1140,7 @@ fn recover_infer_ret_ty<'tcx>(
     // recursive function definition to leak out into the fn sig.
     let mut recovered_ret_ty = None;
     if let Some(suggestable_ret_ty) = ret_ty.make_suggestable(tcx, false, None) {
-        diag.span_suggestion(
+        diag.span_suggestion_verbose(
             infer_ret_ty.span,
             "replace with the correct return type",
             suggestable_ret_ty,
@@ -1152,7 +1152,7 @@ fn recover_infer_ret_ty<'tcx>(
         tcx.param_env(def_id),
         ret_ty,
     ) {
-        diag.span_suggestion(
+        diag.span_suggestion_verbose(
             infer_ret_ty.span,
             "replace with an appropriate return type",
             sugg,
diff --git a/compiler/rustc_hir_analysis/src/constrained_generic_params.rs b/compiler/rustc_hir_analysis/src/constrained_generic_params.rs
index 366b3943a05..2a633810cd7 100644
--- a/compiler/rustc_hir_analysis/src/constrained_generic_params.rs
+++ b/compiler/rustc_hir_analysis/src/constrained_generic_params.rs
@@ -4,7 +4,7 @@ use rustc_middle::ty::{self, Ty, TyCtxt, TypeFoldable, TypeSuperVisitable, TypeV
 use rustc_span::Span;
 use tracing::debug;
 
-#[derive(Clone, PartialEq, Eq, Hash, Debug)]
+#[derive(Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)]
 pub(crate) struct Parameter(pub u32);
 
 impl From<ty::ParamTy> for Parameter {
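
Deriving `PartialOrd`/`Ord` here means `Parameter` values order by their inner `u32`, which is what lets the later `min_specialization` change replace `sort_by_key(|param| param.0)` with `sort_unstable()`. A small self-contained sketch of why the two orderings coincide, using a local stand-in for the real type:

    // Local stand-in with the same shape as the compiler's `Parameter(pub u32)`.
    #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
    struct Parameter(u32);

    fn main() {
        let mut by_derived_ord = vec![Parameter(3), Parameter(1), Parameter(2)];
        let mut by_key = by_derived_ord.clone();

        // The derived `Ord` on a single-field tuple struct compares that field,
        // so sorting with it is equivalent to sorting by key on `.0`.
        by_derived_ord.sort_unstable();
        by_key.sort_by_key(|param| param.0);

        assert_eq!(by_derived_ord, by_key);
    }
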
diff --git a/compiler/rustc_hir_analysis/src/hir_ty_lowering/cmse.rs b/compiler/rustc_hir_analysis/src/hir_ty_lowering/cmse.rs
index 81deb35920a..0a41659ec66 100644
--- a/compiler/rustc_hir_analysis/src/hir_ty_lowering/cmse.rs
+++ b/compiler/rustc_hir_analysis/src/hir_ty_lowering/cmse.rs
@@ -134,11 +134,12 @@ fn is_valid_cmse_inputs<'tcx>(
 
     // this type is only used for layout computation, which does not rely on regions
     let fn_sig = tcx.instantiate_bound_regions_with_erased(fn_sig);
+    let fn_sig = tcx.erase_and_anonymize_regions(fn_sig);
 
     for (index, ty) in fn_sig.inputs().iter().enumerate() {
         let layout = tcx.layout_of(ty::TypingEnv::fully_monomorphized().as_query_input(*ty))?;
 
-        let align = layout.layout.align().abi.bytes();
+        let align = layout.layout.align().bytes();
         let size = layout.layout.size().bytes();
 
         accum += size;
diff --git a/compiler/rustc_hir_analysis/src/impl_wf_check/min_specialization.rs b/compiler/rustc_hir_analysis/src/impl_wf_check/min_specialization.rs
index b38639ed8c6..13c744ab461 100644
--- a/compiler/rustc_hir_analysis/src/impl_wf_check/min_specialization.rs
+++ b/compiler/rustc_hir_analysis/src/impl_wf_check/min_specialization.rs
@@ -275,7 +275,7 @@ fn check_duplicate_params<'tcx>(
     span: Span,
 ) -> Result<(), ErrorGuaranteed> {
     let mut base_params = cgp::parameters_for(tcx, parent_args, true);
-    base_params.sort_by_key(|param| param.0);
+    base_params.sort_unstable();
     if let (_, [duplicate, ..]) = base_params.partition_dedup() {
         let param = impl1_args[duplicate.0 as usize];
         return Err(tcx
diff --git a/compiler/rustc_hir_typeck/src/fn_ctxt/checks.rs b/compiler/rustc_hir_typeck/src/fn_ctxt/checks.rs
index 7ca8580e098..c8943d4634e 100644
--- a/compiler/rustc_hir_typeck/src/fn_ctxt/checks.rs
+++ b/compiler/rustc_hir_typeck/src/fn_ctxt/checks.rs
@@ -2803,9 +2803,7 @@ impl<'a, 'b, 'tcx> ArgMatchingCtxt<'a, 'b, 'tcx> {
         if let Some((assoc, fn_sig)) = self.similar_assoc(call_name)
             && fn_sig.inputs()[1..]
                 .iter()
-                .zip(input_types.iter())
-                .all(|(expected, found)| self.may_coerce(*expected, *found))
-            && fn_sig.inputs()[1..].len() == input_types.len()
+                .eq_by(input_types, |expected, found| self.may_coerce(*expected, found))
         {
             let assoc_name = assoc.name();
             err.span_suggestion_verbose(
diff --git a/compiler/rustc_hir_typeck/src/lib.rs b/compiler/rustc_hir_typeck/src/lib.rs
index 43a23822fd1..acc0481e457 100644
--- a/compiler/rustc_hir_typeck/src/lib.rs
+++ b/compiler/rustc_hir_typeck/src/lib.rs
@@ -5,6 +5,7 @@
 #![feature(box_patterns)]
 #![feature(if_let_guard)]
 #![feature(iter_intersperse)]
+#![feature(iter_order_by)]
 #![feature(never_type)]
 // tidy-alphabetical-end
 
diff --git a/compiler/rustc_hir_typeck/src/method/suggest.rs b/compiler/rustc_hir_typeck/src/method/suggest.rs
index 024b9ee08c2..44602e62899 100644
--- a/compiler/rustc_hir_typeck/src/method/suggest.rs
+++ b/compiler/rustc_hir_typeck/src/method/suggest.rs
@@ -1914,9 +1914,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
                         if let Some(ref args) = call_args
                             && fn_sig.inputs()[1..]
                                 .iter()
-                                .zip(args.into_iter())
-                                .all(|(expected, found)| self.may_coerce(*expected, *found))
-                            && fn_sig.inputs()[1..].len() == args.len()
+                                .eq_by(args, |expected, found| self.may_coerce(*expected, *found))
                         {
                             err.span_suggestion_verbose(
                                 item_name.span,
diff --git a/compiler/rustc_interface/src/passes.rs b/compiler/rustc_interface/src/passes.rs
index 761a5c80918..c1bba0b0197 100644
--- a/compiler/rustc_interface/src/passes.rs
+++ b/compiler/rustc_interface/src/passes.rs
@@ -1122,18 +1122,6 @@ fn run_required_analyses(tcx: TyCtxt<'_>) {
 
     sess.time("layout_testing", || layout_test::test_layout(tcx));
     sess.time("abi_testing", || abi_test::test_abi(tcx));
-
-    // If `-Zvalidate-mir` is set, we also want to compute the final MIR for each item
-    // (either its `mir_for_ctfe` or `optimized_mir`) since that helps uncover any bugs
-    // in MIR optimizations that may only be reachable through codegen, or other codepaths
-    // that requires the optimized/ctfe MIR, coroutine bodies, or evaluating consts.
-    if tcx.sess.opts.unstable_opts.validate_mir {
-        sess.time("ensuring_final_MIR_is_computable", || {
-            tcx.par_hir_body_owners(|def_id| {
-                tcx.instance_mir(ty::InstanceKind::Item(def_id.into()));
-            });
-        });
-    }
 }
 
 /// Runs the type-checking, region checking and other miscellaneous analysis
@@ -1199,6 +1187,20 @@ fn analysis(tcx: TyCtxt<'_>, (): ()) {
         // we will fail to emit overlap diagnostics. Thus we invoke it here unconditionally.
         let _ = tcx.all_diagnostic_items(());
     });
+
+    // If `-Zvalidate-mir` is set, we also want to compute the final MIR for each item
+    // (either its `mir_for_ctfe` or `optimized_mir`) since that helps uncover any bugs
+    // in MIR optimizations that may only be reachable through codegen, or other codepaths
+    // that require the optimized/ctfe MIR, coroutine bodies, or evaluating consts.
+    // Nevertheless, wait until type checking has finished, as optimizing code that does not
+    // type-check is very prone to ICEs.
+    if tcx.sess.opts.unstable_opts.validate_mir {
+        sess.time("ensuring_final_MIR_is_computable", || {
+            tcx.par_hir_body_owners(|def_id| {
+                tcx.instance_mir(ty::InstanceKind::Item(def_id.into()));
+            });
+        });
+    }
 }
 
 /// Runs the codegen backend, after which the AST and analysis can
diff --git a/compiler/rustc_interface/src/tests.rs b/compiler/rustc_interface/src/tests.rs
index 800f5efee41..7e5186db4ea 100644
--- a/compiler/rustc_interface/src/tests.rs
+++ b/compiler/rustc_interface/src/tests.rs
@@ -765,7 +765,7 @@ fn test_unstable_options_tracking_hash() {
     tracked!(allow_features, Some(vec![String::from("lang_items")]));
     tracked!(always_encode_mir, true);
     tracked!(assume_incomplete_release, true);
-    tracked!(autodiff, vec![AutoDiff::Enable]);
+    tracked!(autodiff, vec![AutoDiff::Enable, AutoDiff::NoTT]);
     tracked!(binary_dep_depinfo, true);
     tracked!(box_noalias, false);
     tracked!(
diff --git a/compiler/rustc_interface/src/util.rs b/compiler/rustc_interface/src/util.rs
index 76ccd12797e..58ec72b5b45 100644
--- a/compiler/rustc_interface/src/util.rs
+++ b/compiler/rustc_interface/src/util.rs
@@ -542,6 +542,7 @@ pub fn build_output_filenames(attrs: &[ast::Attribute], sess: &Session) -> Outpu
                 stem,
                 None,
                 sess.io.temps_dir.clone(),
+                sess.opts.unstable_opts.split_dwarf_out_dir.clone(),
                 sess.opts.cg.extra_filename.clone(),
                 sess.opts.output_types.clone(),
             )
@@ -571,6 +572,7 @@ pub fn build_output_filenames(attrs: &[ast::Attribute], sess: &Session) -> Outpu
                 out_filestem,
                 ofile,
                 sess.io.temps_dir.clone(),
+                sess.opts.unstable_opts.split_dwarf_out_dir.clone(),
                 sess.opts.cg.extra_filename.clone(),
                 sess.opts.output_types.clone(),
             )
diff --git a/compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp b/compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp
index 013d68fa3e4..2e9fd6754f1 100644
--- a/compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp
+++ b/compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp
@@ -569,25 +569,43 @@ extern "C" LLVMRustResult LLVMRustOptimize(
   }
 
   std::optional<PGOOptions> PGOOpt;
+#if LLVM_VERSION_LT(22, 0)
   auto FS = vfs::getRealFileSystem();
+#endif
   if (PGOGenPath) {
     assert(!PGOUsePath && !PGOSampleUsePath);
     PGOOpt = PGOOptions(
+#if LLVM_VERSION_GE(22, 0)
+        PGOGenPath, "", "", "", PGOOptions::IRInstr, PGOOptions::NoCSAction,
+#else
         PGOGenPath, "", "", "", FS, PGOOptions::IRInstr, PGOOptions::NoCSAction,
+#endif
         PGOOptions::ColdFuncOpt::Default, DebugInfoForProfiling);
   } else if (PGOUsePath) {
     assert(!PGOSampleUsePath);
     PGOOpt = PGOOptions(
+#if LLVM_VERSION_GE(22, 0)
+        PGOUsePath, "", "", "", PGOOptions::IRUse, PGOOptions::NoCSAction,
+#else
         PGOUsePath, "", "", "", FS, PGOOptions::IRUse, PGOOptions::NoCSAction,
+#endif
         PGOOptions::ColdFuncOpt::Default, DebugInfoForProfiling);
   } else if (PGOSampleUsePath) {
     PGOOpt =
+#if LLVM_VERSION_GE(22, 0)
+        PGOOptions(PGOSampleUsePath, "", "", "", PGOOptions::SampleUse,
+#else
         PGOOptions(PGOSampleUsePath, "", "", "", FS, PGOOptions::SampleUse,
+#endif
                    PGOOptions::NoCSAction, PGOOptions::ColdFuncOpt::Default,
                    DebugInfoForProfiling);
   } else if (DebugInfoForProfiling) {
     PGOOpt = PGOOptions(
+#if LLVM_VERSION_GE(22, 0)
+        "", "", "", "", PGOOptions::NoAction, PGOOptions::NoCSAction,
+#else
         "", "", "", "", FS, PGOOptions::NoAction, PGOOptions::NoCSAction,
+#endif
         PGOOptions::ColdFuncOpt::Default, DebugInfoForProfiling);
   }
 
diff --git a/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp b/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp
index 9953f5e1731..4a778125918 100644
--- a/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp
+++ b/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp
@@ -990,14 +990,6 @@ extern "C" void LLVMRustGlobalAddMetadata(LLVMValueRef Global, unsigned Kind,
   unwrap<GlobalObject>(Global)->addMetadata(Kind, *unwrap<MDNode>(MD));
 }
 
-extern "C" LLVMDIBuilderRef LLVMRustDIBuilderCreate(LLVMModuleRef M) {
-  return wrap(new DIBuilder(*unwrap(M)));
-}
-
-extern "C" void LLVMRustDIBuilderDispose(LLVMDIBuilderRef Builder) {
-  delete unwrap(Builder);
-}
-
 extern "C" LLVMMetadataRef LLVMRustDIBuilderCreateCompileUnit(
     LLVMDIBuilderRef Builder, unsigned Lang, LLVMMetadataRef FileRef,
     const char *Producer, size_t ProducerLen, bool isOptimized,
@@ -1129,51 +1121,6 @@ extern "C" LLVMMetadataRef LLVMRustDIBuilderCreateStaticVariable(
   return wrap(VarExpr);
 }
 
-extern "C" LLVMMetadataRef LLVMRustDIBuilderCreateVariable(
-    LLVMDIBuilderRef Builder, unsigned Tag, LLVMMetadataRef Scope,
-    const char *Name, size_t NameLen, LLVMMetadataRef File, unsigned LineNo,
-    LLVMMetadataRef Ty, bool AlwaysPreserve, LLVMDIFlags Flags, unsigned ArgNo,
-    uint32_t AlignInBits) {
-  if (Tag == 0x100) { // DW_TAG_auto_variable
-    return wrap(unwrap(Builder)->createAutoVariable(
-        unwrapDI<DIDescriptor>(Scope), StringRef(Name, NameLen),
-        unwrapDI<DIFile>(File), LineNo, unwrapDI<DIType>(Ty), AlwaysPreserve,
-        fromRust(Flags), AlignInBits));
-  } else {
-    return wrap(unwrap(Builder)->createParameterVariable(
-        unwrapDI<DIDescriptor>(Scope), StringRef(Name, NameLen), ArgNo,
-        unwrapDI<DIFile>(File), LineNo, unwrapDI<DIType>(Ty), AlwaysPreserve,
-        fromRust(Flags)));
-  }
-}
-
-extern "C" LLVMMetadataRef
-LLVMRustDIBuilderGetOrCreateSubrange(LLVMDIBuilderRef Builder, int64_t Lo,
-                                     int64_t Count) {
-  return wrap(unwrap(Builder)->getOrCreateSubrange(Lo, Count));
-}
-
-extern "C" LLVMMetadataRef
-LLVMRustDIBuilderGetOrCreateArray(LLVMDIBuilderRef Builder,
-                                  LLVMMetadataRef *Ptr, unsigned Count) {
-  Metadata **DataValue = unwrap(Ptr);
-  return wrap(unwrap(Builder)
-                  ->getOrCreateArray(ArrayRef<Metadata *>(DataValue, Count))
-                  .get());
-}
-
-extern "C" void
-LLVMRustDIBuilderInsertDeclareAtEnd(LLVMDIBuilderRef Builder, LLVMValueRef V,
-                                    LLVMMetadataRef VarInfo, uint64_t *AddrOps,
-                                    unsigned AddrOpsCount, LLVMMetadataRef DL,
-                                    LLVMBasicBlockRef InsertAtEnd) {
-  unwrap(Builder)->insertDeclare(
-      unwrap(V), unwrap<DILocalVariable>(VarInfo),
-      unwrap(Builder)->createExpression(
-          llvm::ArrayRef<uint64_t>(AddrOps, AddrOpsCount)),
-      DebugLoc(cast<MDNode>(unwrap(DL))), unwrap(InsertAtEnd));
-}
-
 extern "C" LLVMMetadataRef
 LLVMRustDIBuilderCreateEnumerator(LLVMDIBuilderRef Builder, const char *Name,
                                   size_t NameLen, const uint64_t Value[2],
@@ -1865,3 +1812,15 @@ extern "C" void LLVMRustSetNoSanitizeHWAddress(LLVMValueRef Global) {
   MD.NoHWAddress = true;
   GV.setSanitizerMetadata(MD);
 }
+
+#ifdef ENZYME
+extern "C" {
+extern llvm::cl::opt<unsigned> EnzymeMaxTypeDepth;
+}
+
+extern "C" size_t LLVMRustEnzymeGetMaxTypeDepth() { return EnzymeMaxTypeDepth; }
+#else
+extern "C" size_t LLVMRustEnzymeGetMaxTypeDepth() {
+  return 6; // Default fallback depth
+}
+#endif
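
The wrapper above exports `LLVMRustEnzymeGetMaxTypeDepth`, reading Enzyme's `EnzymeMaxTypeDepth` option when Enzyme is compiled in and falling back to a depth of 6 otherwise. A hedged sketch of how the Rust side of the FFI could bind it; the actual declaration in the compiler's LLVM FFI module may differ:

    // Assumed binding for illustration; `size_t` maps to `usize` on targets rustc supports.
    unsafe extern "C" {
        fn LLVMRustEnzymeGetMaxTypeDepth() -> usize;
    }

    /// Hypothetical safe helper around the raw getter.
    fn enzyme_max_type_depth() -> usize {
        // SAFETY: the C++ wrapper is a parameterless getter with no preconditions.
        unsafe { LLVMRustEnzymeGetMaxTypeDepth() }
    }
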
diff --git a/compiler/rustc_metadata/src/rmeta/decoder.rs b/compiler/rustc_metadata/src/rmeta/decoder.rs
index 0c8d1f32e99..b895feb9062 100644
--- a/compiler/rustc_metadata/src/rmeta/decoder.rs
+++ b/compiler/rustc_metadata/src/rmeta/decoder.rs
@@ -1555,7 +1555,7 @@ impl<'a> CrateMetadataRef<'a> {
     }
 
     #[inline]
-    fn def_path_hash_to_def_index(self, hash: DefPathHash) -> DefIndex {
+    fn def_path_hash_to_def_index(self, hash: DefPathHash) -> Option<DefIndex> {
         self.def_path_hash_map.def_path_hash_to_def_index(&hash)
     }
 
diff --git a/compiler/rustc_metadata/src/rmeta/decoder/cstore_impl.rs b/compiler/rustc_metadata/src/rmeta/decoder/cstore_impl.rs
index 11fef3be5d0..df3add316ec 100644
--- a/compiler/rustc_metadata/src/rmeta/decoder/cstore_impl.rs
+++ b/compiler/rustc_metadata/src/rmeta/decoder/cstore_impl.rs
@@ -691,8 +691,8 @@ fn provide_cstore_hooks(providers: &mut Providers) {
             .get(&stable_crate_id)
             .unwrap_or_else(|| bug!("uninterned StableCrateId: {stable_crate_id:?}"));
         assert_ne!(cnum, LOCAL_CRATE);
-        let def_index = cstore.get_crate_data(cnum).def_path_hash_to_def_index(hash);
-        DefId { krate: cnum, index: def_index }
+        let def_index = cstore.get_crate_data(cnum).def_path_hash_to_def_index(hash)?;
+        Some(DefId { krate: cnum, index: def_index })
     };
 
     providers.hooks.expn_hash_to_expn_id = |tcx, cnum, index_guess, hash| {
diff --git a/compiler/rustc_metadata/src/rmeta/def_path_hash_map.rs b/compiler/rustc_metadata/src/rmeta/def_path_hash_map.rs
index f3917b55782..a17b3e1047d 100644
--- a/compiler/rustc_metadata/src/rmeta/def_path_hash_map.rs
+++ b/compiler/rustc_metadata/src/rmeta/def_path_hash_map.rs
@@ -12,11 +12,12 @@ pub(crate) enum DefPathHashMapRef<'tcx> {
 
 impl DefPathHashMapRef<'_> {
     #[inline]
-    pub(crate) fn def_path_hash_to_def_index(&self, def_path_hash: &DefPathHash) -> DefIndex {
+    pub(crate) fn def_path_hash_to_def_index(
+        &self,
+        def_path_hash: &DefPathHash,
+    ) -> Option<DefIndex> {
         match *self {
-            DefPathHashMapRef::OwnedFromMetadata(ref map) => {
-                map.get(&def_path_hash.local_hash()).unwrap()
-            }
+            DefPathHashMapRef::OwnedFromMetadata(ref map) => map.get(&def_path_hash.local_hash()),
             DefPathHashMapRef::BorrowedFromTcx(_) => {
                 panic!("DefPathHashMap::BorrowedFromTcx variant only exists for serialization")
             }
diff --git a/compiler/rustc_middle/src/error.rs b/compiler/rustc_middle/src/error.rs
index 0be26712b9c..3ff9eea8cc4 100644
--- a/compiler/rustc_middle/src/error.rs
+++ b/compiler/rustc_middle/src/error.rs
@@ -37,7 +37,6 @@ pub(crate) struct OpaqueHiddenTypeMismatch<'tcx> {
     pub sub: TypeMismatchReason,
 }
 
-// FIXME(autodiff): I should get used somewhere
 #[derive(Diagnostic)]
 #[diag(middle_unsupported_union)]
 pub struct UnsupportedUnion {
diff --git a/compiler/rustc_middle/src/hooks/mod.rs b/compiler/rustc_middle/src/hooks/mod.rs
index 9d2f0a45237..dc6a3334a4c 100644
--- a/compiler/rustc_middle/src/hooks/mod.rs
+++ b/compiler/rustc_middle/src/hooks/mod.rs
@@ -77,7 +77,7 @@ declare_hooks! {
     /// session, if it still exists. This is used during incremental compilation to
     /// turn a deserialized `DefPathHash` into its current `DefId`.
     /// Will fetch a DefId from a DefPathHash for a foreign crate.
-    hook def_path_hash_to_def_id_extern(hash: DefPathHash, stable_crate_id: StableCrateId) -> DefId;
+    hook def_path_hash_to_def_id_extern(hash: DefPathHash, stable_crate_id: StableCrateId) -> Option<DefId>;
 
     /// Returns `true` if we should codegen an instance in the local crate, or returns `false` if we
     /// can just link to the upstream crate and therefore don't need a mono item.
diff --git a/compiler/rustc_middle/src/ty/context.rs b/compiler/rustc_middle/src/ty/context.rs
index a42af7bb3e3..fe3fa024cd5 100644
--- a/compiler/rustc_middle/src/ty/context.rs
+++ b/compiler/rustc_middle/src/ty/context.rs
@@ -2012,7 +2012,7 @@ impl<'tcx> TyCtxt<'tcx> {
         if stable_crate_id == self.stable_crate_id(LOCAL_CRATE) {
             Some(self.untracked.definitions.read().local_def_path_hash_to_def_id(hash)?.to_def_id())
         } else {
-            Some(self.def_path_hash_to_def_id_extern(hash, stable_crate_id))
+            self.def_path_hash_to_def_id_extern(hash, stable_crate_id)
         }
     }
 
diff --git a/compiler/rustc_middle/src/ty/mod.rs b/compiler/rustc_middle/src/ty/mod.rs
index 0ffef393a33..ce4de6b95e0 100644
--- a/compiler/rustc_middle/src/ty/mod.rs
+++ b/compiler/rustc_middle/src/ty/mod.rs
@@ -25,6 +25,7 @@ pub use generic_args::{GenericArgKind, TermKind, *};
 pub use generics::*;
 pub use intrinsic::IntrinsicDef;
 use rustc_abi::{Align, FieldIdx, Integer, IntegerType, ReprFlags, ReprOptions, VariantIdx};
+use rustc_ast::expand::typetree::{FncTree, Kind, Type, TypeTree};
 use rustc_ast::node_id::NodeMap;
 pub use rustc_ast_ir::{Movability, Mutability, try_visit};
 use rustc_data_structures::fx::{FxHashMap, FxHashSet, FxIndexMap, FxIndexSet};
@@ -62,7 +63,7 @@ pub use rustc_type_ir::solve::SizedTraitKind;
 pub use rustc_type_ir::*;
 #[allow(hidden_glob_reexports, unused_imports)]
 use rustc_type_ir::{InferCtxtLike, Interner};
-use tracing::{debug, instrument};
+use tracing::{debug, instrument, trace};
 pub use vtable::*;
 use {rustc_ast as ast, rustc_hir as hir};
 
@@ -2216,3 +2217,225 @@ pub struct DestructuredConst<'tcx> {
     pub variant: Option<VariantIdx>,
     pub fields: &'tcx [ty::Const<'tcx>],
 }
+
+/// Generate TypeTree information for autodiff.
+/// This function creates TypeTree metadata that describes the memory layout
+/// of function parameters and return types for Enzyme autodiff.
+pub fn fnc_typetrees<'tcx>(tcx: TyCtxt<'tcx>, fn_ty: Ty<'tcx>) -> FncTree {
+    // Check if TypeTrees are disabled via NoTT flag
+    if tcx.sess.opts.unstable_opts.autodiff.contains(&rustc_session::config::AutoDiff::NoTT) {
+        return FncTree { args: vec![], ret: TypeTree::new() };
+    }
+
+    // Check if this is actually a function type
+    if !fn_ty.is_fn() {
+        return FncTree { args: vec![], ret: TypeTree::new() };
+    }
+
+    // Get the function signature
+    let fn_sig = fn_ty.fn_sig(tcx);
+    let sig = tcx.instantiate_bound_regions_with_erased(fn_sig);
+
+    // Create TypeTrees for each input parameter
+    let mut args = vec![];
+    for ty in sig.inputs().iter() {
+        let type_tree = typetree_from_ty(tcx, *ty);
+        args.push(type_tree);
+    }
+
+    // Create TypeTree for return type
+    let ret = typetree_from_ty(tcx, sig.output());
+
+    FncTree { args, ret }
+}
+
+/// Generate TypeTree for a specific type.
+/// This function analyzes a Rust type and creates appropriate TypeTree metadata.
+pub fn typetree_from_ty<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> TypeTree {
+    let mut visited = Vec::new();
+    typetree_from_ty_inner(tcx, ty, 0, &mut visited)
+}
+
+/// Maximum recursion depth for TypeTree generation to prevent stack overflow
+/// from pathologically deeply nested types. Combined with cycle detection.
+const MAX_TYPETREE_DEPTH: usize = 6;
+
+/// Internal recursive function for TypeTree generation with cycle detection and depth limiting.
+fn typetree_from_ty_inner<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    ty: Ty<'tcx>,
+    depth: usize,
+    visited: &mut Vec<Ty<'tcx>>,
+) -> TypeTree {
+    if depth >= MAX_TYPETREE_DEPTH {
+        trace!("typetree depth limit {} reached for type: {}", MAX_TYPETREE_DEPTH, ty);
+        return TypeTree::new();
+    }
+
+    if visited.contains(&ty) {
+        return TypeTree::new();
+    }
+
+    visited.push(ty);
+    let result = typetree_from_ty_impl(tcx, ty, depth, visited);
+    visited.pop();
+    result
+}
+
+/// Implementation of TypeTree generation logic.
+fn typetree_from_ty_impl<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    ty: Ty<'tcx>,
+    depth: usize,
+    visited: &mut Vec<Ty<'tcx>>,
+) -> TypeTree {
+    typetree_from_ty_impl_inner(tcx, ty, depth, visited, false)
+}
+
+/// Internal implementation with context about whether this is for a reference target.
+fn typetree_from_ty_impl_inner<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    ty: Ty<'tcx>,
+    depth: usize,
+    visited: &mut Vec<Ty<'tcx>>,
+    is_reference_target: bool,
+) -> TypeTree {
+    if ty.is_scalar() {
+        let (kind, size) = if ty.is_integral() || ty.is_char() || ty.is_bool() {
+            (Kind::Integer, ty.primitive_size(tcx).bytes_usize())
+        } else if ty.is_floating_point() {
+            match ty {
+                x if x == tcx.types.f16 => (Kind::Half, 2),
+                x if x == tcx.types.f32 => (Kind::Float, 4),
+                x if x == tcx.types.f64 => (Kind::Double, 8),
+                x if x == tcx.types.f128 => (Kind::F128, 16),
+                _ => (Kind::Integer, 0),
+            }
+        } else {
+            (Kind::Integer, 0)
+        };
+
+        // Use offset 0 for scalars that are direct targets of references (like &f64)
+        // Use offset -1 for scalars used directly (like function return types)
+        let offset = if is_reference_target && !ty.is_array() { 0 } else { -1 };
+        return TypeTree(vec![Type { offset, size, kind, child: TypeTree::new() }]);
+    }
+
+    if ty.is_ref() || ty.is_raw_ptr() || ty.is_box() {
+        let inner_ty = if let Some(inner) = ty.builtin_deref(true) {
+            inner
+        } else {
+            return TypeTree::new();
+        };
+
+        let child = typetree_from_ty_impl_inner(tcx, inner_ty, depth + 1, visited, true);
+        return TypeTree(vec![Type {
+            offset: -1,
+            size: tcx.data_layout.pointer_size().bytes_usize(),
+            kind: Kind::Pointer,
+            child,
+        }]);
+    }
+
+    if ty.is_array() {
+        if let ty::Array(element_ty, len_const) = ty.kind() {
+            let len = len_const.try_to_target_usize(tcx).unwrap_or(0);
+            if len == 0 {
+                return TypeTree::new();
+            }
+            let element_tree =
+                typetree_from_ty_impl_inner(tcx, *element_ty, depth + 1, visited, false);
+            let mut types = Vec::new();
+            for elem_type in &element_tree.0 {
+                types.push(Type {
+                    offset: -1,
+                    size: elem_type.size,
+                    kind: elem_type.kind,
+                    child: elem_type.child.clone(),
+                });
+            }
+
+            return TypeTree(types);
+        }
+    }
+
+    if ty.is_slice() {
+        if let ty::Slice(element_ty) = ty.kind() {
+            let element_tree =
+                typetree_from_ty_impl_inner(tcx, *element_ty, depth + 1, visited, false);
+            return element_tree;
+        }
+    }
+
+    if let ty::Tuple(tuple_types) = ty.kind() {
+        if tuple_types.is_empty() {
+            return TypeTree::new();
+        }
+
+        let mut types = Vec::new();
+        let mut current_offset = 0;
+
+        for tuple_ty in tuple_types.iter() {
+            let element_tree =
+                typetree_from_ty_impl_inner(tcx, tuple_ty, depth + 1, visited, false);
+
+            let element_layout = tcx
+                .layout_of(ty::TypingEnv::fully_monomorphized().as_query_input(tuple_ty))
+                .ok()
+                .map(|layout| layout.size.bytes_usize())
+                .unwrap_or(0);
+
+            for elem_type in &element_tree.0 {
+                types.push(Type {
+                    offset: if elem_type.offset == -1 {
+                        current_offset as isize
+                    } else {
+                        current_offset as isize + elem_type.offset
+                    },
+                    size: elem_type.size,
+                    kind: elem_type.kind,
+                    child: elem_type.child.clone(),
+                });
+            }
+
+            current_offset += element_layout;
+        }
+
+        return TypeTree(types);
+    }
+
+    if let ty::Adt(adt_def, args) = ty.kind() {
+        if adt_def.is_struct() {
+            let struct_layout =
+                tcx.layout_of(ty::TypingEnv::fully_monomorphized().as_query_input(ty));
+            if let Ok(layout) = struct_layout {
+                let mut types = Vec::new();
+
+                for (field_idx, field_def) in adt_def.all_fields().enumerate() {
+                    let field_ty = field_def.ty(tcx, args);
+                    let field_tree =
+                        typetree_from_ty_impl_inner(tcx, field_ty, depth + 1, visited, false);
+
+                    let field_offset = layout.fields.offset(field_idx).bytes_usize();
+
+                    for elem_type in &field_tree.0 {
+                        types.push(Type {
+                            offset: if elem_type.offset == -1 {
+                                field_offset as isize
+                            } else {
+                                field_offset as isize + elem_type.offset
+                            },
+                            size: elem_type.size,
+                            kind: elem_type.kind,
+                            child: elem_type.child.clone(),
+                        });
+                    }
+                }
+
+                return TypeTree(types);
+            }
+        }
+    }
+
+    TypeTree::new()
+}
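
To make the offset convention above concrete: for `fn f(x: &f64) -> f64` on a 64-bit target, the argument is described as a pointer whose pointee is a double at offset 0 (a scalar that is the direct target of a reference), while the return value is a double at offset -1 (a scalar used directly). A self-contained sketch using stand-in types that only mirror the shape of `rustc_ast::expand::typetree`, not the real definitions:

    // Stand-ins mirroring the shape of the typetree types used above.
    #[allow(dead_code)]
    #[derive(Clone, Copy, Debug, PartialEq)]
    enum Kind { Integer, Half, Float, Double, F128, Pointer }

    #[derive(Clone, Debug, PartialEq)]
    struct Type { offset: isize, size: usize, kind: Kind, child: TypeTree }

    #[derive(Clone, Debug, PartialEq, Default)]
    struct TypeTree(Vec<Type>);

    fn main() {
        // Argument `&f64`: an 8-byte pointer (offset -1) to an 8-byte double at offset 0.
        let arg = TypeTree(vec![Type {
            offset: -1,
            size: 8, // pointer size on a 64-bit target
            kind: Kind::Pointer,
            child: TypeTree(vec![Type {
                offset: 0,
                size: 8,
                kind: Kind::Double,
                child: TypeTree::default(),
            }]),
        }]);

        // Return value `f64`: an 8-byte double used directly, hence offset -1 and no child.
        let ret = TypeTree(vec![Type {
            offset: -1,
            size: 8,
            kind: Kind::Double,
            child: TypeTree::default(),
        }]);

        println!("args = [{arg:?}]");
        println!("ret  = {ret:?}");
    }
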
diff --git a/compiler/rustc_middle/src/ty/vtable.rs b/compiler/rustc_middle/src/ty/vtable.rs
index e2f09fdcb4b..a3e9054fdcb 100644
--- a/compiler/rustc_middle/src/ty/vtable.rs
+++ b/compiler/rustc_middle/src/ty/vtable.rs
@@ -104,7 +104,7 @@ pub(super) fn vtable_allocation_provider<'tcx>(
         .expect("failed to build vtable representation");
     assert!(layout.is_sized(), "can't create a vtable for an unsized type");
     let size = layout.size.bytes();
-    let align = layout.align.abi.bytes();
+    let align = layout.align.bytes();
 
     let ptr_size = tcx.data_layout.pointer_size();
     let ptr_align = tcx.data_layout.pointer_align().abi;
diff --git a/compiler/rustc_mir_build/src/check_unsafety.rs b/compiler/rustc_mir_build/src/check_unsafety.rs
index b5e165c7517..195d45c2c4c 100644
--- a/compiler/rustc_mir_build/src/check_unsafety.rs
+++ b/compiler/rustc_mir_build/src/check_unsafety.rs
@@ -554,6 +554,21 @@ impl<'a, 'tcx> Visitor<'a, 'tcx> for UnsafetyVisitor<'a, 'tcx> {
                     visit::walk_expr(self, &self.thir[arg]);
                     return;
                 }
+
+                // Secondly, we allow raw borrows of union field accesses. Peel
+                // any of those off, and recurse normally on the LHS, which should
+                // reject any unsafe operations within.
+                let mut peeled = arg;
+                while let ExprKind::Scope { value: arg, .. } = self.thir[peeled].kind
+                    && let ExprKind::Field { lhs, name: _, variant_index: _ } = self.thir[arg].kind
+                    && let ty::Adt(def, _) = &self.thir[lhs].ty.kind()
+                    && def.is_union()
+                {
+                    peeled = lhs;
+                }
+                visit::walk_expr(self, &self.thir[peeled]);
+                // And return so we don't recurse directly onto the union field access(es).
+                return;
             }
             ExprKind::Deref { arg } => {
                 if let ExprKind::StaticRef { def_id, .. } | ExprKind::ThreadLocalRef(def_id) =
diff --git a/compiler/rustc_mir_build/src/thir/pattern/check_match.rs b/compiler/rustc_mir_build/src/thir/pattern/check_match.rs
index ae67bb5075e..3929a97eed8 100644
--- a/compiler/rustc_mir_build/src/thir/pattern/check_match.rs
+++ b/compiler/rustc_mir_build/src/thir/pattern/check_match.rs
@@ -1275,13 +1275,13 @@ fn report_non_exhaustive_match<'p, 'tcx>(
             if ty.is_ptr_sized_integral() {
                 if ty.inner() == cx.tcx.types.usize {
                     err.note(format!(
-                        "`{ty}` does not have a fixed maximum value, so half-open ranges are \
-                         necessary to match exhaustively",
+                        "`{ty}::MAX` is not treated as exhaustive, \
+                        so half-open ranges are necessary to match exhaustively",
                     ));
                 } else if ty.inner() == cx.tcx.types.isize {
                     err.note(format!(
-                        "`{ty}` does not have fixed minimum and maximum values, so half-open \
-                         ranges are necessary to match exhaustively",
+                        "`{ty}::MIN` and `{ty}::MAX` are not treated as exhaustive, \
+                        so half-open ranges are necessary to match exhaustively",
                     ));
                 }
             } else if ty.inner() == cx.tcx.types.str_ {
diff --git a/compiler/rustc_mir_transform/src/abort_unwinding_calls.rs b/compiler/rustc_mir_transform/src/abort_unwinding_calls.rs
index 5bd6fdcf485..35a21a2a834 100644
--- a/compiler/rustc_mir_transform/src/abort_unwinding_calls.rs
+++ b/compiler/rustc_mir_transform/src/abort_unwinding_calls.rs
@@ -3,6 +3,7 @@ use rustc_ast::InlineAsmOptions;
 use rustc_middle::mir::*;
 use rustc_middle::span_bug;
 use rustc_middle::ty::{self, TyCtxt, layout};
+use rustc_span::sym;
 use rustc_target::spec::PanicStrategy;
 
 /// A pass that runs which is targeted at ensuring that codegen guarantees about
@@ -33,6 +34,19 @@ impl<'tcx> crate::MirPass<'tcx> for AbortUnwindingCalls {
             return;
         }
 
+        // Represents whether this compilation target fundamentally doesn't
+        // support unwinding at all at an ABI level. If the target has no
+        // support for unwinding, then cleanup actions, for example, are all
+        // unnecessary and can be considered unreachable.
+        //
+        // Currently this is only true for wasm targets on panic=abort when the
+        // `exception-handling` target feature is disabled. In such a
+        // configuration it's illegal to emit exception-related instructions so
+        // it's not possible to unwind.
+        let target_supports_unwinding = !(tcx.sess.target.is_like_wasm
+            && tcx.sess.panic_strategy() == PanicStrategy::Abort
+            && !tcx.asm_target_features(def_id).contains(&sym::exception_handling));
+
         // Here we test for this function itself whether its ABI allows
         // unwinding or not.
         let body_ty = tcx.type_of(def_id).skip_binder();
@@ -54,12 +68,18 @@ impl<'tcx> crate::MirPass<'tcx> for AbortUnwindingCalls {
             let Some(terminator) = &mut block.terminator else { continue };
             let span = terminator.source_info.span;
 
-            // If we see an `UnwindResume` terminator inside a function that cannot unwind, we need
-            // to replace it with `UnwindTerminate`.
-            if let TerminatorKind::UnwindResume = &terminator.kind
-                && !body_can_unwind
-            {
-                terminator.kind = TerminatorKind::UnwindTerminate(UnwindTerminateReason::Abi);
+            // If we see an `UnwindResume` terminator inside a function, then:
+            //
+            // * If the target doesn't support unwinding at all, then this is an
+            //   unreachable block.
+            // * If the body cannot unwind, we need to replace it with
+            //   `UnwindTerminate`.
+            if let TerminatorKind::UnwindResume = &terminator.kind {
+                if !target_supports_unwinding {
+                    terminator.kind = TerminatorKind::Unreachable;
+                } else if !body_can_unwind {
+                    terminator.kind = TerminatorKind::UnwindTerminate(UnwindTerminateReason::Abi);
+                }
             }
 
             if block.is_cleanup {
@@ -93,8 +113,9 @@ impl<'tcx> crate::MirPass<'tcx> for AbortUnwindingCalls {
                 _ => continue,
             };
 
-            if !call_can_unwind {
-                // If this function call can't unwind, then there's no need for it
+            if !call_can_unwind || !target_supports_unwinding {
+                // If this function call can't unwind, or if the target doesn't
+                // support unwinding at all, then there's no need for it
                 // to have a landing pad. This means that we can remove any cleanup
                 // registered for it (and turn it into `UnwindAction::Unreachable`).
                 let cleanup = block.terminator_mut().unwind_mut().unwrap();
diff --git a/compiler/rustc_mir_transform/src/dataflow_const_prop.rs b/compiler/rustc_mir_transform/src/dataflow_const_prop.rs
index 5c984984d3c..491e910ff6f 100644
--- a/compiler/rustc_mir_transform/src/dataflow_const_prop.rs
+++ b/compiler/rustc_mir_transform/src/dataflow_const_prop.rs
@@ -476,7 +476,7 @@ impl<'a, 'tcx> ConstAnalysis<'a, 'tcx> {
                 };
                 let val = match null_op {
                     NullOp::SizeOf if layout.is_sized() => layout.size.bytes(),
-                    NullOp::AlignOf if layout.is_sized() => layout.align.abi.bytes(),
+                    NullOp::AlignOf if layout.is_sized() => layout.align.bytes(),
                     NullOp::OffsetOf(fields) => self
                         .ecx
                         .tcx
diff --git a/compiler/rustc_mir_transform/src/gvn.rs b/compiler/rustc_mir_transform/src/gvn.rs
index ebec3d12500..29f6879aacd 100644
--- a/compiler/rustc_mir_transform/src/gvn.rs
+++ b/compiler/rustc_mir_transform/src/gvn.rs
@@ -618,7 +618,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
                 }
                 let val = match null_op {
                     NullOp::SizeOf => arg_layout.size.bytes(),
-                    NullOp::AlignOf => arg_layout.align.abi.bytes(),
+                    NullOp::AlignOf => arg_layout.align.bytes(),
                     NullOp::OffsetOf(fields) => self
                         .ecx
                         .tcx
diff --git a/compiler/rustc_mir_transform/src/known_panics_lint.rs b/compiler/rustc_mir_transform/src/known_panics_lint.rs
index aaacc5866a2..5fffba55f17 100644
--- a/compiler/rustc_mir_transform/src/known_panics_lint.rs
+++ b/compiler/rustc_mir_transform/src/known_panics_lint.rs
@@ -609,7 +609,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
                 let op_layout = self.ecx.layout_of(ty).ok()?;
                 let val = match null_op {
                     NullOp::SizeOf => op_layout.size.bytes(),
-                    NullOp::AlignOf => op_layout.align.abi.bytes(),
+                    NullOp::AlignOf => op_layout.align.bytes(),
                     NullOp::OffsetOf(fields) => self
                         .tcx
                         .offset_of_subfield(self.typing_env, op_layout, fields.iter())
diff --git a/compiler/rustc_mir_transform/src/patch.rs b/compiler/rustc_mir_transform/src/patch.rs
index d831ab50b1a..cc8ea76011b 100644
--- a/compiler/rustc_mir_transform/src/patch.rs
+++ b/compiler/rustc_mir_transform/src/patch.rs
@@ -11,6 +11,8 @@ use tracing::debug;
 /// once with `apply`. This is useful for MIR transformation passes.
 pub(crate) struct MirPatch<'tcx> {
     term_patch_map: FxHashMap<BasicBlock, TerminatorKind<'tcx>>,
+    /// Set of statements that should be replaced by `Nop`.
+    nop_statements: Vec<Location>,
     new_blocks: Vec<BasicBlockData<'tcx>>,
     new_statements: Vec<(Location, StatementKind<'tcx>)>,
     new_locals: Vec<LocalDecl<'tcx>>,
@@ -33,6 +35,7 @@ impl<'tcx> MirPatch<'tcx> {
     pub(crate) fn new(body: &Body<'tcx>) -> Self {
         let mut result = MirPatch {
             term_patch_map: Default::default(),
+            nop_statements: vec![],
             new_blocks: vec![],
             new_statements: vec![],
             new_locals: vec![],
@@ -212,6 +215,15 @@ impl<'tcx> MirPatch<'tcx> {
         self.term_patch_map.insert(block, new);
     }
 
+    /// Marks the given statement to be replaced by a `Nop`.
+    ///
+    /// This method only works on statements from the initial body; it cannot be used to remove
+    /// statements added by `add_statement` or `add_assign`.
+    #[tracing::instrument(level = "debug", skip(self))]
+    pub(crate) fn nop_statement(&mut self, loc: Location) {
+        self.nop_statements.push(loc);
+    }
+
     /// Queues the insertion of a statement at a given location. The statement
     /// currently at that location, and all statements that follow, are shifted
     /// down. If multiple statements are queued for addition at the same
@@ -257,11 +269,8 @@ impl<'tcx> MirPatch<'tcx> {
         bbs.extend(self.new_blocks);
         body.local_decls.extend(self.new_locals);
 
-        // The order in which we patch terminators does not change the result.
-        #[allow(rustc::potential_query_instability)]
-        for (src, patch) in self.term_patch_map {
-            debug!("MirPatch: patching block {:?}", src);
-            bbs[src].terminator_mut().kind = patch;
+        for loc in self.nop_statements {
+            bbs[loc.block].statements[loc.statement_index].make_nop();
         }
 
         let mut new_statements = self.new_statements;
@@ -285,6 +294,17 @@ impl<'tcx> MirPatch<'tcx> {
                 .insert(loc.statement_index, Statement::new(source_info, stmt));
             delta += 1;
         }
+
+        // The order in which we patch terminators does not change the result.
+        #[allow(rustc::potential_query_instability)]
+        for (src, patch) in self.term_patch_map {
+            debug!("MirPatch: patching block {:?}", src);
+            let bb = &mut bbs[src];
+            if let TerminatorKind::Unreachable = patch {
+                bb.statements.clear();
+            }
+            bb.terminator_mut().kind = patch;
+        }
     }
 
     fn source_info_for_index(data: &BasicBlockData<'_>, loc: Location) -> SourceInfo {
diff --git a/compiler/rustc_mir_transform/src/simplify_branches.rs b/compiler/rustc_mir_transform/src/simplify_branches.rs
index 886f4d6e509..ed94a058ec6 100644
--- a/compiler/rustc_mir_transform/src/simplify_branches.rs
+++ b/compiler/rustc_mir_transform/src/simplify_branches.rs
@@ -2,6 +2,8 @@ use rustc_middle::mir::*;
 use rustc_middle::ty::TyCtxt;
 use tracing::trace;
 
+use crate::patch::MirPatch;
+
 pub(super) enum SimplifyConstCondition {
     AfterConstProp,
     Final,
@@ -19,8 +21,10 @@ impl<'tcx> crate::MirPass<'tcx> for SimplifyConstCondition {
     fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
         trace!("Running SimplifyConstCondition on {:?}", body.source);
         let typing_env = body.typing_env(tcx);
-        'blocks: for block in body.basic_blocks_mut() {
-            for stmt in block.statements.iter_mut() {
+        let mut patch = MirPatch::new(body);
+
+        'blocks: for (bb, block) in body.basic_blocks.iter_enumerated() {
+            for (statement_index, stmt) in block.statements.iter().enumerate() {
                 // Simplify `assume` of a known value: either a NOP or unreachable.
                 if let StatementKind::Intrinsic(box ref intrinsic) = stmt.kind
                     && let NonDivergingIntrinsic::Assume(discr) = intrinsic
@@ -28,17 +32,16 @@ impl<'tcx> crate::MirPass<'tcx> for SimplifyConstCondition {
                     && let Some(constant) = c.const_.try_eval_bool(tcx, typing_env)
                 {
                     if constant {
-                        stmt.make_nop();
+                        patch.nop_statement(Location { block: bb, statement_index });
                     } else {
-                        block.statements.clear();
-                        block.terminator_mut().kind = TerminatorKind::Unreachable;
+                        patch.patch_terminator(bb, TerminatorKind::Unreachable);
                         continue 'blocks;
                     }
                 }
             }
 
-            let terminator = block.terminator_mut();
-            terminator.kind = match terminator.kind {
+            let terminator = block.terminator();
+            let terminator = match terminator.kind {
                 TerminatorKind::SwitchInt {
                     discr: Operand::Constant(ref c), ref targets, ..
                 } => {
@@ -58,7 +61,9 @@ impl<'tcx> crate::MirPass<'tcx> for SimplifyConstCondition {
                 },
                 _ => continue,
             };
+            patch.patch_terminator(bb, terminator);
         }
+        patch.apply(body);
     }
 
     fn is_required(&self) -> bool {
diff --git a/compiler/rustc_next_trait_solver/src/solve/assembly/structural_traits.rs b/compiler/rustc_next_trait_solver/src/solve/assembly/structural_traits.rs
index c40739d12e6..9b3dc1f691f 100644
--- a/compiler/rustc_next_trait_solver/src/solve/assembly/structural_traits.rs
+++ b/compiler/rustc_next_trait_solver/src/solve/assembly/structural_traits.rs
@@ -664,7 +664,7 @@ fn coroutine_closure_to_ambiguous_coroutine<I: Interner>(
 pub(in crate::solve) fn extract_fn_def_from_const_callable<I: Interner>(
     cx: I,
     self_ty: I::Ty,
-) -> Result<(ty::Binder<I, (I::FnInputTys, I::Ty)>, I::FunctionId, I::GenericArgs), NoSolution> {
+) -> Result<(ty::Binder<I, (I::Ty, I::Ty)>, I::FunctionId, I::GenericArgs), NoSolution> {
     match self_ty.kind() {
         ty::FnDef(def_id, args) => {
             let sig = cx.fn_sig(def_id);
@@ -673,7 +673,8 @@ pub(in crate::solve) fn extract_fn_def_from_const_callable<I: Interner>(
                 && cx.fn_is_const(def_id)
             {
                 Ok((
-                    sig.instantiate(cx, args).map_bound(|sig| (sig.inputs(), sig.output())),
+                    sig.instantiate(cx, args)
+                        .map_bound(|sig| (Ty::new_tup(cx, sig.inputs().as_slice()), sig.output())),
                     def_id,
                     args,
                 ))
diff --git a/compiler/rustc_next_trait_solver/src/solve/effect_goals.rs b/compiler/rustc_next_trait_solver/src/solve/effect_goals.rs
index cb72c1cd92b..65a5edf6b72 100644
--- a/compiler/rustc_next_trait_solver/src/solve/effect_goals.rs
+++ b/compiler/rustc_next_trait_solver/src/solve/effect_goals.rs
@@ -234,12 +234,12 @@ where
         let self_ty = goal.predicate.self_ty();
         let (inputs_and_output, def_id, args) =
             structural_traits::extract_fn_def_from_const_callable(cx, self_ty)?;
+        let (inputs, output) = ecx.instantiate_binder_with_infer(inputs_and_output);
 
         // A built-in `Fn` impl only holds if the output is sized.
         // (FIXME: technically we only need to check this if the type is a fn ptr...)
-        let output_is_sized_pred = inputs_and_output.map_bound(|(_, output)| {
-            ty::TraitRef::new(cx, cx.require_trait_lang_item(SolverTraitLangItem::Sized), [output])
-        });
+        let output_is_sized_pred =
+            ty::TraitRef::new(cx, cx.require_trait_lang_item(SolverTraitLangItem::Sized), [output]);
         let requirements = cx
             .const_conditions(def_id.into())
             .iter_instantiated(cx, args)
@@ -251,15 +251,12 @@ where
             })
             .chain([(GoalSource::ImplWhereBound, goal.with(cx, output_is_sized_pred))]);
 
-        let pred = inputs_and_output
-            .map_bound(|(inputs, _)| {
-                ty::TraitRef::new(
-                    cx,
-                    goal.predicate.def_id(),
-                    [goal.predicate.self_ty(), Ty::new_tup(cx, inputs.as_slice())],
-                )
-            })
-            .to_host_effect_clause(cx, goal.predicate.constness);
+        let pred = ty::Binder::dummy(ty::TraitRef::new(
+            cx,
+            goal.predicate.def_id(),
+            [goal.predicate.self_ty(), inputs],
+        ))
+        .to_host_effect_clause(cx, goal.predicate.constness);
 
         Self::probe_and_consider_implied_clause(
             ecx,
diff --git a/compiler/rustc_next_trait_solver/src/solve/eval_ctxt/mod.rs b/compiler/rustc_next_trait_solver/src/solve/eval_ctxt/mod.rs
index 85110530ae9..f25003bbfe9 100644
--- a/compiler/rustc_next_trait_solver/src/solve/eval_ctxt/mod.rs
+++ b/compiler/rustc_next_trait_solver/src/solve/eval_ctxt/mod.rs
@@ -633,28 +633,19 @@ where
     // the certainty of all the goals.
     #[instrument(level = "trace", skip(self))]
     pub(super) fn try_evaluate_added_goals(&mut self) -> Result<Certainty, NoSolution> {
-        let mut response = Ok(Certainty::overflow(false));
         for _ in 0..FIXPOINT_STEP_LIMIT {
-            // FIXME: This match is a bit ugly, it might be nice to change the inspect
-            // stuff to use a closure instead. which should hopefully simplify this a bit.
             match self.evaluate_added_goals_step() {
-                Ok(Some(cert)) => {
-                    response = Ok(cert);
-                    break;
-                }
                 Ok(None) => {}
+                Ok(Some(cert)) => return Ok(cert),
                 Err(NoSolution) => {
-                    response = Err(NoSolution);
-                    break;
+                    self.tainted = Err(NoSolution);
+                    return Err(NoSolution);
                 }
             }
         }
 
-        if response.is_err() {
-            self.tainted = Err(NoSolution);
-        }
-
-        response
+        debug!("try_evaluate_added_goals: encountered overflow");
+        Ok(Certainty::overflow(false))
     }
 
     /// Iterate over all added goals: returning `Ok(Some(_))` in case we can stop rerunning.
diff --git a/compiler/rustc_next_trait_solver/src/solve/normalizes_to/mod.rs b/compiler/rustc_next_trait_solver/src/solve/normalizes_to/mod.rs
index 653c59c5d42..0674b3d42ab 100644
--- a/compiler/rustc_next_trait_solver/src/solve/normalizes_to/mod.rs
+++ b/compiler/rustc_next_trait_solver/src/solve/normalizes_to/mod.rs
@@ -451,23 +451,22 @@ where
                     return ecx.forced_ambiguity(MaybeCause::Ambiguity);
                 }
             };
+        let (inputs, output) = ecx.instantiate_binder_with_infer(tupled_inputs_and_output);
 
         // A built-in `Fn` impl only holds if the output is sized.
         // (FIXME: technically we only need to check this if the type is a fn ptr...)
-        let output_is_sized_pred = tupled_inputs_and_output.map_bound(|(_, output)| {
-            ty::TraitRef::new(cx, cx.require_trait_lang_item(SolverTraitLangItem::Sized), [output])
-        });
+        let output_is_sized_pred =
+            ty::TraitRef::new(cx, cx.require_trait_lang_item(SolverTraitLangItem::Sized), [output]);
 
-        let pred = tupled_inputs_and_output
-            .map_bound(|(inputs, output)| ty::ProjectionPredicate {
-                projection_term: ty::AliasTerm::new(
-                    cx,
-                    goal.predicate.def_id(),
-                    [goal.predicate.self_ty(), inputs],
-                ),
-                term: output.into(),
-            })
-            .upcast(cx);
+        let pred = ty::ProjectionPredicate {
+            projection_term: ty::AliasTerm::new(
+                cx,
+                goal.predicate.def_id(),
+                [goal.predicate.self_ty(), inputs],
+            ),
+            term: output.into(),
+        }
+        .upcast(cx);
 
         Self::probe_and_consider_implied_clause(
             ecx,
@@ -497,76 +496,56 @@ where
                 goal_kind,
                 env_region,
             )?;
+        let AsyncCallableRelevantTypes {
+            tupled_inputs_ty,
+            output_coroutine_ty,
+            coroutine_return_ty,
+        } = ecx.instantiate_binder_with_infer(tupled_inputs_and_output_and_coroutine);
 
         // A built-in `AsyncFn` impl only holds if the output is sized.
         // (FIXME: technically we only need to check this if the type is a fn ptr...)
-        let output_is_sized_pred = tupled_inputs_and_output_and_coroutine.map_bound(
-            |AsyncCallableRelevantTypes { output_coroutine_ty: output_ty, .. }| {
-                ty::TraitRef::new(
-                    cx,
-                    cx.require_trait_lang_item(SolverTraitLangItem::Sized),
-                    [output_ty],
-                )
-            },
+        let output_is_sized_pred = ty::TraitRef::new(
+            cx,
+            cx.require_trait_lang_item(SolverTraitLangItem::Sized),
+            [output_coroutine_ty],
         );
 
-        let pred = tupled_inputs_and_output_and_coroutine
-            .map_bound(
-                |AsyncCallableRelevantTypes {
-                     tupled_inputs_ty,
-                     output_coroutine_ty,
-                     coroutine_return_ty,
-                 }| {
-                    let (projection_term, term) = if cx
-                        .is_lang_item(goal.predicate.def_id(), SolverLangItem::CallOnceFuture)
-                    {
-                        (
-                            ty::AliasTerm::new(
-                                cx,
-                                goal.predicate.def_id(),
-                                [goal.predicate.self_ty(), tupled_inputs_ty],
-                            ),
-                            output_coroutine_ty.into(),
-                        )
-                    } else if cx
-                        .is_lang_item(goal.predicate.def_id(), SolverLangItem::CallRefFuture)
-                    {
-                        (
-                            ty::AliasTerm::new(
-                                cx,
-                                goal.predicate.def_id(),
-                                [
-                                    I::GenericArg::from(goal.predicate.self_ty()),
-                                    tupled_inputs_ty.into(),
-                                    env_region.into(),
-                                ],
-                            ),
-                            output_coroutine_ty.into(),
-                        )
-                    } else if cx
-                        .is_lang_item(goal.predicate.def_id(), SolverLangItem::AsyncFnOnceOutput)
-                    {
-                        (
-                            ty::AliasTerm::new(
-                                cx,
-                                goal.predicate.def_id(),
-                                [
-                                    I::GenericArg::from(goal.predicate.self_ty()),
-                                    tupled_inputs_ty.into(),
-                                ],
-                            ),
-                            coroutine_return_ty.into(),
-                        )
-                    } else {
-                        panic!(
-                            "no such associated type in `AsyncFn*`: {:?}",
-                            goal.predicate.def_id()
-                        )
-                    };
-                    ty::ProjectionPredicate { projection_term, term }
-                },
-            )
-            .upcast(cx);
+        let (projection_term, term) =
+            if cx.is_lang_item(goal.predicate.def_id(), SolverLangItem::CallOnceFuture) {
+                (
+                    ty::AliasTerm::new(
+                        cx,
+                        goal.predicate.def_id(),
+                        [goal.predicate.self_ty(), tupled_inputs_ty],
+                    ),
+                    output_coroutine_ty.into(),
+                )
+            } else if cx.is_lang_item(goal.predicate.def_id(), SolverLangItem::CallRefFuture) {
+                (
+                    ty::AliasTerm::new(
+                        cx,
+                        goal.predicate.def_id(),
+                        [
+                            I::GenericArg::from(goal.predicate.self_ty()),
+                            tupled_inputs_ty.into(),
+                            env_region.into(),
+                        ],
+                    ),
+                    output_coroutine_ty.into(),
+                )
+            } else if cx.is_lang_item(goal.predicate.def_id(), SolverLangItem::AsyncFnOnceOutput) {
+                (
+                    ty::AliasTerm::new(
+                        cx,
+                        goal.predicate.def_id(),
+                        [goal.predicate.self_ty(), tupled_inputs_ty],
+                    ),
+                    coroutine_return_ty.into(),
+                )
+            } else {
+                panic!("no such associated type in `AsyncFn*`: {:?}", goal.predicate.def_id())
+            };
+        let pred = ty::ProjectionPredicate { projection_term, term }.upcast(cx);
 
         Self::probe_and_consider_implied_clause(
             ecx,
diff --git a/compiler/rustc_next_trait_solver/src/solve/search_graph.rs b/compiler/rustc_next_trait_solver/src/solve/search_graph.rs
index aa9dfc9a9a2..109c8476ccb 100644
--- a/compiler/rustc_next_trait_solver/src/solve/search_graph.rs
+++ b/compiler/rustc_next_trait_solver/src/solve/search_graph.rs
@@ -74,20 +74,28 @@ where
         }
     }
 
-    fn is_initial_provisional_result(
-        cx: Self::Cx,
-        kind: PathKind,
-        input: CanonicalInput<I>,
-        result: QueryResult<I>,
-    ) -> bool {
-        Self::initial_provisional_result(cx, kind, input) == result
+    fn is_initial_provisional_result(result: QueryResult<I>) -> Option<PathKind> {
+        match result {
+            Ok(response) => {
+                if has_no_inference_or_external_constraints(response) {
+                    if response.value.certainty == Certainty::Yes {
+                        return Some(PathKind::Coinductive);
+                    } else if response.value.certainty == Certainty::overflow(false) {
+                        return Some(PathKind::Unknown);
+                    }
+                }
+
+                None
+            }
+            Err(NoSolution) => Some(PathKind::Inductive),
+        }
     }
 
-    fn on_stack_overflow(cx: I, input: CanonicalInput<I>) -> QueryResult<I> {
+    fn stack_overflow_result(cx: I, input: CanonicalInput<I>) -> QueryResult<I> {
         response_no_constraints(cx, input, Certainty::overflow(true))
     }
 
-    fn on_fixpoint_overflow(cx: I, input: CanonicalInput<I>) -> QueryResult<I> {
+    fn fixpoint_overflow_result(cx: I, input: CanonicalInput<I>) -> QueryResult<I> {
         response_no_constraints(cx, input, Certainty::overflow(false))
     }
 
diff --git a/compiler/rustc_next_trait_solver/src/solve/trait_goals.rs b/compiler/rustc_next_trait_solver/src/solve/trait_goals.rs
index 3974114e9b4..e790ecd595b 100644
--- a/compiler/rustc_next_trait_solver/src/solve/trait_goals.rs
+++ b/compiler/rustc_next_trait_solver/src/solve/trait_goals.rs
@@ -369,18 +369,16 @@ where
                     return ecx.forced_ambiguity(MaybeCause::Ambiguity);
                 }
             };
+        let (inputs, output) = ecx.instantiate_binder_with_infer(tupled_inputs_and_output);
 
         // A built-in `Fn` impl only holds if the output is sized.
         // (FIXME: technically we only need to check this if the type is a fn ptr...)
-        let output_is_sized_pred = tupled_inputs_and_output.map_bound(|(_, output)| {
-            ty::TraitRef::new(cx, cx.require_trait_lang_item(SolverTraitLangItem::Sized), [output])
-        });
+        let output_is_sized_pred =
+            ty::TraitRef::new(cx, cx.require_trait_lang_item(SolverTraitLangItem::Sized), [output]);
 
-        let pred = tupled_inputs_and_output
-            .map_bound(|(inputs, _)| {
-                ty::TraitRef::new(cx, goal.predicate.def_id(), [goal.predicate.self_ty(), inputs])
-            })
-            .upcast(cx);
+        let pred =
+            ty::TraitRef::new(cx, goal.predicate.def_id(), [goal.predicate.self_ty(), inputs])
+                .upcast(cx);
         Self::probe_and_consider_implied_clause(
             ecx,
             CandidateSource::BuiltinImpl(BuiltinImplSource::Misc),
@@ -408,28 +406,26 @@ where
                 // This region doesn't matter because we're throwing away the coroutine type
                 Region::new_static(cx),
             )?;
+        let AsyncCallableRelevantTypes {
+            tupled_inputs_ty,
+            output_coroutine_ty,
+            coroutine_return_ty: _,
+        } = ecx.instantiate_binder_with_infer(tupled_inputs_and_output_and_coroutine);
 
         // A built-in `AsyncFn` impl only holds if the output is sized.
         // (FIXME: technically we only need to check this if the type is a fn ptr...)
-        let output_is_sized_pred = tupled_inputs_and_output_and_coroutine.map_bound(
-            |AsyncCallableRelevantTypes { output_coroutine_ty, .. }| {
-                ty::TraitRef::new(
-                    cx,
-                    cx.require_trait_lang_item(SolverTraitLangItem::Sized),
-                    [output_coroutine_ty],
-                )
-            },
+        let output_is_sized_pred = ty::TraitRef::new(
+            cx,
+            cx.require_trait_lang_item(SolverTraitLangItem::Sized),
+            [output_coroutine_ty],
         );
 
-        let pred = tupled_inputs_and_output_and_coroutine
-            .map_bound(|AsyncCallableRelevantTypes { tupled_inputs_ty, .. }| {
-                ty::TraitRef::new(
-                    cx,
-                    goal.predicate.def_id(),
-                    [goal.predicate.self_ty(), tupled_inputs_ty],
-                )
-            })
-            .upcast(cx);
+        let pred = ty::TraitRef::new(
+            cx,
+            goal.predicate.def_id(),
+            [goal.predicate.self_ty(), tupled_inputs_ty],
+        )
+        .upcast(cx);
         Self::probe_and_consider_implied_clause(
             ecx,
             CandidateSource::BuiltinImpl(BuiltinImplSource::Misc),
diff --git a/compiler/rustc_parse/src/lib.rs b/compiler/rustc_parse/src/lib.rs
index 88b67d792de..c26c7b9122a 100644
--- a/compiler/rustc_parse/src/lib.rs
+++ b/compiler/rustc_parse/src/lib.rs
@@ -9,6 +9,7 @@
 #![feature(default_field_values)]
 #![feature(if_let_guard)]
 #![feature(iter_intersperse)]
+#![feature(iter_order_by)]
 #![recursion_limit = "256"]
 // tidy-alphabetical-end
 
diff --git a/compiler/rustc_parse/src/parser/tokenstream/tests.rs b/compiler/rustc_parse/src/parser/tokenstream/tests.rs
index 19b2c98f5af..63177a72744 100644
--- a/compiler/rustc_parse/src/parser/tokenstream/tests.rs
+++ b/compiler/rustc_parse/src/parser/tokenstream/tests.rs
@@ -15,7 +15,7 @@ fn sp(a: u32, b: u32) -> Span {
 }
 
 fn cmp_token_stream(a: &TokenStream, b: &TokenStream) -> bool {
-    a.len() == b.len() && a.iter().zip(b.iter()).all(|(x, y)| x.eq_unspanned(y))
+    a.iter().eq_by(b.iter(), |x, y| x.eq_unspanned(y))
 }
 
 #[test]
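
For context on the `iter_order_by` change above: `Iterator::eq_by` (nightly-only, hence the added feature gate) compares two iterators element-wise with a caller-supplied predicate and also requires both iterators to end together, so the separate length check in the old `cmp_token_stream` becomes unnecessary. A minimal standalone sketch, independent of the compiler's `TokenStream` type:

#![feature(iter_order_by)]

fn main() {
    let a = [1, 2, 3];
    let b = [2, 4, 6];
    // Equal under "second is twice the first", including matching lengths.
    assert!(a.iter().eq_by(b.iter(), |x, y| *y == *x * 2));
    // Iterators of different lengths are never `eq_by`-equal,
    // even if the shared prefix matches.
    assert!(![1, 2].iter().eq_by([1, 2, 3].iter(), |x, y| x == y));
}
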
diff --git a/compiler/rustc_passes/messages.ftl b/compiler/rustc_passes/messages.ftl
index 870e0a90b54..df4016dfa1b 100644
--- a/compiler/rustc_passes/messages.ftl
+++ b/compiler/rustc_passes/messages.ftl
@@ -140,8 +140,17 @@ passes_doc_attribute_not_attribute =
     nonexistent builtin attribute `{$attribute}` used in `#[doc(attribute = "...")]`
     .help = only existing builtin attributes are allowed in core/std
 
-passes_doc_cfg_hide_takes_list =
-    `#[doc(cfg_hide(...))]` takes a list of attributes
+passes_doc_auto_cfg_expects_hide_or_show =
+    only `hide` or `show` are allowed in `#[doc(auto_cfg(...))]`
+
+passes_doc_auto_cfg_hide_show_expects_list =
+    `#![doc(auto_cfg({$attr_name}(...)))]` expects a list of items
+
+passes_doc_auto_cfg_hide_show_unexpected_item =
+    `#![doc(auto_cfg({$attr_name}(...)))]` only accepts identifiers or key/value items
+
+passes_doc_auto_cfg_wrong_literal =
+    expected boolean for `#[doc(auto_cfg = ...)]`
 
 passes_doc_expect_str =
     doc {$attr_name} attribute expects a string: #[doc({$attr_name} = "a")]
diff --git a/compiler/rustc_passes/src/check_attr.rs b/compiler/rustc_passes/src/check_attr.rs
index 007353f136d..4ea237cfa03 100644
--- a/compiler/rustc_passes/src/check_attr.rs
+++ b/compiler/rustc_passes/src/check_attr.rs
@@ -10,7 +10,7 @@ use std::collections::hash_map::Entry;
 use std::slice;
 
 use rustc_abi::{Align, ExternAbi, Size};
-use rustc_ast::{AttrStyle, LitKind, MetaItemInner, MetaItemKind, ast};
+use rustc_ast::{AttrStyle, LitKind, MetaItem, MetaItemInner, MetaItemKind, ast};
 use rustc_attr_parsing::{AttributeParser, Late};
 use rustc_data_structures::fx::FxHashMap;
 use rustc_errors::{Applicability, DiagCtxtHandle, IntoDiagArg, MultiSpan, StashKey};
@@ -1160,16 +1160,59 @@ impl<'tcx> CheckAttrVisitor<'tcx> {
         }
     }
 
-    /// Check that the `#![doc(cfg_hide(...))]` attribute only contains a list of attributes.
-    ///
-    fn check_doc_cfg_hide(&self, meta: &MetaItemInner, hir_id: HirId) {
-        if meta.meta_item_list().is_none() {
-            self.tcx.emit_node_span_lint(
-                INVALID_DOC_ATTRIBUTES,
-                hir_id,
-                meta.span(),
-                errors::DocCfgHideTakesList,
-            );
+    /// Check that the `#![doc(auto_cfg)]` attribute has the expected input.
+    fn check_doc_auto_cfg(&self, meta: &MetaItem, hir_id: HirId) {
+        match &meta.kind {
+            MetaItemKind::Word => {}
+            MetaItemKind::NameValue(lit) => {
+                if !matches!(lit.kind, LitKind::Bool(_)) {
+                    self.tcx.emit_node_span_lint(
+                        INVALID_DOC_ATTRIBUTES,
+                        hir_id,
+                        meta.span,
+                        errors::DocAutoCfgWrongLiteral,
+                    );
+                }
+            }
+            MetaItemKind::List(list) => {
+                for item in list {
+                    let Some(attr_name @ (sym::hide | sym::show)) = item.name() else {
+                        self.tcx.emit_node_span_lint(
+                            INVALID_DOC_ATTRIBUTES,
+                            hir_id,
+                            meta.span,
+                            errors::DocAutoCfgExpectsHideOrShow,
+                        );
+                        continue;
+                    };
+                    if let Some(list) = item.meta_item_list() {
+                        for item in list {
+                            let valid = item.meta_item().is_some_and(|meta| {
+                                meta.path.segments.len() == 1
+                                    && matches!(
+                                        &meta.kind,
+                                        MetaItemKind::Word | MetaItemKind::NameValue(_)
+                                    )
+                            });
+                            if !valid {
+                                self.tcx.emit_node_span_lint(
+                                    INVALID_DOC_ATTRIBUTES,
+                                    hir_id,
+                                    item.span(),
+                                    errors::DocAutoCfgHideShowUnexpectedItem { attr_name },
+                                );
+                            }
+                        }
+                    } else {
+                        self.tcx.emit_node_span_lint(
+                            INVALID_DOC_ATTRIBUTES,
+                            hir_id,
+                            meta.span,
+                            errors::DocAutoCfgHideShowExpectsList { attr_name },
+                        );
+                    }
+                }
+            }
         }
     }
 
@@ -1245,10 +1288,8 @@ impl<'tcx> CheckAttrVisitor<'tcx> {
                             self.check_attr_crate_level(attr, style, meta, hir_id);
                         }
 
-                        Some(sym::cfg_hide) => {
-                            if self.check_attr_crate_level(attr, style, meta, hir_id) {
-                                self.check_doc_cfg_hide(meta, hir_id);
-                            }
+                        Some(sym::auto_cfg) => {
+                            self.check_doc_auto_cfg(i_meta, hir_id);
                         }
 
                         Some(sym::inline | sym::no_inline) => {
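
As a rough illustration of the shapes the new check accepts, here is a standalone sketch using a hypothetical `Meta` type rather than rustc's `MetaItemKind`; it only models the outer entry names, not the inner ident/key-value validation:

// Hypothetical, simplified model of the meta-item shapes; not rustc's types.
enum Meta {
    Word,              // `#![doc(auto_cfg)]`
    Bool(bool),        // `#![doc(auto_cfg = true)]`
    List(Vec<String>), // `#![doc(auto_cfg(hide(...), show(...)))]`, entry names only
}

fn auto_cfg_shape_is_valid(meta: &Meta) -> bool {
    match meta {
        Meta::Word | Meta::Bool(_) => true,
        Meta::List(entries) => entries.iter().all(|name| name == "hide" || name == "show"),
    }
}

fn main() {
    assert!(auto_cfg_shape_is_valid(&Meta::Word));
    assert!(auto_cfg_shape_is_valid(&Meta::Bool(false)));
    assert!(auto_cfg_shape_is_valid(&Meta::List(vec!["hide".into(), "show".into()])));
    // `hidden` is neither `hide` nor `show`, so the real check lints here.
    assert!(!auto_cfg_shape_is_valid(&Meta::List(vec!["hidden".into()])));
}
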
diff --git a/compiler/rustc_passes/src/errors.rs b/compiler/rustc_passes/src/errors.rs
index cfd6b9e6dff..f0726014e0a 100644
--- a/compiler/rustc_passes/src/errors.rs
+++ b/compiler/rustc_passes/src/errors.rs
@@ -309,8 +309,24 @@ pub(crate) struct DocTestLiteral;
 pub(crate) struct DocTestTakesList;
 
 #[derive(LintDiagnostic)]
-#[diag(passes_doc_cfg_hide_takes_list)]
-pub(crate) struct DocCfgHideTakesList;
+#[diag(passes_doc_auto_cfg_wrong_literal)]
+pub(crate) struct DocAutoCfgWrongLiteral;
+
+#[derive(LintDiagnostic)]
+#[diag(passes_doc_auto_cfg_expects_hide_or_show)]
+pub(crate) struct DocAutoCfgExpectsHideOrShow;
+
+#[derive(LintDiagnostic)]
+#[diag(passes_doc_auto_cfg_hide_show_expects_list)]
+pub(crate) struct DocAutoCfgHideShowExpectsList {
+    pub attr_name: Symbol,
+}
+
+#[derive(LintDiagnostic)]
+#[diag(passes_doc_auto_cfg_hide_show_unexpected_item)]
+pub(crate) struct DocAutoCfgHideShowUnexpectedItem {
+    pub attr_name: Symbol,
+}
 
 #[derive(LintDiagnostic)]
 #[diag(passes_doc_test_unknown_any)]
diff --git a/compiler/rustc_resolve/src/ident.rs b/compiler/rustc_resolve/src/ident.rs
index 51489019950..4415300777f 100644
--- a/compiler/rustc_resolve/src/ident.rs
+++ b/compiler/rustc_resolve/src/ident.rs
@@ -901,6 +901,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
                 binding,
                 if resolution.non_glob_binding.is_some() { resolution.glob_binding } else { None },
                 parent_scope,
+                module,
                 finalize,
                 shadowing,
             );
@@ -1025,6 +1026,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
         binding: Option<NameBinding<'ra>>,
         shadowed_glob: Option<NameBinding<'ra>>,
         parent_scope: &ParentScope<'ra>,
+        module: Module<'ra>,
         finalize: Finalize,
         shadowing: Shadowing,
     ) -> Result<NameBinding<'ra>, (Determinacy, Weak)> {
@@ -1076,6 +1078,37 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
             self.macro_expanded_macro_export_errors.insert((path_span, binding.span));
         }
 
+        // If we encounter a re-export of a type with private fields, the type cannot be
+        // constructed through this re-export. We track that case here so that later privacy
+        // errors can be extended with the appropriate information.
+        if let Res::Def(_, def_id) = binding.res() {
+            let struct_ctor = match def_id.as_local() {
+                Some(def_id) => self.struct_constructors.get(&def_id).cloned(),
+                None => {
+                    let ctor = self.cstore().ctor_untracked(def_id);
+                    ctor.map(|(ctor_kind, ctor_def_id)| {
+                        let ctor_res = Res::Def(
+                            DefKind::Ctor(rustc_hir::def::CtorOf::Struct, ctor_kind),
+                            ctor_def_id,
+                        );
+                        let ctor_vis = self.tcx.visibility(ctor_def_id);
+                        let field_visibilities = self
+                            .tcx
+                            .associated_item_def_ids(def_id)
+                            .iter()
+                            .map(|field_id| self.tcx.visibility(field_id))
+                            .collect();
+                        (ctor_res, ctor_vis, field_visibilities)
+                    })
+                }
+            };
+            if let Some((_, _, fields)) = struct_ctor
+                && fields.iter().any(|vis| !self.is_accessible_from(*vis, module))
+            {
+                self.inaccessible_ctor_reexport.insert(path_span, binding.span);
+            }
+        }
+
         self.record_use(ident, binding, used);
         return Ok(binding);
     }
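
For intuition, the situation the new `inaccessible_ctor_reexport` map records can be reproduced in ordinary user code (plain illustration, not compiler internals): a re-exported tuple struct whose constructor is unusable outside its module because one field is private.

mod inner {
    // The tuple-struct constructor inherits the most restrictive field
    // visibility, so `Wrapper(..)` is only callable inside `inner`.
    pub struct Wrapper(pub u8, u8);

    impl Wrapper {
        pub fn new(public: u8, private: u8) -> Self {
            Wrapper(public, private)
        }
    }
}

pub use inner::Wrapper;

fn main() {
    // `Wrapper(1, 2)` here would be rejected: the type is reachable through
    // the re-export, but its constructor is not, which is exactly the case
    // the extended diagnostic now explains.
    let w = Wrapper::new(1, 2);
    assert_eq!(w.0, 1);
}
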
diff --git a/compiler/rustc_resolve/src/late/diagnostics.rs b/compiler/rustc_resolve/src/late/diagnostics.rs
index 9e3c0938836..8c2ddda7f98 100644
--- a/compiler/rustc_resolve/src/late/diagnostics.rs
+++ b/compiler/rustc_resolve/src/late/diagnostics.rs
@@ -1942,44 +1942,77 @@ impl<'ast, 'ra, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> {
                     return true;
                 };
 
+                let update_message =
+                    |this: &mut Self, err: &mut Diag<'_>, source: &PathSource<'_, '_, '_>| {
+                        match source {
+                            // e.g. `if let Enum::TupleVariant(field1, field2) = _`
+                            PathSource::TupleStruct(_, pattern_spans) => {
+                                err.primary_message(
+                                "cannot match against a tuple struct which contains private fields",
+                            );
+
+                                // Use spans of the tuple struct pattern.
+                                Some(Vec::from(*pattern_spans))
+                            }
+                            // e.g. `let _ = Enum::TupleVariant(field1, field2);`
+                            PathSource::Expr(Some(Expr {
+                                kind: ExprKind::Call(path, args),
+                                span: call_span,
+                                ..
+                            })) => {
+                                err.primary_message(
+                                "cannot initialize a tuple struct which contains private fields",
+                            );
+                                this.suggest_alternative_construction_methods(
+                                    def_id,
+                                    err,
+                                    path.span,
+                                    *call_span,
+                                    &args[..],
+                                );
+                                // Use spans of the tuple struct definition.
+                                this.r
+                                    .field_idents(def_id)
+                                    .map(|fields| fields.iter().map(|f| f.span).collect::<Vec<_>>())
+                            }
+                            _ => None,
+                        }
+                    };
                 let is_accessible = self.r.is_accessible_from(ctor_vis, self.parent_scope.module);
+                if let Some(use_span) = self.r.inaccessible_ctor_reexport.get(&span)
+                    && is_accessible
+                {
+                    err.span_note(
+                        *use_span,
+                        "the type is accessed through this re-export, but the type's constructor \
+                         is not visible in this import's scope due to private fields",
+                    );
+                    if is_accessible
+                        && fields
+                            .iter()
+                            .all(|vis| self.r.is_accessible_from(*vis, self.parent_scope.module))
+                    {
+                        err.span_suggestion_verbose(
+                            span,
+                            "the type can be constructed directly, because its fields are \
+                             available from the current scope",
+                            // Using `tcx.def_path_str` causes the compiler to hang.
+                            // We don't need to handle foreign crate types because in that case you
+                            // can't access the ctor either way.
+                            format!(
+                                "crate{}", // The method already has leading `::`.
+                                self.r.tcx.def_path(def_id).to_string_no_crate_verbose(),
+                            ),
+                            Applicability::MachineApplicable,
+                        );
+                    }
+                    update_message(self, err, &source);
+                }
                 if !is_expected(ctor_def) || is_accessible {
                     return true;
                 }
 
-                let field_spans = match source {
-                    // e.g. `if let Enum::TupleVariant(field1, field2) = _`
-                    PathSource::TupleStruct(_, pattern_spans) => {
-                        err.primary_message(
-                            "cannot match against a tuple struct which contains private fields",
-                        );
-
-                        // Use spans of the tuple struct pattern.
-                        Some(Vec::from(pattern_spans))
-                    }
-                    // e.g. `let _ = Enum::TupleVariant(field1, field2);`
-                    PathSource::Expr(Some(Expr {
-                        kind: ExprKind::Call(path, args),
-                        span: call_span,
-                        ..
-                    })) => {
-                        err.primary_message(
-                            "cannot initialize a tuple struct which contains private fields",
-                        );
-                        self.suggest_alternative_construction_methods(
-                            def_id,
-                            err,
-                            path.span,
-                            *call_span,
-                            &args[..],
-                        );
-                        // Use spans of the tuple struct definition.
-                        self.r
-                            .field_idents(def_id)
-                            .map(|fields| fields.iter().map(|f| f.span).collect::<Vec<_>>())
-                    }
-                    _ => None,
-                };
+                let field_spans = update_message(self, err, &source);
 
                 if let Some(spans) =
                     field_spans.filter(|spans| spans.len() > 0 && fields.len() == spans.len())
diff --git a/compiler/rustc_resolve/src/lib.rs b/compiler/rustc_resolve/src/lib.rs
index 8959068b2a6..b44b1c966a4 100644
--- a/compiler/rustc_resolve/src/lib.rs
+++ b/compiler/rustc_resolve/src/lib.rs
@@ -1167,6 +1167,11 @@ pub struct Resolver<'ra, 'tcx> {
     /// Crate-local macro expanded `macro_export` referred to by a module-relative path.
     macro_expanded_macro_export_errors: BTreeSet<(Span, Span)> = BTreeSet::new(),
 
+    /// When a re-exported type has a constructor that is inaccessible because some of its fields
+    /// are not visible from the import's scope, we record that the type cannot be constructed
+    /// through the re-export. We use this information to extend the existing diagnostic.
+    inaccessible_ctor_reexport: FxHashMap<Span, Span>,
+
     arenas: &'ra ResolverArenas<'ra>,
     dummy_binding: NameBinding<'ra>,
     builtin_types_bindings: FxHashMap<Symbol, NameBinding<'ra>>,
@@ -1595,6 +1600,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> {
             glob_map: Default::default(),
             used_imports: FxHashSet::default(),
             maybe_unused_trait_imports: Default::default(),
+            inaccessible_ctor_reexport: Default::default(),
 
             arenas,
             dummy_binding: arenas.new_pub_res_binding(Res::Err, DUMMY_SP, LocalExpnId::ROOT),
diff --git a/compiler/rustc_session/src/config.rs b/compiler/rustc_session/src/config.rs
index ebb6a93b1dd..d1426ff55fb 100644
--- a/compiler/rustc_session/src/config.rs
+++ b/compiler/rustc_session/src/config.rs
@@ -258,6 +258,8 @@ pub enum AutoDiff {
     LooseTypes,
     /// Runs Enzyme's aggressive inlining
     Inline,
+    /// Disable Type Tree
+    NoTT,
 }
 
 /// Settings for `-Z instrument-xray` flag.
@@ -1193,6 +1195,7 @@ pub struct OutputFilenames {
     filestem: String,
     pub single_output_file: Option<OutFileName>,
     temps_directory: Option<PathBuf>,
+    explicit_dwo_out_directory: Option<PathBuf>,
     pub outputs: OutputTypes,
 }
 
@@ -1225,6 +1228,7 @@ impl OutputFilenames {
         out_filestem: String,
         single_output_file: Option<OutFileName>,
         temps_directory: Option<PathBuf>,
+        explicit_dwo_out_directory: Option<PathBuf>,
         extra: String,
         outputs: OutputTypes,
     ) -> Self {
@@ -1232,6 +1236,7 @@ impl OutputFilenames {
             out_directory,
             single_output_file,
             temps_directory,
+            explicit_dwo_out_directory,
             outputs,
             crate_stem: format!("{out_crate_name}{extra}"),
             filestem: format!("{out_filestem}{extra}"),
@@ -1281,7 +1286,14 @@ impl OutputFilenames {
         codegen_unit_name: &str,
         invocation_temp: Option<&str>,
     ) -> PathBuf {
-        self.temp_path_ext_for_cgu(DWARF_OBJECT_EXT, codegen_unit_name, invocation_temp)
+        let p = self.temp_path_ext_for_cgu(DWARF_OBJECT_EXT, codegen_unit_name, invocation_temp);
+        if let Some(dwo_out) = &self.explicit_dwo_out_directory {
+            let mut o = dwo_out.clone();
+            o.push(p.file_name().unwrap());
+            o
+        } else {
+            p
+        }
     }
 
     /// Like `temp_path`, but also supports things where there is no corresponding
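
The `dwo_output_path` change above keeps the file name produced by the normal temp-path computation but swaps the directory when the new `split_dwarf_out_dir` option is set. A standalone sketch of that redirect (hypothetical helper name, not the compiler's API):

use std::path::{Path, PathBuf};

fn redirect_dwo(default_path: PathBuf, explicit_dir: Option<&Path>) -> PathBuf {
    match explicit_dir {
        // Keep only the file name and place it in the requested directory.
        Some(dir) => dir.join(default_path.file_name().expect("temp path has a file name")),
        // No explicit directory: fall back to the usual temp path.
        None => default_path,
    }
}

fn main() {
    let p = redirect_dwo(PathBuf::from("target/debug/deps/foo.dwo"), Some(Path::new("/tmp/dwo")));
    assert_eq!(p, PathBuf::from("/tmp/dwo/foo.dwo"));
    let q = redirect_dwo(PathBuf::from("target/debug/deps/foo.dwo"), None);
    assert_eq!(q, PathBuf::from("target/debug/deps/foo.dwo"));
}
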
diff --git a/compiler/rustc_session/src/config/cfg.rs b/compiler/rustc_session/src/config/cfg.rs
index f3d91ce4a5d..a72f6201dce 100644
--- a/compiler/rustc_session/src/config/cfg.rs
+++ b/compiler/rustc_session/src/config/cfg.rs
@@ -259,11 +259,11 @@ pub(crate) fn default_configuration(sess: &Session) -> Cfg {
     });
     let mut has_atomic = false;
     for (i, align) in [
-        (8, layout.i8_align.abi),
-        (16, layout.i16_align.abi),
-        (32, layout.i32_align.abi),
-        (64, layout.i64_align.abi),
-        (128, layout.i128_align.abi),
+        (8, layout.i8_align),
+        (16, layout.i16_align),
+        (32, layout.i32_align),
+        (64, layout.i64_align),
+        (128, layout.i128_align),
     ] {
         if i >= sess.target.min_atomic_width() && i <= sess.target.max_atomic_width() {
             if !has_atomic {
diff --git a/compiler/rustc_session/src/options.rs b/compiler/rustc_session/src/options.rs
index b2cc169f12c..6dd90546de1 100644
--- a/compiler/rustc_session/src/options.rs
+++ b/compiler/rustc_session/src/options.rs
@@ -792,7 +792,7 @@ mod desc {
     pub(crate) const parse_list: &str = "a space-separated list of strings";
     pub(crate) const parse_list_with_polarity: &str =
         "a comma-separated list of strings, with elements beginning with + or -";
-    pub(crate) const parse_autodiff: &str = "a comma separated list of settings: `Enable`, `PrintSteps`, `PrintTA`, `PrintTAFn`, `PrintAA`, `PrintPerf`, `PrintModBefore`, `PrintModAfter`, `PrintModFinal`, `PrintPasses`, `NoPostopt`, `LooseTypes`, `Inline`";
+    pub(crate) const parse_autodiff: &str = "a comma separated list of settings: `Enable`, `PrintSteps`, `PrintTA`, `PrintTAFn`, `PrintAA`, `PrintPerf`, `PrintModBefore`, `PrintModAfter`, `PrintModFinal`, `PrintPasses`, `NoPostopt`, `LooseTypes`, `Inline`, `NoTT`";
     pub(crate) const parse_offload: &str = "a comma separated list of settings: `Enable`";
     pub(crate) const parse_comma_list: &str = "a comma-separated list of strings";
     pub(crate) const parse_opt_comma_list: &str = parse_comma_list;
@@ -1481,6 +1481,7 @@ pub mod parse {
                 "PrintPasses" => AutoDiff::PrintPasses,
                 "LooseTypes" => AutoDiff::LooseTypes,
                 "Inline" => AutoDiff::Inline,
+                "NoTT" => AutoDiff::NoTT,
                 _ => {
                     // FIXME(ZuseZ4): print an error saying which value is not recognized
                     return false;
@@ -2633,6 +2634,8 @@ written to standard error output)"),
                  file which is ignored by the linker
         `single`: sections which do not require relocation are written into object file but ignored
                   by the linker"),
+    split_dwarf_out_dir : Option<PathBuf> = (None, parse_opt_pathbuf, [TRACKED],
+        "location for writing split DWARF objects (`.dwo`) if enabled"),
     split_lto_unit: Option<bool> = (None, parse_opt_bool, [TRACKED],
         "enable LTO unit splitting (default: no)"),
     src_hash_algorithm: Option<SourceFileHashAlgorithm> = (None, parse_src_file_hash, [TRACKED],
diff --git a/compiler/rustc_span/src/analyze_source_file.rs b/compiler/rustc_span/src/analyze_source_file.rs
index c32593a6d95..bb2cda77dff 100644
--- a/compiler/rustc_span/src/analyze_source_file.rs
+++ b/compiler/rustc_span/src/analyze_source_file.rs
@@ -81,8 +81,8 @@ cfg_select! {
                 // use `loadu`, which supports unaligned loading.
                 let chunk = unsafe { _mm_loadu_si128(chunk.as_ptr() as *const __m128i) };
 
-                // For character in the chunk, see if its byte value is < 0, which
-                // indicates that it's part of a UTF-8 char.
+                // For each character in the chunk, see if its byte value is < 0,
+                // which indicates that it's part of a UTF-8 char.
                 let multibyte_test = _mm_cmplt_epi8(chunk, _mm_set1_epi8(0));
                 // Create a bit mask from the comparison results.
                 let multibyte_mask = _mm_movemask_epi8(multibyte_test);
@@ -132,8 +132,111 @@ cfg_select! {
             }
         }
     }
+    target_arch = "loongarch64" => {
+        fn analyze_source_file_dispatch(
+            src: &str,
+            lines: &mut Vec<RelativeBytePos>,
+            multi_byte_chars: &mut Vec<MultiByteChar>,
+        ) {
+            use std::arch::is_loongarch_feature_detected;
+
+            if is_loongarch_feature_detected!("lsx") {
+                unsafe {
+                    analyze_source_file_lsx(src, lines, multi_byte_chars);
+                }
+            } else {
+                analyze_source_file_generic(
+                    src,
+                    src.len(),
+                    RelativeBytePos::from_u32(0),
+                    lines,
+                    multi_byte_chars,
+                );
+            }
+        }
+
+        /// Checks 16 byte chunks of text at a time. If the chunk contains
+        /// something other than printable ASCII characters and newlines, the
+        /// function falls back to the generic implementation. Otherwise it uses
+        /// LSX intrinsics to quickly find all newlines.
+        #[target_feature(enable = "lsx")]
+        unsafe fn analyze_source_file_lsx(
+            src: &str,
+            lines: &mut Vec<RelativeBytePos>,
+            multi_byte_chars: &mut Vec<MultiByteChar>,
+        ) {
+            use std::arch::loongarch64::*;
+
+            const CHUNK_SIZE: usize = 16;
+
+            let (chunks, tail) = src.as_bytes().as_chunks::<CHUNK_SIZE>();
+
+            // This variable keeps track of where we should start decoding a
+            // chunk. If a multi-byte character spans across chunk boundaries,
+            // we need to skip that part in the next chunk because we already
+            // handled it.
+            let mut intra_chunk_offset = 0;
+
+            for (chunk_index, chunk) in chunks.iter().enumerate() {
+                // All LSX memory instructions support unaligned access, so using
+                // vld is fine.
+                let chunk = unsafe { lsx_vld::<0>(chunk.as_ptr() as *const i8) };
+
+                // For each character in the chunk, see if its byte value is < 0,
+                // which indicates that it's part of a UTF-8 char.
+                let multibyte_mask = lsx_vmskltz_b(chunk);
+                // Create a bit mask from the comparison results.
+                let multibyte_mask = lsx_vpickve2gr_w::<0>(multibyte_mask);
+
+                // If the bit mask is all zero, we only have ASCII chars here:
+                if multibyte_mask == 0 {
+                    assert!(intra_chunk_offset == 0);
+
+                    // Check for newlines in the chunk
+                    let newlines_test = lsx_vseqi_b::<{b'\n' as i32}>(chunk);
+                    let newlines_mask = lsx_vmskltz_b(newlines_test);
+                    let mut newlines_mask = lsx_vpickve2gr_w::<0>(newlines_mask);
+
+                    let output_offset = RelativeBytePos::from_usize(chunk_index * CHUNK_SIZE + 1);
+
+                    while newlines_mask != 0 {
+                        let index = newlines_mask.trailing_zeros();
+
+                        lines.push(RelativeBytePos(index) + output_offset);
+
+                        // Clear the bit, so we can find the next one.
+                        newlines_mask &= newlines_mask - 1;
+                    }
+                } else {
+                    // The slow path.
+                    // There are multibyte chars in here, fallback to generic decoding.
+                    let scan_start = chunk_index * CHUNK_SIZE + intra_chunk_offset;
+                    intra_chunk_offset = analyze_source_file_generic(
+                        &src[scan_start..],
+                        CHUNK_SIZE - intra_chunk_offset,
+                        RelativeBytePos::from_usize(scan_start),
+                        lines,
+                        multi_byte_chars,
+                    );
+                }
+            }
+
+            // There might still be a tail left to analyze
+            let tail_start = src.len() - tail.len() + intra_chunk_offset;
+            if tail_start < src.len() {
+                analyze_source_file_generic(
+                    &src[tail_start..],
+                    src.len() - tail_start,
+                    RelativeBytePos::from_usize(tail_start),
+                    lines,
+                    multi_byte_chars,
+                );
+            }
+        }
+    }
     _ => {
-        // The target (or compiler version) does not support SSE2 ...
+        // The target (or compiler version) does not support the vector instructions
+        // that our specialized implementations need (x86 SSE2, loongarch64 LSX)...
         fn analyze_source_file_dispatch(
             src: &str,
             lines: &mut Vec<RelativeBytePos>,
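
The LSX path above uses essentially the same bit-mask scan as the existing SSE2 path: each set bit in the mask marks a newline byte within the 16-byte chunk, `trailing_zeros` yields its index, and `mask &= mask - 1` clears the lowest set bit so the loop visits every newline once. A portable sketch of just that scan:

fn newline_offsets_in_chunk(chunk: &[u8; 16]) -> Vec<u32> {
    // Build the mask a SIMD compare would produce: bit i is set if byte i is '\n'.
    let mut mask: u32 = 0;
    for (i, b) in chunk.iter().enumerate() {
        if *b == b'\n' {
            mask |= 1 << i;
        }
    }
    let mut offsets = Vec::new();
    while mask != 0 {
        offsets.push(mask.trailing_zeros());
        // Clear the lowest set bit to move on to the next newline.
        mask &= mask - 1;
    }
    offsets
}

fn main() {
    let chunk = *b"ab\ncd\nefghij\nklm";
    assert_eq!(newline_offsets_in_chunk(&chunk), vec![2, 5, 12]);
}
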
diff --git a/compiler/rustc_span/src/lib.rs b/compiler/rustc_span/src/lib.rs
index 35dbbe58db9..ededbea57e9 100644
--- a/compiler/rustc_span/src/lib.rs
+++ b/compiler/rustc_span/src/lib.rs
@@ -17,6 +17,7 @@
 
 // tidy-alphabetical-start
 #![allow(internal_features)]
+#![cfg_attr(target_arch = "loongarch64", feature(stdarch_loongarch))]
 #![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
 #![doc(rust_logo)]
 #![feature(array_windows)]
diff --git a/compiler/rustc_span/src/symbol.rs b/compiler/rustc_span/src/symbol.rs
index faf32523baa..b34a64108e3 100644
--- a/compiler/rustc_span/src/symbol.rs
+++ b/compiler/rustc_span/src/symbol.rs
@@ -545,6 +545,7 @@ symbols! {
         attributes,
         audit_that,
         augmented_assignments,
+        auto_cfg,
         auto_traits,
         autodiff,
         autodiff_forward,
@@ -628,7 +629,6 @@ symbols! {
         cfg_emscripten_wasm_eh,
         cfg_eval,
         cfg_fmt_debug,
-        cfg_hide,
         cfg_overflow_checks,
         cfg_panic,
         cfg_relocation_model,
@@ -679,6 +679,7 @@ symbols! {
         cmpxchg16b_target_feature,
         cmse_nonsecure_entry,
         coerce_pointee_validated,
+        coerce_shared,
         coerce_unsized,
         cold,
         cold_path,
@@ -939,6 +940,7 @@ symbols! {
         ermsb_target_feature,
         exact_div,
         except,
+        exception_handling: "exception-handling",
         exchange_malloc,
         exclusive_range_pattern,
         exhaustive_integer_patterns,
@@ -1149,6 +1151,7 @@ symbols! {
         hashset_iter_ty,
         hexagon_target_feature,
         hidden,
+        hide,
         hint,
         homogeneous_aggregate,
         host,
@@ -1987,6 +1990,7 @@ symbols! {
         shl_assign,
         shorter_tail_lifetimes,
         should_panic,
+        show,
         shr,
         shr_assign,
         sig_dfl,
diff --git a/compiler/rustc_target/src/callconv/arm.rs b/compiler/rustc_target/src/callconv/arm.rs
index 70830fa07b6..abc9a404e2e 100644
--- a/compiler/rustc_target/src/callconv/arm.rs
+++ b/compiler/rustc_target/src/callconv/arm.rs
@@ -77,7 +77,7 @@ where
         }
     }
 
-    let align = arg.layout.align.abi.bytes();
+    let align = arg.layout.align.bytes();
     let total = arg.layout.size;
     arg.cast_to(Uniform::consecutive(if align <= 4 { Reg::i32() } else { Reg::i64() }, total));
 }
diff --git a/compiler/rustc_target/src/callconv/loongarch.rs b/compiler/rustc_target/src/callconv/loongarch.rs
index 9213d73e24e..bc3c9601fa3 100644
--- a/compiler/rustc_target/src/callconv/loongarch.rs
+++ b/compiler/rustc_target/src/callconv/loongarch.rs
@@ -322,7 +322,7 @@ fn classify_arg<'a, Ty, C>(
     }
 
     let total = arg.layout.size;
-    let align = arg.layout.align.abi.bits();
+    let align = arg.layout.align.bits();
 
     // "Scalars wider than 2✕XLEN are passed by reference and are replaced in
     // the argument list with the address."
diff --git a/compiler/rustc_target/src/callconv/mips.rs b/compiler/rustc_target/src/callconv/mips.rs
index 48a01da865b..8ffd7bd1778 100644
--- a/compiler/rustc_target/src/callconv/mips.rs
+++ b/compiler/rustc_target/src/callconv/mips.rs
@@ -24,7 +24,7 @@ where
     }
     let dl = cx.data_layout();
     let size = arg.layout.size;
-    let align = arg.layout.align.max(dl.i32_align).min(dl.i64_align).abi;
+    let align = arg.layout.align.abi.max(dl.i32_align).min(dl.i64_align);
 
     if arg.layout.is_aggregate() {
         let pad_i32 = !offset.is_aligned(align);
diff --git a/compiler/rustc_target/src/callconv/mips64.rs b/compiler/rustc_target/src/callconv/mips64.rs
index 0209838bec1..8386a15933c 100644
--- a/compiler/rustc_target/src/callconv/mips64.rs
+++ b/compiler/rustc_target/src/callconv/mips64.rs
@@ -110,9 +110,9 @@ where
                 // We only care about aligned doubles
                 if let BackendRepr::Scalar(scalar) = field.backend_repr {
                     if scalar.primitive() == Primitive::Float(Float::F64) {
-                        if offset.is_aligned(dl.f64_align.abi) {
+                        if offset.is_aligned(dl.f64_align) {
                             // Insert enough integers to cover [last_offset, offset)
-                            assert!(last_offset.is_aligned(dl.f64_align.abi));
+                            assert!(last_offset.is_aligned(dl.f64_align));
                             for _ in 0..((offset - last_offset).bits() / 64)
                                 .min((prefix.len() - prefix_index) as u64)
                             {
diff --git a/compiler/rustc_target/src/callconv/mod.rs b/compiler/rustc_target/src/callconv/mod.rs
index 7a7c63c475b..c59af581a1f 100644
--- a/compiler/rustc_target/src/callconv/mod.rs
+++ b/compiler/rustc_target/src/callconv/mod.rs
@@ -332,7 +332,7 @@ impl CastTarget {
         self.prefix
             .iter()
             .filter_map(|x| x.map(|reg| reg.align(cx)))
-            .fold(cx.data_layout().aggregate_align.abi.max(self.rest.align(cx)), |acc, align| {
+            .fold(cx.data_layout().aggregate_align.max(self.rest.align(cx)), |acc, align| {
                 acc.max(align)
             })
     }
diff --git a/compiler/rustc_target/src/callconv/nvptx64.rs b/compiler/rustc_target/src/callconv/nvptx64.rs
index 44977de7fcb..dc32dd87a7e 100644
--- a/compiler/rustc_target/src/callconv/nvptx64.rs
+++ b/compiler/rustc_target/src/callconv/nvptx64.rs
@@ -21,7 +21,7 @@ fn classify_arg<Ty>(arg: &mut ArgAbi<'_, Ty>) {
 
 /// the pass mode used for aggregates in arg and ret position
 fn classify_aggregate<Ty>(arg: &mut ArgAbi<'_, Ty>) {
-    let align_bytes = arg.layout.align.abi.bytes();
+    let align_bytes = arg.layout.align.bytes();
     let size = arg.layout.size;
 
     let reg = match align_bytes {
@@ -60,7 +60,7 @@ where
     //     "`extern \"ptx-kernel\"` doesn't allow passing types other than primitives and structs"
     // );
 
-    let align_bytes = arg.layout.align.abi.bytes();
+    let align_bytes = arg.layout.align.bytes();
 
     let unit = match align_bytes {
         1 => Reg::i8(),
diff --git a/compiler/rustc_target/src/callconv/powerpc64.rs b/compiler/rustc_target/src/callconv/powerpc64.rs
index 89ec85e4b66..be1d13816ef 100644
--- a/compiler/rustc_target/src/callconv/powerpc64.rs
+++ b/compiler/rustc_target/src/callconv/powerpc64.rs
@@ -89,7 +89,7 @@ where
         // Aggregates larger than i64 should be padded at the tail to fill out a whole number
         // of i64s or i128s, depending on the aggregate alignment. Always use an array for
         // this, even if there is only a single element.
-        let reg = if arg.layout.align.abi.bytes() > 8 { Reg::i128() } else { Reg::i64() };
+        let reg = if arg.layout.align.bytes() > 8 { Reg::i128() } else { Reg::i64() };
         arg.cast_to(Uniform::consecutive(
             reg,
             size.align_to(Align::from_bytes(reg.size.bytes()).unwrap()),
diff --git a/compiler/rustc_target/src/callconv/riscv.rs b/compiler/rustc_target/src/callconv/riscv.rs
index 161e2c1645f..16de3fe070d 100644
--- a/compiler/rustc_target/src/callconv/riscv.rs
+++ b/compiler/rustc_target/src/callconv/riscv.rs
@@ -328,7 +328,7 @@ fn classify_arg<'a, Ty, C>(
     }
 
     let total = arg.layout.size;
-    let align = arg.layout.align.abi.bits();
+    let align = arg.layout.align.bits();
 
     // "Scalars wider than 2✕XLEN are passed by reference and are replaced in
     // the argument list with the address."
diff --git a/compiler/rustc_target/src/callconv/sparc.rs b/compiler/rustc_target/src/callconv/sparc.rs
index 48a01da865b..8ffd7bd1778 100644
--- a/compiler/rustc_target/src/callconv/sparc.rs
+++ b/compiler/rustc_target/src/callconv/sparc.rs
@@ -24,7 +24,7 @@ where
     }
     let dl = cx.data_layout();
     let size = arg.layout.size;
-    let align = arg.layout.align.max(dl.i32_align).min(dl.i64_align).abi;
+    let align = arg.layout.align.abi.max(dl.i32_align).min(dl.i64_align);
 
     if arg.layout.is_aggregate() {
         let pad_i32 = !offset.is_aligned(align);
diff --git a/compiler/rustc_target/src/callconv/sparc64.rs b/compiler/rustc_target/src/callconv/sparc64.rs
index ecc9067ced3..62c8ed1dc21 100644
--- a/compiler/rustc_target/src/callconv/sparc64.rs
+++ b/compiler/rustc_target/src/callconv/sparc64.rs
@@ -29,7 +29,7 @@ where
 
     data.has_float = true;
 
-    if !data.last_offset.is_aligned(dl.f64_align.abi) && data.last_offset < offset {
+    if !data.last_offset.is_aligned(dl.f64_align) && data.last_offset < offset {
         if data.prefix_index == data.prefix.len() {
             return data;
         }
diff --git a/compiler/rustc_target/src/callconv/xtensa.rs b/compiler/rustc_target/src/callconv/xtensa.rs
index a73a70a1a0c..561ee98787d 100644
--- a/compiler/rustc_target/src/callconv/xtensa.rs
+++ b/compiler/rustc_target/src/callconv/xtensa.rs
@@ -48,7 +48,7 @@ where
     }
 
     let size = arg.layout.size.bits();
-    let needed_align = arg.layout.align.abi.bits();
+    let needed_align = arg.layout.align.bits();
     let mut must_use_stack = false;
 
     // Determine the number of GPRs needed to pass the current argument
diff --git a/compiler/rustc_trait_selection/src/error_reporting/traits/mod.rs b/compiler/rustc_trait_selection/src/error_reporting/traits/mod.rs
index b3d1b8e3888..9052031ce4f 100644
--- a/compiler/rustc_trait_selection/src/error_reporting/traits/mod.rs
+++ b/compiler/rustc_trait_selection/src/error_reporting/traits/mod.rs
@@ -21,7 +21,7 @@ use rustc_infer::traits::{
 };
 use rustc_middle::ty::print::{PrintTraitRefExt as _, with_no_trimmed_paths};
 use rustc_middle::ty::{self, Ty, TyCtxt};
-use rustc_span::{ErrorGuaranteed, ExpnKind, Span};
+use rustc_span::{DesugaringKind, ErrorGuaranteed, ExpnKind, Span};
 use tracing::{info, instrument};
 
 pub use self::overflow::*;
@@ -154,9 +154,20 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> {
             })
             .collect();
 
-        // Ensure `T: Sized`, `T: MetaSized`, `T: PointeeSized` and `T: WF` obligations come last.
+        // Ensure `T: Sized`, `T: MetaSized`, `T: PointeeSized` and `T: WF` obligations come last,
+        // and `Subtype` obligations from `FormatLiteral` desugarings come first.
         // This lets us display diagnostics with more relevant type information and hide redundant
         // E0282 errors.
+        #[derive(Debug, PartialEq, Eq, PartialOrd, Ord)]
+        enum ErrorSortKey {
+            SubtypeFormat(usize, usize),
+            OtherKind,
+            SizedTrait,
+            MetaSizedTrait,
+            PointeeSizedTrait,
+            Coerce,
+            WellFormed,
+        }
         errors.sort_by_key(|e| {
             let maybe_sizedness_did = match e.obligation.predicate.kind().skip_binder() {
                 ty::PredicateKind::Clause(ty::ClauseKind::Trait(pred)) => Some(pred.def_id()),
@@ -165,12 +176,30 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> {
             };
 
             match e.obligation.predicate.kind().skip_binder() {
-                _ if maybe_sizedness_did == self.tcx.lang_items().sized_trait() => 1,
-                _ if maybe_sizedness_did == self.tcx.lang_items().meta_sized_trait() => 2,
-                _ if maybe_sizedness_did == self.tcx.lang_items().pointee_sized_trait() => 3,
-                ty::PredicateKind::Coerce(_) => 4,
-                ty::PredicateKind::Clause(ty::ClauseKind::WellFormed(_)) => 5,
-                _ => 0,
+                ty::PredicateKind::Subtype(_)
+                    if matches!(
+                        e.obligation.cause.span.desugaring_kind(),
+                        Some(DesugaringKind::FormatLiteral { .. })
+                    ) =>
+                {
+                    let (_, row, col, ..) =
+                        self.tcx.sess.source_map().span_to_location_info(e.obligation.cause.span);
+                    ErrorSortKey::SubtypeFormat(row, col)
+                }
+                _ if maybe_sizedness_did == self.tcx.lang_items().sized_trait() => {
+                    ErrorSortKey::SizedTrait
+                }
+                _ if maybe_sizedness_did == self.tcx.lang_items().meta_sized_trait() => {
+                    ErrorSortKey::MetaSizedTrait
+                }
+                _ if maybe_sizedness_did == self.tcx.lang_items().pointee_sized_trait() => {
+                    ErrorSortKey::PointeeSizedTrait
+                }
+                ty::PredicateKind::Coerce(_) => ErrorSortKey::Coerce,
+                ty::PredicateKind::Clause(ty::ClauseKind::WellFormed(_)) => {
+                    ErrorSortKey::WellFormed
+                }
+                _ => ErrorSortKey::OtherKind,
             }
         });
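
The derived `Ord` on `ErrorSortKey` is what makes this work: enum variants order by declaration order, and `SubtypeFormat(row, col)` entries order among themselves by their payload, so format-literal subtype errors come first in source order and sizedness/WF errors last. A self-contained sketch (trimmed variant list, not the compiler's types):

#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)]
enum SortKey {
    SubtypeFormat(usize, usize), // (row, col) of the format literal
    OtherKind,
    SizedTrait,
    WellFormed,
}

fn main() {
    let mut keys = vec![
        SortKey::WellFormed,
        SortKey::SubtypeFormat(3, 7),
        SortKey::SizedTrait,
        SortKey::SubtypeFormat(1, 2),
        SortKey::OtherKind,
    ];
    keys.sort();
    assert_eq!(
        keys,
        [
            SortKey::SubtypeFormat(1, 2),
            SortKey::SubtypeFormat(3, 7),
            SortKey::OtherKind,
            SortKey::SizedTrait,
            SortKey::WellFormed,
        ]
    );
}
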
 
diff --git a/compiler/rustc_transmute/src/layout/tree.rs b/compiler/rustc_transmute/src/layout/tree.rs
index a02e8ecf613..7f626e8c4e8 100644
--- a/compiler/rustc_transmute/src/layout/tree.rs
+++ b/compiler/rustc_transmute/src/layout/tree.rs
@@ -361,7 +361,7 @@ pub(crate) mod rustc {
 
                 ty::Ref(region, ty, mutability) => {
                     let layout = layout_of(cx, *ty)?;
-                    let referent_align = layout.align.abi.bytes_usize();
+                    let referent_align = layout.align.bytes_usize();
                     let referent_size = layout.size.bytes_usize();
 
                     Ok(Tree::Ref(Reference {
diff --git a/compiler/rustc_ty_utils/src/layout.rs b/compiler/rustc_ty_utils/src/layout.rs
index f59bc2117d5..317d101dafe 100644
--- a/compiler/rustc_ty_utils/src/layout.rs
+++ b/compiler/rustc_ty_utils/src/layout.rs
@@ -795,7 +795,7 @@ fn variant_info_for_adt<'tcx>(
                     name,
                     offset: offset.bytes(),
                     size: field_layout.size.bytes(),
-                    align: field_layout.align.abi.bytes(),
+                    align: field_layout.align.bytes(),
                     type_name: None,
                 }
             })
@@ -804,7 +804,7 @@ fn variant_info_for_adt<'tcx>(
         VariantInfo {
             name: n,
             kind: if layout.is_unsized() { SizeKind::Min } else { SizeKind::Exact },
-            align: layout.align.abi.bytes(),
+            align: layout.align.bytes(),
             size: if min_size.bytes() == 0 { layout.size.bytes() } else { min_size.bytes() },
             fields: field_info,
         }
@@ -877,7 +877,7 @@ fn variant_info_for_coroutine<'tcx>(
                 name: *name,
                 offset: offset.bytes(),
                 size: field_layout.size.bytes(),
-                align: field_layout.align.abi.bytes(),
+                align: field_layout.align.bytes(),
                 type_name: None,
             }
         })
@@ -905,7 +905,7 @@ fn variant_info_for_coroutine<'tcx>(
                         }),
                         offset: offset.bytes(),
                         size: field_layout.size.bytes(),
-                        align: field_layout.align.abi.bytes(),
+                        align: field_layout.align.bytes(),
                         // Include the type name if there is no field name, or if the name is the
                         // __awaitee placeholder symbol which means a child future being `.await`ed.
                         type_name: (field_name.is_none() || field_name == Some(sym::__awaitee))
@@ -946,7 +946,7 @@ fn variant_info_for_coroutine<'tcx>(
                 name: Some(Symbol::intern(&ty::CoroutineArgs::variant_name(variant_idx))),
                 kind: SizeKind::Exact,
                 size: variant_size.bytes(),
-                align: variant_layout.align.abi.bytes(),
+                align: variant_layout.align.bytes(),
                 fields,
             }
         })
diff --git a/compiler/rustc_ty_utils/src/layout/invariant.rs b/compiler/rustc_ty_utils/src/layout/invariant.rs
index 1311ee31182..b768269215f 100644
--- a/compiler/rustc_ty_utils/src/layout/invariant.rs
+++ b/compiler/rustc_ty_utils/src/layout/invariant.rs
@@ -8,7 +8,7 @@ use rustc_middle::ty::layout::{HasTyCtxt, LayoutCx, TyAndLayout};
 pub(super) fn layout_sanity_check<'tcx>(cx: &LayoutCx<'tcx>, layout: &TyAndLayout<'tcx>) {
     let tcx = cx.tcx();
 
-    if !layout.size.bytes().is_multiple_of(layout.align.abi.bytes()) {
+    if !layout.size.bytes().is_multiple_of(layout.align.bytes()) {
         bug!("size is not a multiple of align, in the following layout:\n{layout:#?}");
     }
     if layout.size.bytes() >= tcx.data_layout.obj_size_bound() {
@@ -300,8 +300,8 @@ pub(super) fn layout_sanity_check<'tcx>(cx: &LayoutCx<'tcx>, layout: &TyAndLayou
                 if variant.align.abi > layout.align.abi {
                     bug!(
                         "Type with alignment {} bytes has variant with alignment {} bytes: {layout:#?}",
-                        layout.align.abi.bytes(),
-                        variant.align.abi.bytes(),
+                        layout.align.bytes(),
+                        variant.align.bytes(),
                     )
                 }
                 // Skip empty variants.
diff --git a/compiler/rustc_type_ir/src/search_graph/mod.rs b/compiler/rustc_type_ir/src/search_graph/mod.rs
index 8f8f019510f..7aa58d096d5 100644
--- a/compiler/rustc_type_ir/src/search_graph/mod.rs
+++ b/compiler/rustc_type_ir/src/search_graph/mod.rs
@@ -86,14 +86,12 @@ pub trait Delegate: Sized {
         kind: PathKind,
         input: <Self::Cx as Cx>::Input,
     ) -> <Self::Cx as Cx>::Result;
-    fn is_initial_provisional_result(
+    fn is_initial_provisional_result(result: <Self::Cx as Cx>::Result) -> Option<PathKind>;
+    fn stack_overflow_result(
         cx: Self::Cx,
-        kind: PathKind,
         input: <Self::Cx as Cx>::Input,
-        result: <Self::Cx as Cx>::Result,
-    ) -> bool;
-    fn on_stack_overflow(cx: Self::Cx, input: <Self::Cx as Cx>::Input) -> <Self::Cx as Cx>::Result;
-    fn on_fixpoint_overflow(
+    ) -> <Self::Cx as Cx>::Result;
+    fn fixpoint_overflow_result(
         cx: Self::Cx,
         input: <Self::Cx as Cx>::Input,
     ) -> <Self::Cx as Cx>::Result;
@@ -215,6 +213,27 @@ impl HeadUsages {
         let HeadUsages { inductive, unknown, coinductive, forced_ambiguity } = self;
         inductive == 0 && unknown == 0 && coinductive == 0 && forced_ambiguity == 0
     }
+
+    fn is_single(self, path_kind: PathKind) -> bool {
+        match path_kind {
+            PathKind::Inductive => matches!(
+                self,
+                HeadUsages { inductive: _, unknown: 0, coinductive: 0, forced_ambiguity: 0 },
+            ),
+            PathKind::Unknown => matches!(
+                self,
+                HeadUsages { inductive: 0, unknown: _, coinductive: 0, forced_ambiguity: 0 },
+            ),
+            PathKind::Coinductive => matches!(
+                self,
+                HeadUsages { inductive: 0, unknown: 0, coinductive: _, forced_ambiguity: 0 },
+            ),
+            PathKind::ForcedAmbiguity => matches!(
+                self,
+                HeadUsages { inductive: 0, unknown: 0, coinductive: 0, forced_ambiguity: _ },
+            ),
+        }
+    }
 }
 
 #[derive(Debug, Default)]
@@ -869,7 +888,7 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
         }
 
         debug!("encountered stack overflow");
-        D::on_stack_overflow(cx, input)
+        D::stack_overflow_result(cx, input)
     }
 
     /// When reevaluating a goal with a changed provisional result, all provisional cache entry
@@ -888,7 +907,29 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
             !entries.is_empty()
         });
     }
+}
 
+/// We need to rebase provisional cache entries when popping one of their cycle
+/// heads from the stack. This does not necessarily mean that we've actually
+/// reached a fixpoint for that cycle head, which impacts the way we rebase
+/// provisional cache entries.
+enum RebaseReason {
+    NoCycleUsages,
+    Ambiguity,
+    Overflow,
+    /// We've actually reached a fixpoint.
+    ///
+    /// If this happens in the first evaluation step for the cycle head, the
+    /// provisional result used depends on the cycle `PathKind`. We store this
+    /// path kind to check whether the provisional cache entry we're rebasing
+    /// relied on the same cycles.
+    ///
+    /// In later iterations, cycles always return `stack_entry.provisional_result`,
+    /// so we no longer depend on the `PathKind`. We store `None` in that case.
+    ReachedFixpoint(Option<PathKind>),
+}
+
+impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D, X> {
     /// A necessary optimization to handle complex solver cycles. A provisional cache entry
     /// relies on a set of cycle heads and the path towards these heads. When popping a cycle
     /// head from the stack after we've finished computing it, we can't be sure that the
@@ -908,8 +949,9 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
     /// to me.
     fn rebase_provisional_cache_entries(
         &mut self,
+        cx: X,
         stack_entry: &StackEntry<X>,
-        mut mutate_result: impl FnMut(X::Input, X::Result) -> X::Result,
+        rebase_reason: RebaseReason,
     ) {
         let popped_head_index = self.stack.next_index();
         #[allow(rustc::potential_query_instability)]
@@ -927,6 +969,10 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
                     return true;
                 };
 
+                let Some(new_highest_head_index) = heads.opt_highest_cycle_head_index() else {
+                    return false;
+                };
+
                 // We're rebasing an entry `e` over a head `p`. This head
                 // has a number of own heads `h` it depends on.
                 //
@@ -977,22 +1023,37 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
                         let eph = ep.extend_with_paths(ph);
                         heads.insert(head_index, eph, head.usages);
                     }
-                }
 
-                let Some(head_index) = heads.opt_highest_cycle_head_index() else {
-                    return false;
-                };
+                    // The provisional cache entry does depend on the provisional result
+                    // of the popped cycle head. We need to mutate the result of our
+                    // provisional cache entry in case we did not reach a fixpoint.
+                    match rebase_reason {
+                        // If the cycle head does not actually depend on itself, then
+                        // the provisional result used by the provisional cache entry
+                        // is not actually equal to the final provisional result. We
+                        // need to discard the provisional cache entry in this case.
+                        RebaseReason::NoCycleUsages => return false,
+                        RebaseReason::Ambiguity => {
+                            *result = D::propagate_ambiguity(cx, input, *result);
+                        }
+                        RebaseReason::Overflow => *result = D::fixpoint_overflow_result(cx, input),
+                        RebaseReason::ReachedFixpoint(None) => {}
+                        RebaseReason::ReachedFixpoint(Some(path_kind)) => {
+                            if !popped_head.usages.is_single(path_kind) {
+                                return false;
+                            }
+                        }
+                    };
+                }
 
                 // We now care about the path from the next highest cycle head to the
                 // provisional cache entry.
                 *path_from_head = path_from_head.extend(Self::cycle_path_kind(
                     &self.stack,
                     stack_entry.step_kind_from_parent,
-                    head_index,
+                    new_highest_head_index,
                 ));
-                // Mutate the result of the provisional cache entry in case we did
-                // not reach a fixpoint.
-                *result = mutate_result(input, *result);
+
                 true
             });
             !entries.is_empty()
@@ -1209,33 +1270,19 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
     /// Whether we've reached a fixpoint when evaluating a cycle head.
     fn reached_fixpoint(
         &mut self,
-        cx: X,
         stack_entry: &StackEntry<X>,
         usages: HeadUsages,
         result: X::Result,
-    ) -> bool {
+    ) -> Result<Option<PathKind>, ()> {
         let provisional_result = stack_entry.provisional_result;
-        if usages.is_empty() {
-            true
-        } else if let Some(provisional_result) = provisional_result {
-            provisional_result == result
+        if let Some(provisional_result) = provisional_result {
+            if provisional_result == result { Ok(None) } else { Err(()) }
+        } else if let Some(path_kind) = D::is_initial_provisional_result(result)
+            .filter(|&path_kind| usages.is_single(path_kind))
+        {
+            Ok(Some(path_kind))
         } else {
-            let check = |k| D::is_initial_provisional_result(cx, k, stack_entry.input, result);
-            match usages {
-                HeadUsages { inductive: _, unknown: 0, coinductive: 0, forced_ambiguity: 0 } => {
-                    check(PathKind::Inductive)
-                }
-                HeadUsages { inductive: 0, unknown: _, coinductive: 0, forced_ambiguity: 0 } => {
-                    check(PathKind::Unknown)
-                }
-                HeadUsages { inductive: 0, unknown: 0, coinductive: _, forced_ambiguity: 0 } => {
-                    check(PathKind::Coinductive)
-                }
-                HeadUsages { inductive: 0, unknown: 0, coinductive: 0, forced_ambiguity: _ } => {
-                    check(PathKind::ForcedAmbiguity)
-                }
-                _ => false,
-            }
+            Err(())
         }
     }
 
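As a reading aid for the rewritten `reached_fixpoint` above, here is a hedged, standalone sketch of its new contract with toy types; none of these are the real `rustc_type_ir` definitions, and `only_used_as` is an assumed simplification of the `usages.is_single(..)` check. `Ok(None)` means the result matched the provisional result tracked from an earlier iteration, `Ok(Some(kind))` means the first iteration already produced the initial provisional result for the only path kind in use, and `Err(())` means no fixpoint was reached.

```rust
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum PathKind {
    Inductive,
    Coinductive,
}

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum ToyResult {
    NoSolution,
    Success,
    Ambiguous,
}

// Which path kind's initial provisional result does this value correspond to,
// if any? (Arbitrary toy mapping.)
fn is_initial_provisional_result(result: ToyResult) -> Option<PathKind> {
    match result {
        ToyResult::NoSolution => Some(PathKind::Inductive),
        ToyResult::Success => Some(PathKind::Coinductive),
        ToyResult::Ambiguous => None,
    }
}

fn reached_fixpoint(
    provisional_result: Option<ToyResult>,
    only_used_as: Option<PathKind>,
    result: ToyResult,
) -> Result<Option<PathKind>, ()> {
    if let Some(provisional_result) = provisional_result {
        // Later iterations: we reached a fixpoint iff the result is unchanged.
        if provisional_result == result { Ok(None) } else { Err(()) }
    } else if let Some(kind) = is_initial_provisional_result(result)
        .filter(|&kind| only_used_as == Some(kind))
    {
        // First iteration: the result is already the initial provisional
        // result for the only kind of cycle path this head was used on.
        Ok(Some(kind))
    } else {
        Err(())
    }
}

fn main() {
    assert_eq!(reached_fixpoint(Some(ToyResult::Success), None, ToyResult::Success), Ok(None));
    assert_eq!(reached_fixpoint(Some(ToyResult::NoSolution), None, ToyResult::Success), Err(()));
    assert_eq!(
        reached_fixpoint(None, Some(PathKind::Coinductive), ToyResult::Success),
        Ok(Some(PathKind::Coinductive)),
    );
    assert_eq!(reached_fixpoint(None, None, ToyResult::Ambiguous), Err(()));
}
```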
@@ -1280,8 +1327,19 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
             // is equal to the provisional result of the previous iteration, or because
             // this was only the head of either coinductive or inductive cycles, and the
             // final result is equal to the initial response for that case.
-            if self.reached_fixpoint(cx, &stack_entry, usages, result) {
-                self.rebase_provisional_cache_entries(&stack_entry, |_, result| result);
+            if let Ok(fixpoint) = self.reached_fixpoint(&stack_entry, usages, result) {
+                self.rebase_provisional_cache_entries(
+                    cx,
+                    &stack_entry,
+                    RebaseReason::ReachedFixpoint(fixpoint),
+                );
+                return EvaluationResult::finalize(stack_entry, encountered_overflow, result);
+            } else if usages.is_empty() {
+                self.rebase_provisional_cache_entries(
+                    cx,
+                    &stack_entry,
+                    RebaseReason::NoCycleUsages,
+                );
                 return EvaluationResult::finalize(stack_entry, encountered_overflow, result);
             }
 
@@ -1298,9 +1356,7 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
             // we also taint all provisional cache entries which depend on the
             // current goal.
             if D::is_ambiguous_result(result) {
-                self.rebase_provisional_cache_entries(&stack_entry, |input, _| {
-                    D::propagate_ambiguity(cx, input, result)
-                });
+                self.rebase_provisional_cache_entries(cx, &stack_entry, RebaseReason::Ambiguity);
                 return EvaluationResult::finalize(stack_entry, encountered_overflow, result);
             };
 
@@ -1309,10 +1365,8 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
             i += 1;
             if i >= D::FIXPOINT_STEP_LIMIT {
                 debug!("canonical cycle overflow");
-                let result = D::on_fixpoint_overflow(cx, input);
-                self.rebase_provisional_cache_entries(&stack_entry, |input, _| {
-                    D::on_fixpoint_overflow(cx, input)
-                });
+                let result = D::fixpoint_overflow_result(cx, input);
+                self.rebase_provisional_cache_entries(cx, &stack_entry, RebaseReason::Overflow);
                 return EvaluationResult::finalize(stack_entry, encountered_overflow, result);
             }
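
To illustrate the reshaped `Delegate` hooks from the first hunk, here is a hypothetical toy delegate; only the hook names and shapes follow the diff, while the types, the result-to-`PathKind` mapping, and the dropped `cx` parameter are inventions for the example. The overflow callbacks are now named after the result they produce, and `is_initial_provisional_result` takes only the result and reports which `PathKind`'s initial value it corresponds to, instead of taking `cx`, a `PathKind`, and the input and returning a `bool`.

```rust
#[allow(dead_code)]
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum PathKind {
    Inductive,
    Unknown,
    Coinductive,
    ForcedAmbiguity,
}

#[allow(dead_code)]
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum ToyResult {
    NoSolution,
    Success,
    Ambiguous,
    Overflow,
}

type ToyInput = u64;

trait ToyDelegate {
    // Formerly `is_initial_provisional_result(cx, kind, input, result) -> bool`.
    fn is_initial_provisional_result(result: ToyResult) -> Option<PathKind>;
    // Formerly `on_stack_overflow` / `on_fixpoint_overflow`; the real hooks
    // also take a `cx`, which this toy drops.
    fn stack_overflow_result(input: ToyInput) -> ToyResult;
    fn fixpoint_overflow_result(input: ToyInput) -> ToyResult;
}

struct ToySolver;

impl ToyDelegate for ToySolver {
    fn is_initial_provisional_result(result: ToyResult) -> Option<PathKind> {
        // Arbitrary toy mapping from results back to the path kind whose
        // initial provisional result they are.
        match result {
            ToyResult::NoSolution => Some(PathKind::Inductive),
            ToyResult::Success => Some(PathKind::Coinductive),
            ToyResult::Ambiguous => Some(PathKind::Unknown),
            ToyResult::Overflow => None,
        }
    }

    fn stack_overflow_result(_input: ToyInput) -> ToyResult {
        ToyResult::Overflow
    }

    fn fixpoint_overflow_result(_input: ToyInput) -> ToyResult {
        ToyResult::Overflow
    }
}

fn main() {
    assert_eq!(
        <ToySolver as ToyDelegate>::is_initial_provisional_result(ToyResult::Success),
        Some(PathKind::Coinductive),
    );
    assert_eq!(<ToySolver as ToyDelegate>::stack_overflow_result(0), ToyResult::Overflow);
    assert_eq!(<ToySolver as ToyDelegate>::fixpoint_overflow_result(0), ToyResult::Overflow);
}
```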