Diffstat (limited to 'compiler')
-rw-r--r--  compiler/rustc_arena/src/lib.rs | 68
-rw-r--r--  compiler/rustc_ast/src/ast.rs | 2
-rw-r--r--  compiler/rustc_ast/src/lib.rs | 2
-rw-r--r--  compiler/rustc_ast/src/mut_visit.rs | 140
-rw-r--r--  compiler/rustc_ast/src/tokenstream.rs | 383
-rw-r--r--  compiler/rustc_ast/src/visit.rs | 1
-rw-r--r--  compiler/rustc_ast_lowering/src/pat.rs | 5
-rw-r--r--  compiler/rustc_ast_pretty/src/pprust/state.rs | 44
-rw-r--r--  compiler/rustc_attr_parsing/src/lib.rs | 1
-rw-r--r--  compiler/rustc_builtin_macros/src/autodiff.rs | 4
-rw-r--r--  compiler/rustc_builtin_macros/src/lib.rs | 1
-rw-r--r--  compiler/rustc_builtin_macros/src/pattern_type.rs | 36
-rw-r--r--  compiler/rustc_codegen_cranelift/src/base.rs | 39
-rw-r--r--  compiler/rustc_codegen_cranelift/src/driver/aot.rs | 42
-rw-r--r--  compiler/rustc_codegen_cranelift/src/driver/jit.rs | 12
-rw-r--r--  compiler/rustc_codegen_cranelift/src/global_asm.rs | 203
-rw-r--r--  compiler/rustc_codegen_cranelift/src/inline_asm.rs | 193
-rw-r--r--  compiler/rustc_codegen_gcc/src/asm.rs | 2
-rw-r--r--  compiler/rustc_codegen_gcc/src/base.rs | 6
-rw-r--r--  compiler/rustc_codegen_gcc/src/builder.rs | 2
-rw-r--r--  compiler/rustc_codegen_llvm/src/asm.rs | 2
-rw-r--r--  compiler/rustc_codegen_llvm/src/base.rs | 6
-rw-r--r--  compiler/rustc_codegen_llvm/src/llvm_util.rs | 10
-rw-r--r--  compiler/rustc_codegen_ssa/src/base.rs | 67
-rw-r--r--  compiler/rustc_codegen_ssa/src/codegen_attrs.rs | 30
-rw-r--r--  compiler/rustc_codegen_ssa/src/lib.rs | 1
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/mod.rs | 7
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/naked_asm.rs | 50
-rw-r--r--  compiler/rustc_codegen_ssa/src/mono_item.rs | 97
-rw-r--r--  compiler/rustc_codegen_ssa/src/traits/asm.rs | 2
-rw-r--r--  compiler/rustc_codegen_ssa/src/traits/backend.rs | 9
-rw-r--r--  compiler/rustc_const_eval/src/interpret/intrinsics.rs | 18
-rw-r--r--  compiler/rustc_const_eval/src/interpret/machine.rs | 8
-rw-r--r--  compiler/rustc_const_eval/src/interpret/validity.rs | 8
-rw-r--r--  compiler/rustc_expand/src/build.rs | 14
-rw-r--r--  compiler/rustc_expand/src/config.rs | 8
-rw-r--r--  compiler/rustc_expand/src/mbe/transcribe.rs | 32
-rw-r--r--  compiler/rustc_hir/src/hir.rs | 3
-rw-r--r--  compiler/rustc_hir/src/intravisit.rs | 1
-rw-r--r--  compiler/rustc_hir/src/lib.rs | 1
-rw-r--r--  compiler/rustc_hir_analysis/src/collect/type_of.rs | 8
-rw-r--r--  compiler/rustc_hir_analysis/src/hir_ty_lowering/mod.rs | 60
-rw-r--r--  compiler/rustc_hir_analysis/src/variance/constraints.rs | 26
-rw-r--r--  compiler/rustc_hir_pretty/src/lib.rs | 14
-rw-r--r--  compiler/rustc_hir_typeck/src/callee.rs | 78
-rw-r--r--  compiler/rustc_hir_typeck/src/expr.rs | 56
-rw-r--r--  compiler/rustc_hir_typeck/src/fn_ctxt/checks.rs | 56
-rw-r--r--  compiler/rustc_hir_typeck/src/method/mod.rs | 17
-rw-r--r--  compiler/rustc_hir_typeck/src/method/probe.rs | 2
-rw-r--r--  compiler/rustc_hir_typeck/src/op.rs | 5
-rw-r--r--  compiler/rustc_hir_typeck/src/place_op.rs | 18
-rw-r--r--  compiler/rustc_hir_typeck/src/writeback.rs | 58
-rw-r--r--  compiler/rustc_lint/src/nonstandard_style.rs | 10
-rw-r--r--  compiler/rustc_lint/src/types.rs | 96
-rw-r--r--  compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp | 13
-rw-r--r--  compiler/rustc_macros/src/query.rs | 18
-rw-r--r--  compiler/rustc_middle/src/dep_graph/dep_node.rs | 11
-rw-r--r--  compiler/rustc_middle/src/lib.rs | 1
-rw-r--r--  compiler/rustc_middle/src/mir/interpret/queries.rs | 19
-rw-r--r--  compiler/rustc_middle/src/query/mod.rs | 2
-rw-r--r--  compiler/rustc_middle/src/query/plumbing.rs | 7
-rw-r--r--  compiler/rustc_middle/src/ty/codec.rs | 10
-rw-r--r--  compiler/rustc_middle/src/ty/context.rs | 12
-rw-r--r--  compiler/rustc_middle/src/ty/mod.rs | 1
-rw-r--r--  compiler/rustc_middle/src/ty/pattern.rs | 27
-rw-r--r--  compiler/rustc_middle/src/ty/relate.rs | 12
-rw-r--r--  compiler/rustc_middle/src/ty/structural_impls.rs | 3
-rw-r--r--  compiler/rustc_middle/src/ty/typeck_results.rs | 2
-rw-r--r--  compiler/rustc_mir_transform/src/elaborate_drop.rs | 2
-rw-r--r--  compiler/rustc_mir_transform/src/jump_threading.rs | 12
-rw-r--r--  compiler/rustc_mir_transform/src/patch.rs | 22
-rw-r--r--  compiler/rustc_next_trait_solver/src/solve/assembly/structural_traits.rs | 3
-rw-r--r--  compiler/rustc_parse/src/lib.rs | 2
-rw-r--r--  compiler/rustc_parse/src/parser/attr.rs | 4
-rw-r--r--  compiler/rustc_parse/src/parser/attr_wrapper.rs | 194
-rw-r--r--  compiler/rustc_parse/src/parser/mod.rs | 229
-rw-r--r--  compiler/rustc_parse/src/parser/mut_visit/tests.rs | 65
-rw-r--r--  compiler/rustc_parse/src/parser/tests.rs | 6
-rw-r--r--  compiler/rustc_query_impl/src/lib.rs | 2
-rw-r--r--  compiler/rustc_query_impl/src/plumbing.rs | 9
-rw-r--r--  compiler/rustc_resolve/src/late.rs | 5
-rw-r--r--  compiler/rustc_resolve/src/lib.rs | 1
-rw-r--r--  compiler/rustc_smir/src/rustc_smir/convert/ty.rs | 1
-rw-r--r--  compiler/rustc_symbol_mangling/src/v0.rs | 38
-rw-r--r--  compiler/rustc_target/src/spec/targets/armv5te_unknown_linux_gnueabi.rs | 1
-rw-r--r--  compiler/rustc_target/src/spec/targets/armv7_unknown_linux_gnueabihf.rs | 1
-rw-r--r--  compiler/rustc_trait_selection/Cargo.toml | 1
-rw-r--r--  compiler/rustc_trait_selection/src/error_reporting/infer/need_type_info.rs | 3
-rw-r--r--  compiler/rustc_trait_selection/src/error_reporting/infer/region.rs | 5
-rw-r--r--  compiler/rustc_trait_selection/src/error_reporting/traits/overflow.rs | 3
-rw-r--r--  compiler/rustc_trait_selection/src/solve.rs | 2
-rw-r--r--  compiler/rustc_trait_selection/src/solve/delegate.rs | 8
-rw-r--r--  compiler/rustc_trait_selection/src/solve/fulfill.rs | 3
-rw-r--r--  compiler/rustc_trait_selection/src/solve/fulfill/derive_errors.rs | 2
-rw-r--r--  compiler/rustc_trait_selection/src/solve/normalize.rs | 57
-rw-r--r--  compiler/rustc_trait_selection/src/traits/dyn_compatibility.rs | 2
-rw-r--r--  compiler/rustc_trait_selection/src/traits/effects.rs | 7
-rw-r--r--  compiler/rustc_trait_selection/src/traits/engine.rs | 2
-rw-r--r--  compiler/rustc_trait_selection/src/traits/normalize.rs | 13
-rw-r--r--  compiler/rustc_trait_selection/src/traits/query/type_op/implied_outlives_bounds.rs | 2
-rw-r--r--  compiler/rustc_trait_selection/src/traits/select/candidate_assembly.rs | 3
-rw-r--r--  compiler/rustc_trait_selection/src/traits/select/confirmation.rs | 3
-rw-r--r--  compiler/rustc_trait_selection/src/traits/select/mod.rs | 3
-rw-r--r--  compiler/rustc_trait_selection/src/traits/specialize/mod.rs | 2
-rw-r--r--  compiler/rustc_trait_selection/src/traits/wf.rs | 84
-rw-r--r--  compiler/rustc_transmute/Cargo.toml | 6
-rw-r--r--  compiler/rustc_transmute/src/layout/dfa.rs | 301
-rw-r--r--  compiler/rustc_transmute/src/layout/mod.rs | 61
-rw-r--r--  compiler/rustc_transmute/src/maybe_transmutable/mod.rs | 346
-rw-r--r--  compiler/rustc_transmute/src/maybe_transmutable/tests.rs | 25
-rw-r--r--  compiler/rustc_ty_utils/src/layout.rs | 86
-rw-r--r--  compiler/rustc_type_ir/src/canonical.rs | 2
-rw-r--r--  compiler/rustc_type_ir/src/flags.rs | 6
-rw-r--r--  compiler/rustc_type_ir/src/interner.rs | 7
-rw-r--r--  compiler/rustc_type_ir/src/pattern.rs | 1
-rw-r--r--  compiler/rustc_type_ir/src/walk.rs | 21
116 files changed, 1938 insertions(+), 1928 deletions(-)
diff --git a/compiler/rustc_arena/src/lib.rs b/compiler/rustc_arena/src/lib.rs
index 6aaac072e4b..d3b7e679d17 100644
--- a/compiler/rustc_arena/src/lib.rs
+++ b/compiler/rustc_arena/src/lib.rs
@@ -21,8 +21,10 @@
 #![feature(decl_macro)]
 #![feature(dropck_eyepatch)]
 #![feature(maybe_uninit_slice)]
+#![feature(never_type)]
 #![feature(rustc_attrs)]
 #![feature(rustdoc_internals)]
+#![feature(unwrap_infallible)]
 // tidy-alphabetical-end
 
 use std::alloc::Layout;
@@ -200,6 +202,18 @@ impl<T> TypedArena<T> {
     /// storing the elements in the arena.
     #[inline]
     pub fn alloc_from_iter<I: IntoIterator<Item = T>>(&self, iter: I) -> &mut [T] {
+        self.try_alloc_from_iter(iter.into_iter().map(Ok::<T, !>)).into_ok()
+    }
+
+    /// Allocates the elements of this iterator into a contiguous slice in the `TypedArena`.
+    ///
+    /// Note: for reasons of reentrancy and panic safety we collect into a `SmallVec<[_; 8]>` before
+    /// storing the elements in the arena.
+    #[inline]
+    pub fn try_alloc_from_iter<E>(
+        &self,
+        iter: impl IntoIterator<Item = Result<T, E>>,
+    ) -> Result<&mut [T], E> {
         // Despite the similarity with `DroplessArena`, we cannot reuse their fast case. The reason
         // is subtle: these arenas are reentrant. In other words, `iter` may very well be holding a
         // reference to `self` and adding elements to the arena during iteration.
@@ -214,18 +228,19 @@ impl<T> TypedArena<T> {
         // doesn't need to be hyper-optimized.
         assert!(size_of::<T>() != 0);
 
-        let mut vec: SmallVec<[_; 8]> = iter.into_iter().collect();
+        let vec: Result<SmallVec<[T; 8]>, E> = iter.into_iter().collect();
+        let mut vec = vec?;
         if vec.is_empty() {
-            return &mut [];
+            return Ok(&mut []);
         }
         // Move the content to the arena by copying and then forgetting it.
         let len = vec.len();
         let start_ptr = self.alloc_raw_slice(len);
-        unsafe {
+        Ok(unsafe {
             vec.as_ptr().copy_to_nonoverlapping(start_ptr, len);
             vec.set_len(0);
             slice::from_raw_parts_mut(start_ptr, len)
-        }
+        })
     }
 
     /// Grows the arena.
@@ -566,27 +581,34 @@ impl DroplessArena {
                 // `drop`.
                 unsafe { self.write_from_iter(iter, len, mem) }
             }
-            (_, _) => {
-                outline(move || -> &mut [T] {
-                    // Takes care of reentrancy.
-                    let mut vec: SmallVec<[_; 8]> = iter.collect();
-                    if vec.is_empty() {
-                        return &mut [];
-                    }
-                    // Move the content to the arena by copying it and then forgetting
-                    // the content of the SmallVec
-                    unsafe {
-                        let len = vec.len();
-                        let start_ptr =
-                            self.alloc_raw(Layout::for_value::<[T]>(vec.as_slice())) as *mut T;
-                        vec.as_ptr().copy_to_nonoverlapping(start_ptr, len);
-                        vec.set_len(0);
-                        slice::from_raw_parts_mut(start_ptr, len)
-                    }
-                })
-            }
+            (_, _) => outline(move || self.try_alloc_from_iter(iter.map(Ok::<T, !>)).into_ok()),
         }
     }
+
+    #[inline]
+    pub fn try_alloc_from_iter<T, E>(
+        &self,
+        iter: impl IntoIterator<Item = Result<T, E>>,
+    ) -> Result<&mut [T], E> {
+        // Despite the similarity with `alloc_from_iter`, we cannot reuse their fast case, as we
+        // cannot know the minimum length of the iterator in this case.
+        assert!(size_of::<T>() != 0);
+
+        // Takes care of reentrancy.
+        let vec: Result<SmallVec<[T; 8]>, E> = iter.into_iter().collect();
+        let mut vec = vec?;
+        if vec.is_empty() {
+            return Ok(&mut []);
+        }
+        // Move the content to the arena by copying and then forgetting it.
+        let len = vec.len();
+        Ok(unsafe {
+            let start_ptr = self.alloc_raw(Layout::for_value::<[T]>(vec.as_slice())) as *mut T;
+            vec.as_ptr().copy_to_nonoverlapping(start_ptr, len);
+            vec.set_len(0);
+            slice::from_raw_parts_mut(start_ptr, len)
+        })
+    }
 }
 
 /// Declare an `Arena` containing one dropless arena and many typed arenas (the
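
A minimal usage sketch of the new fallible arena API above (it assumes a crate inside the compiler tree that can depend on `rustc_arena`; the helper function and values are illustrative, not part of the patch). The first `Err` yielded by the iterator aborts the allocation and is returned, and `alloc_from_iter` is now just the infallible case, mapping items through `Ok::<_, !>`:

    use rustc_arena::TypedArena;

    fn collect_into_arena<'a>(
        arena: &'a TypedArena<u32>,
        inputs: &[&str],
    ) -> Result<&'a mut [u32], std::num::ParseIntError> {
        // Errors are propagated before anything is written into the arena,
        // because the items are first collected into a SmallVec.
        arena.try_alloc_from_iter(inputs.iter().map(|s| s.parse::<u32>()))
    }

    fn main() {
        let arena = TypedArena::default();
        assert_eq!(*collect_into_arena(&arena, &["1", "2", "3"]).unwrap(), [1, 2, 3]);
        assert!(collect_into_arena(&arena, &["4", "oops"]).is_err());
    }
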
diff --git a/compiler/rustc_ast/src/ast.rs b/compiler/rustc_ast/src/ast.rs
index 8986430141b..9d216ef3dd8 100644
--- a/compiler/rustc_ast/src/ast.rs
+++ b/compiler/rustc_ast/src/ast.rs
@@ -2469,6 +2469,8 @@ pub enum TyPatKind {
     /// A range pattern (e.g., `1...2`, `1..2`, `1..`, `..2`, `1..=2`, `..=2`).
     Range(Option<P<AnonConst>>, Option<P<AnonConst>>, Spanned<RangeEnd>),
 
+    Or(ThinVec<P<TyPat>>),
+
     /// Placeholder for a pattern that wasn't syntactically well formed in some way.
     Err(ErrorGuaranteed),
 }
diff --git a/compiler/rustc_ast/src/lib.rs b/compiler/rustc_ast/src/lib.rs
index 1471262d2d6..e572ec99dab 100644
--- a/compiler/rustc_ast/src/lib.rs
+++ b/compiler/rustc_ast/src/lib.rs
@@ -12,6 +12,7 @@
     test(attr(deny(warnings)))
 )]
 #![doc(rust_logo)]
+#![feature(array_windows)]
 #![feature(associated_type_defaults)]
 #![feature(box_patterns)]
 #![feature(if_let_guard)]
@@ -19,6 +20,7 @@
 #![feature(never_type)]
 #![feature(rustdoc_internals)]
 #![feature(stmt_expr_attributes)]
+#![recursion_limit = "256"]
 // tidy-alphabetical-end
 
 pub mod util {
diff --git a/compiler/rustc_ast/src/mut_visit.rs b/compiler/rustc_ast/src/mut_visit.rs
index 6aae2e481a5..e49886721e3 100644
--- a/compiler/rustc_ast/src/mut_visit.rs
+++ b/compiler/rustc_ast/src/mut_visit.rs
@@ -9,7 +9,6 @@
 
 use std::ops::DerefMut;
 use std::panic;
-use std::sync::Arc;
 
 use rustc_data_structures::flat_map_in_place::FlatMapInPlace;
 use rustc_data_structures::stack::ensure_sufficient_stack;
@@ -20,7 +19,6 @@ use thin_vec::ThinVec;
 
 use crate::ast::*;
 use crate::ptr::P;
-use crate::token::{self, Token};
 use crate::tokenstream::*;
 use crate::visit::{AssocCtxt, BoundKind, FnCtxt};
 
@@ -48,11 +46,6 @@ pub trait WalkItemKind {
 }
 
 pub trait MutVisitor: Sized {
-    /// Mutable token visiting only exists for the `macro_rules` token marker and should not be
-    /// used otherwise. Token visitor would be entirely separate from the regular visitor if
-    /// the marker didn't have to visit AST fragments in nonterminal tokens.
-    const VISIT_TOKENS: bool = false;
-
     // Methods in this trait have one of three forms:
     //
     //   fn visit_t(&mut self, t: &mut T);                      // common
@@ -360,6 +353,8 @@ pub trait MutVisitor: Sized {
         // Do nothing.
     }
 
+    // Span visiting is no longer used, but we keep it for now,
+    // in case it's needed for something like #127241.
     fn visit_span(&mut self, _sp: &mut Span) {
         // Do nothing.
     }
@@ -473,12 +468,8 @@ fn visit_attr_args<T: MutVisitor>(vis: &mut T, args: &mut AttrArgs) {
 
 // No `noop_` prefix because there isn't a corresponding method in `MutVisitor`.
 fn visit_delim_args<T: MutVisitor>(vis: &mut T, args: &mut DelimArgs) {
-    let DelimArgs { dspan, delim: _, tokens } = args;
-    visit_tts(vis, tokens);
-    visit_delim_span(vis, dspan);
-}
-
-pub fn visit_delim_span<T: MutVisitor>(vis: &mut T, DelimSpan { open, close }: &mut DelimSpan) {
+    let DelimArgs { dspan, delim: _, tokens: _ } = args;
+    let DelimSpan { open, close } = dspan;
     vis.visit_span(open);
     vis.visit_span(close);
 }
@@ -552,7 +543,7 @@ fn walk_assoc_item_constraint<T: MutVisitor>(
 }
 
 pub fn walk_ty<T: MutVisitor>(vis: &mut T, ty: &mut P<Ty>) {
-    let Ty { id, kind, span, tokens } = ty.deref_mut();
+    let Ty { id, kind, span, tokens: _ } = ty.deref_mut();
     vis.visit_id(id);
     match kind {
         TyKind::Err(_guar) => {}
@@ -600,21 +591,20 @@ pub fn walk_ty<T: MutVisitor>(vis: &mut T, ty: &mut P<Ty>) {
         }
         TyKind::MacCall(mac) => vis.visit_mac_call(mac),
     }
-    visit_lazy_tts(vis, tokens);
     vis.visit_span(span);
 }
 
 pub fn walk_ty_pat<T: MutVisitor>(vis: &mut T, ty: &mut P<TyPat>) {
-    let TyPat { id, kind, span, tokens } = ty.deref_mut();
+    let TyPat { id, kind, span, tokens: _ } = ty.deref_mut();
     vis.visit_id(id);
     match kind {
         TyPatKind::Range(start, end, _include_end) => {
             visit_opt(start, |c| vis.visit_anon_const(c));
             visit_opt(end, |c| vis.visit_anon_const(c));
         }
+        TyPatKind::Or(variants) => visit_thin_vec(variants, |p| vis.visit_ty_pat(p)),
         TyPatKind::Err(_) => {}
     }
-    visit_lazy_tts(vis, tokens);
     vis.visit_span(span);
 }
 
@@ -654,11 +644,10 @@ fn walk_path_segment<T: MutVisitor>(vis: &mut T, segment: &mut PathSegment) {
     visit_opt(args, |args| vis.visit_generic_args(args));
 }
 
-fn walk_path<T: MutVisitor>(vis: &mut T, Path { segments, span, tokens }: &mut Path) {
+fn walk_path<T: MutVisitor>(vis: &mut T, Path { segments, span, tokens: _ }: &mut Path) {
     for segment in segments {
         vis.visit_path_segment(segment);
     }
-    visit_lazy_tts(vis, tokens);
     vis.visit_span(span);
 }
 
@@ -704,7 +693,7 @@ fn walk_parenthesized_parameter_data<T: MutVisitor>(vis: &mut T, args: &mut Pare
 }
 
 fn walk_local<T: MutVisitor>(vis: &mut T, local: &mut P<Local>) {
-    let Local { id, super_, pat, ty, kind, span, colon_sp, attrs, tokens } = local.deref_mut();
+    let Local { id, super_, pat, ty, kind, span, colon_sp, attrs, tokens: _ } = local.deref_mut();
     visit_opt(super_, |sp| vis.visit_span(sp));
     vis.visit_id(id);
     visit_attrs(vis, attrs);
@@ -720,7 +709,6 @@ fn walk_local<T: MutVisitor>(vis: &mut T, local: &mut P<Local>) {
             vis.visit_block(els);
         }
     }
-    visit_lazy_tts(vis, tokens);
     visit_opt(colon_sp, |sp| vis.visit_span(sp));
     vis.visit_span(span);
 }
@@ -729,14 +717,10 @@ fn walk_attribute<T: MutVisitor>(vis: &mut T, attr: &mut Attribute) {
     let Attribute { kind, id: _, style: _, span } = attr;
     match kind {
         AttrKind::Normal(normal) => {
-            let NormalAttr {
-                item: AttrItem { unsafety: _, path, args, tokens },
-                tokens: attr_tokens,
-            } = &mut **normal;
+            let NormalAttr { item: AttrItem { unsafety: _, path, args, tokens: _ }, tokens: _ } =
+                &mut **normal;
             vis.visit_path(path);
             visit_attr_args(vis, args);
-            visit_lazy_tts(vis, tokens);
-            visit_lazy_tts(vis, attr_tokens);
         }
         AttrKind::DocComment(_kind, _sym) => {}
     }
@@ -786,90 +770,6 @@ pub fn walk_flat_map_param<T: MutVisitor>(vis: &mut T, mut param: Param) -> Smal
 }
 
 // No `noop_` prefix because there isn't a corresponding method in `MutVisitor`.
-fn visit_attr_tt<T: MutVisitor>(vis: &mut T, tt: &mut AttrTokenTree) {
-    match tt {
-        AttrTokenTree::Token(token, _spacing) => {
-            visit_token(vis, token);
-        }
-        AttrTokenTree::Delimited(dspan, _spacing, _delim, tts) => {
-            visit_attr_tts(vis, tts);
-            visit_delim_span(vis, dspan);
-        }
-        AttrTokenTree::AttrsTarget(AttrsTarget { attrs, tokens }) => {
-            visit_attrs(vis, attrs);
-            visit_lazy_tts_opt_mut(vis, Some(tokens));
-        }
-    }
-}
-
-// No `noop_` prefix because there isn't a corresponding method in `MutVisitor`.
-fn visit_tt<T: MutVisitor>(vis: &mut T, tt: &mut TokenTree) {
-    match tt {
-        TokenTree::Token(token, _spacing) => {
-            visit_token(vis, token);
-        }
-        TokenTree::Delimited(dspan, _spacing, _delim, tts) => {
-            visit_tts(vis, tts);
-            visit_delim_span(vis, dspan);
-        }
-    }
-}
-
-// No `noop_` prefix because there isn't a corresponding method in `MutVisitor`.
-fn visit_tts<T: MutVisitor>(vis: &mut T, TokenStream(tts): &mut TokenStream) {
-    if T::VISIT_TOKENS && !tts.is_empty() {
-        let tts = Arc::make_mut(tts);
-        visit_vec(tts, |tree| visit_tt(vis, tree));
-    }
-}
-
-fn visit_attr_tts<T: MutVisitor>(vis: &mut T, AttrTokenStream(tts): &mut AttrTokenStream) {
-    if T::VISIT_TOKENS && !tts.is_empty() {
-        let tts = Arc::make_mut(tts);
-        visit_vec(tts, |tree| visit_attr_tt(vis, tree));
-    }
-}
-
-fn visit_lazy_tts_opt_mut<T: MutVisitor>(vis: &mut T, lazy_tts: Option<&mut LazyAttrTokenStream>) {
-    if T::VISIT_TOKENS {
-        if let Some(lazy_tts) = lazy_tts {
-            let mut tts = lazy_tts.to_attr_token_stream();
-            visit_attr_tts(vis, &mut tts);
-            *lazy_tts = LazyAttrTokenStream::new(tts);
-        }
-    }
-}
-
-fn visit_lazy_tts<T: MutVisitor>(vis: &mut T, lazy_tts: &mut Option<LazyAttrTokenStream>) {
-    visit_lazy_tts_opt_mut(vis, lazy_tts.as_mut());
-}
-
-/// Applies ident visitor if it's an ident. In practice this is not actually
-/// used by specific visitors right now, but there's a test below checking that
-/// it works.
-// No `noop_` prefix because there isn't a corresponding method in `MutVisitor`.
-pub fn visit_token<T: MutVisitor>(vis: &mut T, t: &mut Token) {
-    let Token { kind, span } = t;
-    match kind {
-        token::Ident(name, _is_raw) | token::Lifetime(name, _is_raw) => {
-            let mut ident = Ident::new(*name, *span);
-            vis.visit_ident(&mut ident);
-            *name = ident.name;
-            *span = ident.span;
-            return; // Avoid visiting the span for the second time.
-        }
-        token::NtIdent(ident, _is_raw) => {
-            vis.visit_ident(ident);
-        }
-        token::NtLifetime(ident, _is_raw) => {
-            vis.visit_ident(ident);
-        }
-        _ => {}
-    }
-    vis.visit_span(span);
-}
-
-// No `noop_` prefix because there isn't a corresponding method in `MutVisitor`.
 fn visit_defaultness<T: MutVisitor>(vis: &mut T, defaultness: &mut Defaultness) {
     match defaultness {
         Defaultness::Default(span) => vis.visit_span(span),
@@ -1187,10 +1087,9 @@ fn walk_mt<T: MutVisitor>(vis: &mut T, MutTy { ty, mutbl: _ }: &mut MutTy) {
 }
 
 pub fn walk_block<T: MutVisitor>(vis: &mut T, block: &mut P<Block>) {
-    let Block { id, stmts, rules: _, span, tokens } = block.deref_mut();
+    let Block { id, stmts, rules: _, span, tokens: _ } = block.deref_mut();
     vis.visit_id(id);
     stmts.flat_map_in_place(|stmt| vis.flat_map_stmt(stmt));
-    visit_lazy_tts(vis, tokens);
     vis.visit_span(span);
 }
 
@@ -1471,12 +1370,11 @@ fn walk_item_ctxt<K: WalkItemKind>(
     item: &mut P<Item<K>>,
     ctxt: K::Ctxt,
 ) {
-    let Item { attrs, id, kind, vis, span, tokens } = item.deref_mut();
+    let Item { attrs, id, kind, vis, span, tokens: _ } = item.deref_mut();
     visitor.visit_id(id);
     visit_attrs(visitor, attrs);
     visitor.visit_vis(vis);
     kind.walk(*span, *id, vis, ctxt, visitor);
-    visit_lazy_tts(visitor, tokens);
     visitor.visit_span(span);
 }
 
@@ -1550,7 +1448,7 @@ impl WalkItemKind for ForeignItemKind {
 }
 
 pub fn walk_pat<T: MutVisitor>(vis: &mut T, pat: &mut P<Pat>) {
-    let Pat { id, kind, span, tokens } = pat.deref_mut();
+    let Pat { id, kind, span, tokens: _ } = pat.deref_mut();
     vis.visit_id(id);
     match kind {
         PatKind::Err(_guar) => {}
@@ -1592,7 +1490,6 @@ pub fn walk_pat<T: MutVisitor>(vis: &mut T, pat: &mut P<Pat>) {
         PatKind::Paren(inner) => vis.visit_pat(inner),
         PatKind::MacCall(mac) => vis.visit_mac_call(mac),
     }
-    visit_lazy_tts(vis, tokens);
     vis.visit_span(span);
 }
 
@@ -1656,7 +1553,7 @@ fn walk_format_args<T: MutVisitor>(vis: &mut T, fmt: &mut FormatArgs) {
     vis.visit_span(span);
 }
 
-pub fn walk_expr<T: MutVisitor>(vis: &mut T, Expr { kind, id, span, attrs, tokens }: &mut Expr) {
+pub fn walk_expr<T: MutVisitor>(vis: &mut T, Expr { kind, id, span, attrs, tokens: _ }: &mut Expr) {
     vis.visit_id(id);
     visit_attrs(vis, attrs);
     match kind {
@@ -1847,7 +1744,6 @@ pub fn walk_expr<T: MutVisitor>(vis: &mut T, Expr { kind, id, span, attrs, token
         ExprKind::Err(_guar) => {}
         ExprKind::Dummy => {}
     }
-    visit_lazy_tts(vis, tokens);
     vis.visit_span(span);
 }
 
@@ -1889,17 +1785,16 @@ fn walk_flat_map_stmt_kind<T: MutVisitor>(vis: &mut T, kind: StmtKind) -> SmallV
         StmtKind::Semi(expr) => vis.filter_map_expr(expr).into_iter().map(StmtKind::Semi).collect(),
         StmtKind::Empty => smallvec![StmtKind::Empty],
         StmtKind::MacCall(mut mac) => {
-            let MacCallStmt { mac: mac_, style: _, attrs, tokens } = mac.deref_mut();
+            let MacCallStmt { mac: mac_, style: _, attrs, tokens: _ } = mac.deref_mut();
             visit_attrs(vis, attrs);
             vis.visit_mac_call(mac_);
-            visit_lazy_tts(vis, tokens);
             smallvec![StmtKind::MacCall(mac)]
         }
     }
 }
 
 fn walk_vis<T: MutVisitor>(vis: &mut T, visibility: &mut Visibility) {
-    let Visibility { kind, span, tokens } = visibility;
+    let Visibility { kind, span, tokens: _ } = visibility;
     match kind {
         VisibilityKind::Public | VisibilityKind::Inherited => {}
         VisibilityKind::Restricted { path, id, shorthand: _ } => {
@@ -1907,7 +1802,6 @@ fn walk_vis<T: MutVisitor>(vis: &mut T, visibility: &mut Visibility) {
             vis.visit_path(path);
         }
     }
-    visit_lazy_tts(vis, tokens);
     vis.visit_span(span);
 }
 
diff --git a/compiler/rustc_ast/src/tokenstream.rs b/compiler/rustc_ast/src/tokenstream.rs
index 43d25d18075..636c26bcde0 100644
--- a/compiler/rustc_ast/src/tokenstream.rs
+++ b/compiler/rustc_ast/src/tokenstream.rs
@@ -14,14 +14,16 @@
 //! ownership of the original.
 
 use std::borrow::Cow;
+use std::ops::Range;
 use std::sync::Arc;
-use std::{cmp, fmt, iter};
+use std::{cmp, fmt, iter, mem};
 
 use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
 use rustc_data_structures::sync;
 use rustc_macros::{Decodable, Encodable, HashStable_Generic};
 use rustc_serialize::{Decodable, Encodable};
 use rustc_span::{DUMMY_SP, Span, SpanDecoder, SpanEncoder, Symbol, sym};
+use thin_vec::ThinVec;
 
 use crate::ast::AttrStyle;
 use crate::ast_traits::{HasAttrs, HasTokens};
@@ -106,25 +108,30 @@ where
     }
 }
 
-pub trait ToAttrTokenStream: sync::DynSend + sync::DynSync {
-    fn to_attr_token_stream(&self) -> AttrTokenStream;
-}
-
-impl ToAttrTokenStream for AttrTokenStream {
-    fn to_attr_token_stream(&self) -> AttrTokenStream {
-        self.clone()
-    }
-}
-
-/// A lazy version of [`TokenStream`], which defers creation
-/// of an actual `TokenStream` until it is needed.
-/// `Box` is here only to reduce the structure size.
+/// A lazy version of [`AttrTokenStream`], which defers creation of an actual
+/// `AttrTokenStream` until it is needed.
 #[derive(Clone)]
-pub struct LazyAttrTokenStream(Arc<Box<dyn ToAttrTokenStream>>);
+pub struct LazyAttrTokenStream(Arc<LazyAttrTokenStreamInner>);
 
 impl LazyAttrTokenStream {
-    pub fn new(inner: impl ToAttrTokenStream + 'static) -> LazyAttrTokenStream {
-        LazyAttrTokenStream(Arc::new(Box::new(inner)))
+    pub fn new_direct(stream: AttrTokenStream) -> LazyAttrTokenStream {
+        LazyAttrTokenStream(Arc::new(LazyAttrTokenStreamInner::Direct(stream)))
+    }
+
+    pub fn new_pending(
+        start_token: (Token, Spacing),
+        cursor_snapshot: TokenCursor,
+        num_calls: u32,
+        break_last_token: u32,
+        node_replacements: ThinVec<NodeReplacement>,
+    ) -> LazyAttrTokenStream {
+        LazyAttrTokenStream(Arc::new(LazyAttrTokenStreamInner::Pending {
+            start_token,
+            cursor_snapshot,
+            num_calls,
+            break_last_token,
+            node_replacements,
+        }))
     }
 
     pub fn to_attr_token_stream(&self) -> AttrTokenStream {
@@ -156,6 +163,184 @@ impl<CTX> HashStable<CTX> for LazyAttrTokenStream {
     }
 }
 
+/// A token range within a `Parser`'s full token stream.
+#[derive(Clone, Debug)]
+pub struct ParserRange(pub Range<u32>);
+
+/// A token range within an individual AST node's (lazy) token stream, i.e.
+/// relative to that node's first token. Distinct from `ParserRange` so the two
+/// kinds of range can't be mixed up.
+#[derive(Clone, Debug)]
+pub struct NodeRange(pub Range<u32>);
+
+/// Indicates a range of tokens that should be replaced by an `AttrsTarget`
+/// (replacement) or be replaced by nothing (deletion). This is used in two
+/// places during token collection.
+///
+/// 1. Replacement. During the parsing of an AST node that may have a
+///    `#[derive]` attribute, when we parse a nested AST node that has `#[cfg]`
+///    or `#[cfg_attr]`, we replace the entire inner AST node with
+///    `FlatToken::AttrsTarget`. This lets us perform eager cfg-expansion on an
+///    `AttrTokenStream`.
+///
+/// 2. Deletion. We delete inner attributes from all collected token streams,
+///    and instead track them through the `attrs` field on the AST node. This
+///    lets us manipulate them similarly to outer attributes. When we create a
+///    `TokenStream`, the inner attributes are inserted into the proper place
+///    in the token stream.
+///
+/// Each replacement starts off in `ParserReplacement` form but is converted to
+/// `NodeReplacement` form when it is attached to a single AST node, via
+/// `LazyAttrTokenStreamImpl`.
+pub type ParserReplacement = (ParserRange, Option<AttrsTarget>);
+
+/// See the comment on `ParserReplacement`.
+pub type NodeReplacement = (NodeRange, Option<AttrsTarget>);
+
+impl NodeRange {
+    // Converts a range within a parser's tokens to a range within a
+    // node's tokens beginning at `start_pos`.
+    //
+    // For example, imagine a parser with 50 tokens in its token stream, a
+    // function that spans `ParserRange(20..40)` and an inner attribute within
+    // that function that spans `ParserRange(30..35)`. We would find the inner
+    // attribute's range within the function's tokens by subtracting 20, which
+    // is the position of the function's start token. This gives
+    // `NodeRange(10..15)`.
+    pub fn new(ParserRange(parser_range): ParserRange, start_pos: u32) -> NodeRange {
+        assert!(!parser_range.is_empty());
+        assert!(parser_range.start >= start_pos);
+        NodeRange((parser_range.start - start_pos)..(parser_range.end - start_pos))
+    }
+}
+
+enum LazyAttrTokenStreamInner {
+    // The token stream has already been produced.
+    Direct(AttrTokenStream),
+
+    // From a value of this type we can reconstruct the `TokenStream` seen by
+    // the `f` callback passed to a call to `Parser::collect_tokens`, by
+    // replaying the getting of the tokens. This saves us producing a
+    // `TokenStream` if it is never needed, e.g. a captured `macro_rules!`
+    // argument that is never passed to a proc macro. In practice, token stream
+    // creation happens rarely compared to calls to `collect_tokens` (see some
+    // statistics in #78736) so we are doing as little up-front work as
+    // possible.
+    //
+    // This also makes `Parser` very cheap to clone, since there is no
+    // intermediate collection buffer to clone.
+    Pending {
+        start_token: (Token, Spacing),
+        cursor_snapshot: TokenCursor,
+        num_calls: u32,
+        break_last_token: u32,
+        node_replacements: ThinVec<NodeReplacement>,
+    },
+}
+
+impl LazyAttrTokenStreamInner {
+    fn to_attr_token_stream(&self) -> AttrTokenStream {
+        match self {
+            LazyAttrTokenStreamInner::Direct(stream) => stream.clone(),
+            LazyAttrTokenStreamInner::Pending {
+                start_token,
+                cursor_snapshot,
+                num_calls,
+                break_last_token,
+                node_replacements,
+            } => {
+                // The token produced by the final call to `{,inlined_}next` was not
+                // actually consumed by the callback. The combination of chaining the
+                // initial token and using `take` produces the desired result - we
+                // produce an empty `TokenStream` if no calls were made, and omit the
+                // final token otherwise.
+                let mut cursor_snapshot = cursor_snapshot.clone();
+                let tokens = iter::once(FlatToken::Token(*start_token))
+                    .chain(iter::repeat_with(|| FlatToken::Token(cursor_snapshot.next())))
+                    .take(*num_calls as usize);
+
+                if node_replacements.is_empty() {
+                    make_attr_token_stream(tokens, *break_last_token)
+                } else {
+                    let mut tokens: Vec<_> = tokens.collect();
+                    let mut node_replacements = node_replacements.to_vec();
+                    node_replacements.sort_by_key(|(range, _)| range.0.start);
+
+                    #[cfg(debug_assertions)]
+                    for [(node_range, tokens), (next_node_range, next_tokens)] in
+                        node_replacements.array_windows()
+                    {
+                        assert!(
+                            node_range.0.end <= next_node_range.0.start
+                                || node_range.0.end >= next_node_range.0.end,
+                            "Node ranges should be disjoint or nested: ({:?}, {:?}) ({:?}, {:?})",
+                            node_range,
+                            tokens,
+                            next_node_range,
+                            next_tokens,
+                        );
+                    }
+
+                    // Process the replace ranges, starting from the highest start
+                    // position and working our way back. If we have tokens like:
+                    //
+                    // `#[cfg(FALSE)] struct Foo { #[cfg(FALSE)] field: bool }`
+                    //
+                    // Then we will generate replace ranges for both
+                    // the `#[cfg(FALSE)] field: bool` and the entire
+                    // `#[cfg(FALSE)] struct Foo { #[cfg(FALSE)] field: bool }`
+                    //
+                    // By starting processing from the replace range with the greatest
+                    // start position, we ensure that any (outer) replace range which
+                    // encloses another (inner) replace range will fully overwrite the
+                    // inner range's replacement.
+                    for (node_range, target) in node_replacements.into_iter().rev() {
+                        assert!(
+                            !node_range.0.is_empty(),
+                            "Cannot replace an empty node range: {:?}",
+                            node_range.0
+                        );
+
+                        // Replace the tokens in range with zero or one `FlatToken::AttrsTarget`s,
+                        // plus enough `FlatToken::Empty`s to fill up the rest of the range. This
+                        // keeps the total length of `tokens` constant throughout the replacement
+                        // process, allowing us to do all replacements without adjusting indices.
+                        let target_len = target.is_some() as usize;
+                        tokens.splice(
+                            (node_range.0.start as usize)..(node_range.0.end as usize),
+                            target.into_iter().map(|target| FlatToken::AttrsTarget(target)).chain(
+                                iter::repeat(FlatToken::Empty)
+                                    .take(node_range.0.len() - target_len),
+                            ),
+                        );
+                    }
+                    make_attr_token_stream(tokens.into_iter(), *break_last_token)
+                }
+            }
+        }
+    }
+}
+
+/// A helper struct used when building an `AttrTokenStream` from
+/// a `LazyAttrTokenStream`. Both delimiter and non-delimited tokens
+/// are stored as `FlatToken::Token`. A vector of `FlatToken`s
+/// is then 'parsed' to build up an `AttrTokenStream` with nested
+/// `AttrTokenTree::Delimited` tokens.
+#[derive(Debug, Clone)]
+enum FlatToken {
+    /// A token - this holds both delimiter (e.g. '{' and '}')
+    /// and non-delimiter tokens
+    Token((Token, Spacing)),
+    /// Holds the `AttrsTarget` for an AST node. The `AttrsTarget` is inserted
+    /// directly into the constructed `AttrTokenStream` as an
+    /// `AttrTokenTree::AttrsTarget`.
+    AttrsTarget(AttrsTarget),
+    /// A special 'empty' token that is ignored during the conversion
+    /// to an `AttrTokenStream`. This is used to simplify the
+    /// handling of replace ranges.
+    Empty,
+}
+
 /// An `AttrTokenStream` is similar to a `TokenStream`, but with extra
 /// information about the tokens for attribute targets. This is used
 /// during expansion to perform early cfg-expansion, and to process attributes
@@ -163,6 +348,71 @@ impl<CTX> HashStable<CTX> for LazyAttrTokenStream {
 #[derive(Clone, Debug, Default, Encodable, Decodable)]
 pub struct AttrTokenStream(pub Arc<Vec<AttrTokenTree>>);
 
+/// Converts a flattened iterator of tokens (including open and close delimiter tokens) into an
+/// `AttrTokenStream`, creating an `AttrTokenTree::Delimited` for each matching pair of open and
+/// close delims.
+fn make_attr_token_stream(
+    iter: impl Iterator<Item = FlatToken>,
+    break_last_token: u32,
+) -> AttrTokenStream {
+    #[derive(Debug)]
+    struct FrameData {
+        // This is `None` for the first frame, `Some` for all others.
+        open_delim_sp: Option<(Delimiter, Span, Spacing)>,
+        inner: Vec<AttrTokenTree>,
+    }
+    // The stack always has at least one element. Storing it separately makes for shorter code.
+    let mut stack_top = FrameData { open_delim_sp: None, inner: vec![] };
+    let mut stack_rest = vec![];
+    for flat_token in iter {
+        match flat_token {
+            FlatToken::Token((token @ Token { kind, span }, spacing)) => {
+                if let Some(delim) = kind.open_delim() {
+                    stack_rest.push(mem::replace(
+                        &mut stack_top,
+                        FrameData { open_delim_sp: Some((delim, span, spacing)), inner: vec![] },
+                    ));
+                } else if let Some(delim) = kind.close_delim() {
+                    let frame_data = mem::replace(&mut stack_top, stack_rest.pop().unwrap());
+                    let (open_delim, open_sp, open_spacing) = frame_data.open_delim_sp.unwrap();
+                    assert!(
+                        open_delim.eq_ignoring_invisible_origin(&delim),
+                        "Mismatched open/close delims: open={open_delim:?} close={span:?}"
+                    );
+                    let dspan = DelimSpan::from_pair(open_sp, span);
+                    let dspacing = DelimSpacing::new(open_spacing, spacing);
+                    let stream = AttrTokenStream::new(frame_data.inner);
+                    let delimited = AttrTokenTree::Delimited(dspan, dspacing, delim, stream);
+                    stack_top.inner.push(delimited);
+                } else {
+                    stack_top.inner.push(AttrTokenTree::Token(token, spacing))
+                }
+            }
+            FlatToken::AttrsTarget(target) => {
+                stack_top.inner.push(AttrTokenTree::AttrsTarget(target))
+            }
+            FlatToken::Empty => {}
+        }
+    }
+
+    if break_last_token > 0 {
+        let last_token = stack_top.inner.pop().unwrap();
+        if let AttrTokenTree::Token(last_token, spacing) = last_token {
+            let (unglued, _) = last_token.kind.break_two_token_op(break_last_token).unwrap();
+
+            // Tokens are always ASCII chars, so we can use byte arithmetic here.
+            let mut first_span = last_token.span.shrink_to_lo();
+            first_span =
+                first_span.with_hi(first_span.lo() + rustc_span::BytePos(break_last_token));
+
+            stack_top.inner.push(AttrTokenTree::Token(Token::new(unglued, first_span), spacing));
+        } else {
+            panic!("Unexpected last token {last_token:?}")
+        }
+    }
+    AttrTokenStream::new(stack_top.inner)
+}
+
 /// Like `TokenTree`, but for `AttrTokenStream`.
 #[derive(Clone, Debug, Encodable, Decodable)]
 pub enum AttrTokenTree {
@@ -641,6 +891,104 @@ impl<'t> Iterator for TokenStreamIter<'t> {
     }
 }
 
+#[derive(Clone, Debug)]
+pub struct TokenTreeCursor {
+    stream: TokenStream,
+    /// Points to the current token tree in the stream. In `TokenCursor::curr`,
+    /// this can be any token tree. In `TokenCursor::stack`, this is always a
+    /// `TokenTree::Delimited`.
+    index: usize,
+}
+
+impl TokenTreeCursor {
+    #[inline]
+    pub fn new(stream: TokenStream) -> Self {
+        TokenTreeCursor { stream, index: 0 }
+    }
+
+    #[inline]
+    pub fn curr(&self) -> Option<&TokenTree> {
+        self.stream.get(self.index)
+    }
+
+    pub fn look_ahead(&self, n: usize) -> Option<&TokenTree> {
+        self.stream.get(self.index + n)
+    }
+
+    #[inline]
+    pub fn bump(&mut self) {
+        self.index += 1;
+    }
+}
+
+/// A `TokenStream` cursor that produces `Token`s. It's a bit odd that
+/// we (a) lex tokens into a nice tree structure (`TokenStream`), and then (b)
+/// use this type to emit them as a linear sequence. But a linear sequence is
+/// what the parser expects, for the most part.
+#[derive(Clone, Debug)]
+pub struct TokenCursor {
+    // Cursor for the current (innermost) token stream. The index within the
+    // cursor can point to any token tree in the stream (or one past the end).
+    // The delimiters for this token stream are found in `self.stack.last()`;
+    // if that is `None` we are in the outermost token stream which never has
+    // delimiters.
+    pub curr: TokenTreeCursor,
+
+    // Token streams surrounding the current one. The index within each cursor
+    // always points to a `TokenTree::Delimited`.
+    pub stack: Vec<TokenTreeCursor>,
+}
+
+impl TokenCursor {
+    pub fn next(&mut self) -> (Token, Spacing) {
+        self.inlined_next()
+    }
+
+    /// This always-inlined version should only be used on hot code paths.
+    #[inline(always)]
+    pub fn inlined_next(&mut self) -> (Token, Spacing) {
+        loop {
+            // FIXME: we currently don't return `Delimiter::Invisible` open/close delims. To fix
+            // #67062 we will need to, whereupon the `delim != Delimiter::Invisible` conditions
+            // below can be removed.
+            if let Some(tree) = self.curr.curr() {
+                match tree {
+                    &TokenTree::Token(token, spacing) => {
+                        debug_assert!(!token.kind.is_delim());
+                        let res = (token, spacing);
+                        self.curr.bump();
+                        return res;
+                    }
+                    &TokenTree::Delimited(sp, spacing, delim, ref tts) => {
+                        let trees = TokenTreeCursor::new(tts.clone());
+                        self.stack.push(mem::replace(&mut self.curr, trees));
+                        if !delim.skip() {
+                            return (Token::new(delim.as_open_token_kind(), sp.open), spacing.open);
+                        }
+                        // No open delimiter to return; continue on to the next iteration.
+                    }
+                };
+            } else if let Some(parent) = self.stack.pop() {
+                // We have exhausted this token stream. Move back to its parent token stream.
+                let Some(&TokenTree::Delimited(span, spacing, delim, _)) = parent.curr() else {
+                    panic!("parent should be Delimited")
+                };
+                self.curr = parent;
+                self.curr.bump(); // move past the `Delimited`
+                if !delim.skip() {
+                    return (Token::new(delim.as_close_token_kind(), span.close), spacing.close);
+                }
+                // No close delimiter to return; continue on to the next iteration.
+            } else {
+                // We have exhausted the outermost token stream. The use of
+                // `Spacing::Alone` is arbitrary and immaterial, because the
+                // `Eof` token's spacing is never used.
+                return (Token::new(token::Eof, DUMMY_SP), Spacing::Alone);
+            }
+        }
+    }
+}
+
 #[derive(Debug, Copy, Clone, PartialEq, Encodable, Decodable, HashStable_Generic)]
 pub struct DelimSpan {
     pub open: Span,
@@ -687,6 +1035,7 @@ mod size_asserts {
     static_assert_size!(AttrTokenStream, 8);
     static_assert_size!(AttrTokenTree, 32);
     static_assert_size!(LazyAttrTokenStream, 8);
+    static_assert_size!(LazyAttrTokenStreamInner, 88);
     static_assert_size!(Option<LazyAttrTokenStream>, 8); // must be small, used in many AST nodes
     static_assert_size!(TokenStream, 8);
     static_assert_size!(TokenTree, 32);
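
The `ParserRange`/`NodeRange` machinery above re-bases token positions from parser-relative to node-relative coordinates. A standalone sketch of that arithmetic, using the worked numbers from the `NodeRange::new` comment (the tuple structs below are local stand-ins, not the `rustc_ast` types):

    use std::ops::Range;

    struct ParserRange(Range<u32>);
    struct NodeRange(Range<u32>);

    // Mirrors `NodeRange::new`: shift a parser-relative range so it is relative
    // to the first token of the AST node that owns it.
    fn node_range(ParserRange(parser_range): ParserRange, start_pos: u32) -> NodeRange {
        assert!(!parser_range.is_empty());
        assert!(parser_range.start >= start_pos);
        NodeRange((parser_range.start - start_pos)..(parser_range.end - start_pos))
    }

    fn main() {
        // A function spans ParserRange(20..40); an inner attribute inside it spans
        // ParserRange(30..35). Relative to the function's first token (position 20),
        // the attribute occupies NodeRange(10..15).
        let NodeRange(range) = node_range(ParserRange(30..35), 20);
        assert_eq!(range, 10..15);
    }
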
diff --git a/compiler/rustc_ast/src/visit.rs b/compiler/rustc_ast/src/visit.rs
index 79193fcec63..69a186c8cf1 100644
--- a/compiler/rustc_ast/src/visit.rs
+++ b/compiler/rustc_ast/src/visit.rs
@@ -608,6 +608,7 @@ pub fn walk_ty_pat<'a, V: Visitor<'a>>(visitor: &mut V, tp: &'a TyPat) -> V::Res
             visit_opt!(visitor, visit_anon_const, start);
             visit_opt!(visitor, visit_anon_const, end);
         }
+        TyPatKind::Or(variants) => walk_list!(visitor, visit_ty_pat, variants),
         TyPatKind::Err(_) => {}
     }
     V::Result::output()
diff --git a/compiler/rustc_ast_lowering/src/pat.rs b/compiler/rustc_ast_lowering/src/pat.rs
index f94d788a9b0..4a6929ef011 100644
--- a/compiler/rustc_ast_lowering/src/pat.rs
+++ b/compiler/rustc_ast_lowering/src/pat.rs
@@ -464,6 +464,11 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
                         )
                     }),
             ),
+            TyPatKind::Or(variants) => {
+                hir::TyPatKind::Or(self.arena.alloc_from_iter(
+                    variants.iter().map(|pat| self.lower_ty_pat_mut(pat, base_type)),
+                ))
+            }
             TyPatKind::Err(guar) => hir::TyPatKind::Err(*guar),
         };
 
diff --git a/compiler/rustc_ast_pretty/src/pprust/state.rs b/compiler/rustc_ast_pretty/src/pprust/state.rs
index 985359e1234..b5925fab7d9 100644
--- a/compiler/rustc_ast_pretty/src/pprust/state.rs
+++ b/compiler/rustc_ast_pretty/src/pprust/state.rs
@@ -634,6 +634,7 @@ pub trait PrintState<'a>: std::ops::Deref<Target = pp::Printer> + std::ops::Dere
                 false,
                 None,
                 *delim,
+                None,
                 tokens,
                 true,
                 span,
@@ -679,6 +680,7 @@ pub trait PrintState<'a>: std::ops::Deref<Target = pp::Printer> + std::ops::Dere
                     false,
                     None,
                     *delim,
+                    Some(spacing.open),
                     tts,
                     convert_dollar_crate,
                     dspan.entire(),
@@ -735,6 +737,7 @@ pub trait PrintState<'a>: std::ops::Deref<Target = pp::Printer> + std::ops::Dere
         has_bang: bool,
         ident: Option<Ident>,
         delim: Delimiter,
+        open_spacing: Option<Spacing>,
         tts: &TokenStream,
         convert_dollar_crate: bool,
         span: Span,
@@ -758,16 +761,26 @@ pub trait PrintState<'a>: std::ops::Deref<Target = pp::Printer> + std::ops::Dere
                     self.nbsp();
                 }
                 self.word("{");
-                if !tts.is_empty() {
+
+                // Respect `Alone`, if provided, by printing a space, unless the list is empty.
+                let open_space = (open_spacing == None || open_spacing == Some(Spacing::Alone))
+                    && !tts.is_empty();
+                if open_space {
                     self.space();
                 }
                 let ib = self.ibox(0);
                 self.print_tts(tts, convert_dollar_crate);
                 self.end(ib);
-                let empty = tts.is_empty();
-                self.bclose(span, empty, cb.unwrap());
+
+                // Use `open_space` for the spacing *before* the closing delim as
+                // well: spacing on delimiters is lost when going through proc
+                // macros, and otherwise we can end up with ugly, asymmetric cases
+                // like `{ x}`. Symmetry is better.
+                self.bclose(span, !open_space, cb.unwrap());
             }
             delim => {
+                // `open_spacing` is ignored. We never print spaces after
+                // non-brace opening delims or before non-brace closing delims.
                 let token_str = self.token_kind_to_string(&delim.as_open_token_kind());
                 self.word(token_str);
                 let ib = self.ibox(0);
@@ -797,6 +810,7 @@ pub trait PrintState<'a>: std::ops::Deref<Target = pp::Printer> + std::ops::Dere
             has_bang,
             Some(*ident),
             macro_def.body.delim,
+            None,
             &macro_def.body.tokens,
             true,
             sp,
@@ -844,9 +858,9 @@ pub trait PrintState<'a>: std::ops::Deref<Target = pp::Printer> + std::ops::Dere
         self.end(ib);
     }
 
-    fn bclose_maybe_open(&mut self, span: rustc_span::Span, empty: bool, cb: Option<BoxMarker>) {
+    fn bclose_maybe_open(&mut self, span: rustc_span::Span, no_space: bool, cb: Option<BoxMarker>) {
         let has_comment = self.maybe_print_comment(span.hi());
-        if !empty || has_comment {
+        if !no_space || has_comment {
             self.break_offset_if_not_bol(1, -INDENT_UNIT);
         }
         self.word("}");
@@ -855,9 +869,9 @@ pub trait PrintState<'a>: std::ops::Deref<Target = pp::Printer> + std::ops::Dere
         }
     }
 
-    fn bclose(&mut self, span: rustc_span::Span, empty: bool, cb: BoxMarker) {
+    fn bclose(&mut self, span: rustc_span::Span, no_space: bool, cb: BoxMarker) {
         let cb = Some(cb);
-        self.bclose_maybe_open(span, empty, cb)
+        self.bclose_maybe_open(span, no_space, cb)
     }
 
     fn break_offset_if_not_bol(&mut self, n: usize, off: isize) {
@@ -1158,6 +1172,17 @@ impl<'a> State<'a> {
                     self.print_expr_anon_const(end, &[]);
                 }
             }
+            rustc_ast::TyPatKind::Or(variants) => {
+                let mut first = true;
+                for pat in variants {
+                    if first {
+                        first = false
+                    } else {
+                        self.word(" | ");
+                    }
+                    self.print_ty_pat(pat);
+                }
+            }
             rustc_ast::TyPatKind::Err(_) => {
                 self.popen();
                 self.word("/*ERROR*/");
@@ -1423,8 +1448,8 @@ impl<'a> State<'a> {
             }
         }
 
-        let empty = !has_attrs && blk.stmts.is_empty();
-        self.bclose_maybe_open(blk.span, empty, cb);
+        let no_space = !has_attrs && blk.stmts.is_empty();
+        self.bclose_maybe_open(blk.span, no_space, cb);
         self.ann.post(self, AnnNode::Block(blk))
     }
 
@@ -1471,6 +1496,7 @@ impl<'a> State<'a> {
             true,
             None,
             m.args.delim,
+            None,
             &m.args.tokens,
             true,
             m.span(),
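
The brace-spacing rule introduced above, reduced to a standalone sketch (the `Spacing` enum is a local stand-in for `rustc_ast::tokenstream::Spacing`, and `brace_space` is a hypothetical helper, not a function in this patch). The same flag is used on both sides of the braces, which is what keeps the printed output symmetric:

    #[derive(Clone, Copy, PartialEq)]
    enum Spacing { Alone, Joint }

    // `true` means a space is printed just inside both braces.
    fn brace_space(open_spacing: Option<Spacing>, tts_is_empty: bool) -> bool {
        (open_spacing == None || open_spacing == Some(Spacing::Alone)) && !tts_is_empty
    }

    fn main() {
        assert!(brace_space(None, false));                  // no spacing info: `{ x }`
        assert!(brace_space(Some(Spacing::Alone), false));  // `{ x }`
        assert!(!brace_space(Some(Spacing::Joint), false)); // `{x}`
        assert!(!brace_space(None, true));                  // empty body: `{}`
    }
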
diff --git a/compiler/rustc_attr_parsing/src/lib.rs b/compiler/rustc_attr_parsing/src/lib.rs
index b9692c01e2c..874fccf7ff6 100644
--- a/compiler/rustc_attr_parsing/src/lib.rs
+++ b/compiler/rustc_attr_parsing/src/lib.rs
@@ -80,6 +80,7 @@
 #![cfg_attr(bootstrap, feature(let_chains))]
 #![doc(rust_logo)]
 #![feature(rustdoc_internals)]
+#![recursion_limit = "256"]
 // tidy-alphabetical-end
 
 #[macro_use]
diff --git a/compiler/rustc_builtin_macros/src/autodiff.rs b/compiler/rustc_builtin_macros/src/autodiff.rs
index 6d97dfa3a4d..8c5c20c7af4 100644
--- a/compiler/rustc_builtin_macros/src/autodiff.rs
+++ b/compiler/rustc_builtin_macros/src/autodiff.rs
@@ -323,9 +323,9 @@ mod llvm_enzyme {
             Spacing::Joint,
         )];
         let never_arg = ast::DelimArgs {
-            dspan: ast::tokenstream::DelimSpan::from_single(span),
+            dspan: DelimSpan::from_single(span),
             delim: ast::token::Delimiter::Parenthesis,
-            tokens: ast::tokenstream::TokenStream::from_iter(ts2),
+            tokens: TokenStream::from_iter(ts2),
         };
         let inline_item = ast::AttrItem {
             unsafety: ast::Safety::Default,
diff --git a/compiler/rustc_builtin_macros/src/lib.rs b/compiler/rustc_builtin_macros/src/lib.rs
index 70e817db2a6..c2f5bf0f457 100644
--- a/compiler/rustc_builtin_macros/src/lib.rs
+++ b/compiler/rustc_builtin_macros/src/lib.rs
@@ -18,6 +18,7 @@
 #![feature(rustdoc_internals)]
 #![feature(string_from_utf8_lossy_owned)]
 #![feature(try_blocks)]
+#![recursion_limit = "256"]
 // tidy-alphabetical-end
 
 extern crate proc_macro;
diff --git a/compiler/rustc_builtin_macros/src/pattern_type.rs b/compiler/rustc_builtin_macros/src/pattern_type.rs
index a55c7e962d0..3529e5525fc 100644
--- a/compiler/rustc_builtin_macros/src/pattern_type.rs
+++ b/compiler/rustc_builtin_macros/src/pattern_type.rs
@@ -1,9 +1,10 @@
 use rustc_ast::ptr::P;
 use rustc_ast::tokenstream::TokenStream;
-use rustc_ast::{AnonConst, DUMMY_NODE_ID, Ty, TyPat, TyPatKind, ast};
+use rustc_ast::{AnonConst, DUMMY_NODE_ID, Ty, TyPat, TyPatKind, ast, token};
 use rustc_errors::PResult;
 use rustc_expand::base::{self, DummyResult, ExpandResult, ExtCtxt, MacroExpanderResult};
 use rustc_parse::exp;
+use rustc_parse::parser::{CommaRecoveryMode, RecoverColon, RecoverComma};
 use rustc_span::Span;
 
 pub(crate) fn expand<'cx>(
@@ -26,19 +27,42 @@ fn parse_pat_ty<'a>(cx: &mut ExtCtxt<'a>, stream: TokenStream) -> PResult<'a, (P
 
     let ty = parser.parse_ty()?;
     parser.expect_keyword(exp!(Is))?;
-    let pat = parser.parse_pat_no_top_alt(None, None)?.into_inner();
 
+    let pat = pat_to_ty_pat(
+        cx,
+        parser
+            .parse_pat_no_top_guard(
+                None,
+                RecoverComma::No,
+                RecoverColon::No,
+                CommaRecoveryMode::EitherTupleOrPipe,
+            )?
+            .into_inner(),
+    );
+
+    if parser.token != token::Eof {
+        parser.unexpected()?;
+    }
+
+    Ok((ty, pat))
+}
+
+fn ty_pat(kind: TyPatKind, span: Span) -> P<TyPat> {
+    P(TyPat { id: DUMMY_NODE_ID, kind, span, tokens: None })
+}
+
+fn pat_to_ty_pat(cx: &mut ExtCtxt<'_>, pat: ast::Pat) -> P<TyPat> {
     let kind = match pat.kind {
         ast::PatKind::Range(start, end, include_end) => TyPatKind::Range(
             start.map(|value| P(AnonConst { id: DUMMY_NODE_ID, value })),
             end.map(|value| P(AnonConst { id: DUMMY_NODE_ID, value })),
             include_end,
         ),
+        ast::PatKind::Or(variants) => TyPatKind::Or(
+            variants.into_iter().map(|pat| pat_to_ty_pat(cx, pat.into_inner())).collect(),
+        ),
         ast::PatKind::Err(guar) => TyPatKind::Err(guar),
         _ => TyPatKind::Err(cx.dcx().span_err(pat.span, "pattern not supported in pattern types")),
     };
-
-    let pat = P(TyPat { id: pat.id, kind, span: pat.span, tokens: pat.tokens });
-
-    Ok((ty, pat))
+    ty_pat(kind, pat.span)
 }
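
The net effect of this file's changes is that `pattern_type!` now parses a full pattern (so something like `pattern_type!(u32 is 0..=5 | 8..=10)` should be accepted syntactically), rejects trailing tokens, and converts the parsed pattern recursively. A miniature, self-contained model of that conversion, with toy enums standing in for the real AST and `TyPat` types:

    #[derive(Debug, PartialEq)]
    enum Pat { Range(u32, u32), Or(Vec<Pat>), Wild }

    #[derive(Debug, PartialEq)]
    enum TyPat { Range(u32, u32), Or(Vec<TyPat>), Err }

    // Or-patterns recurse, ranges map across, and anything else becomes an error,
    // mirroring the shape of `pat_to_ty_pat` above.
    fn pat_to_ty_pat(pat: Pat) -> TyPat {
        match pat {
            Pat::Range(lo, hi) => TyPat::Range(lo, hi),
            Pat::Or(variants) => TyPat::Or(variants.into_iter().map(pat_to_ty_pat).collect()),
            Pat::Wild => TyPat::Err,
        }
    }

    fn main() {
        let pat = Pat::Or(vec![Pat::Range(1, 5), Pat::Range(8, 10)]);
        assert_eq!(
            pat_to_ty_pat(pat),
            TyPat::Or(vec![TyPat::Range(1, 5), TyPat::Range(8, 10)])
        );
        assert_eq!(pat_to_ty_pat(Pat::Wild), TyPat::Err);
    }
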
diff --git a/compiler/rustc_codegen_cranelift/src/base.rs b/compiler/rustc_codegen_cranelift/src/base.rs
index e9c7186b03c..524e0d9fe35 100644
--- a/compiler/rustc_codegen_cranelift/src/base.rs
+++ b/compiler/rustc_codegen_cranelift/src/base.rs
@@ -8,8 +8,6 @@ use rustc_ast::InlineAsmOptions;
 use rustc_codegen_ssa::base::is_call_from_compiler_builtins_to_upstream_monomorphization;
 use rustc_data_structures::profiling::SelfProfilerRef;
 use rustc_index::IndexVec;
-use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
-use rustc_middle::mir::InlineAsmMacro;
 use rustc_middle::ty::TypeVisitableExt;
 use rustc_middle::ty::adjustment::PointerCoercion;
 use rustc_middle::ty::layout::{FnAbiOf, HasTypingEnv};
@@ -18,7 +16,6 @@ use rustc_middle::ty::print::with_no_trimmed_paths;
 use crate::constant::ConstantCx;
 use crate::debuginfo::{FunctionDebugContext, TypeDebugContext};
 use crate::enable_verifier;
-use crate::inline_asm::codegen_naked_asm;
 use crate::prelude::*;
 use crate::pretty_clif::CommentWriter;
 
@@ -37,7 +34,7 @@ pub(crate) fn codegen_fn<'tcx>(
     cached_func: Function,
     module: &mut dyn Module,
     instance: Instance<'tcx>,
-) -> Option<CodegenedFunction> {
+) -> CodegenedFunction {
     debug_assert!(!instance.args.has_infer());
 
     let symbol_name = tcx.symbol_name(instance).name.to_string();
@@ -54,38 +51,6 @@ pub(crate) fn codegen_fn<'tcx>(
         String::from_utf8_lossy(&buf).into_owned()
     });
 
-    if tcx.codegen_fn_attrs(instance.def_id()).flags.contains(CodegenFnAttrFlags::NAKED) {
-        assert_eq!(mir.basic_blocks.len(), 1);
-        assert!(mir.basic_blocks[START_BLOCK].statements.is_empty());
-
-        match &mir.basic_blocks[START_BLOCK].terminator().kind {
-            TerminatorKind::InlineAsm {
-                asm_macro: InlineAsmMacro::NakedAsm,
-                template,
-                operands,
-                options,
-                line_spans: _,
-                targets: _,
-                unwind: _,
-            } => {
-                codegen_naked_asm(
-                    tcx,
-                    cx,
-                    module,
-                    instance,
-                    mir.basic_blocks[START_BLOCK].terminator().source_info.span,
-                    &symbol_name,
-                    template,
-                    operands,
-                    *options,
-                );
-            }
-            _ => unreachable!(),
-        }
-
-        return None;
-    }
-
     // Declare function
     let sig = get_function_sig(tcx, module.target_config().default_call_conv, instance);
     let func_id = module.declare_function(&symbol_name, Linkage::Local, &sig).unwrap();
@@ -166,7 +131,7 @@ pub(crate) fn codegen_fn<'tcx>(
     // Verify function
     verify_func(tcx, &clif_comments, &func);
 
-    Some(CodegenedFunction { symbol_name, func_id, func, clif_comments, func_debug_cx })
+    CodegenedFunction { symbol_name, func_id, func, clif_comments, func_debug_cx }
 }
 
 pub(crate) fn compile_fn(
diff --git a/compiler/rustc_codegen_cranelift/src/driver/aot.rs b/compiler/rustc_codegen_cranelift/src/driver/aot.rs
index 00136ac4a57..5d07c94859f 100644
--- a/compiler/rustc_codegen_cranelift/src/driver/aot.rs
+++ b/compiler/rustc_codegen_cranelift/src/driver/aot.rs
@@ -22,7 +22,10 @@ use rustc_data_structures::sync::{IntoDynSyncSend, par_map};
 use rustc_metadata::EncodedMetadata;
 use rustc_metadata::fs::copy_to_stdout;
 use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
-use rustc_middle::mir::mono::{CodegenUnit, MonoItem};
+use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
+use rustc_middle::mir::mono::{
+    CodegenUnit, Linkage as RLinkage, MonoItem, MonoItemData, Visibility,
+};
 use rustc_session::Session;
 use rustc_session::config::{DebugInfo, OutFileName, OutputFilenames, OutputType};
 
@@ -30,7 +33,7 @@ use crate::CodegenCx;
 use crate::base::CodegenedFunction;
 use crate::concurrency_limiter::{ConcurrencyLimiter, ConcurrencyLimiterToken};
 use crate::debuginfo::TypeDebugContext;
-use crate::global_asm::GlobalAsmConfig;
+use crate::global_asm::{GlobalAsmConfig, GlobalAsmContext};
 use crate::prelude::*;
 use crate::unwind_module::UnwindModule;
 
@@ -530,19 +533,35 @@ fn codegen_cgu_content(
     let mut type_dbg = TypeDebugContext::default();
     super::predefine_mono_items(tcx, module, &mono_items);
     let mut codegened_functions = vec![];
-    for (mono_item, _) in mono_items {
+    for (mono_item, item_data) in mono_items {
         match mono_item {
-            MonoItem::Fn(inst) => {
-                if let Some(codegened_function) = crate::base::codegen_fn(
+            MonoItem::Fn(instance) => {
+                if tcx.codegen_fn_attrs(instance.def_id()).flags.contains(CodegenFnAttrFlags::NAKED)
+                {
+                    rustc_codegen_ssa::mir::naked_asm::codegen_naked_asm(
+                        &mut GlobalAsmContext { tcx, global_asm: &mut cx.global_asm },
+                        instance,
+                        MonoItemData {
+                            linkage: RLinkage::External,
+                            visibility: if item_data.linkage == RLinkage::Internal {
+                                Visibility::Hidden
+                            } else {
+                                item_data.visibility
+                            },
+                            ..item_data
+                        },
+                    );
+                    continue;
+                }
+                let codegened_function = crate::base::codegen_fn(
                     tcx,
                     &mut cx,
                     &mut type_dbg,
                     Function::new(),
                     module,
-                    inst,
-                ) {
-                    codegened_functions.push(codegened_function);
-                }
+                    instance,
+                );
+                codegened_functions.push(codegened_function);
             }
             MonoItem::Static(def_id) => {
                 let data_id = crate::constant::codegen_static(tcx, module, def_id);
@@ -551,7 +570,10 @@ fn codegen_cgu_content(
                 }
             }
             MonoItem::GlobalAsm(item_id) => {
-                crate::global_asm::codegen_global_asm_item(tcx, &mut cx.global_asm, item_id);
+                rustc_codegen_ssa::base::codegen_global_asm(
+                    &mut GlobalAsmContext { tcx, global_asm: &mut cx.global_asm },
+                    item_id,
+                );
             }
         }
     }
diff --git a/compiler/rustc_codegen_cranelift/src/driver/jit.rs b/compiler/rustc_codegen_cranelift/src/driver/jit.rs
index 41f8bb9161c..e368cf4386d 100644
--- a/compiler/rustc_codegen_cranelift/src/driver/jit.rs
+++ b/compiler/rustc_codegen_cranelift/src/driver/jit.rs
@@ -126,6 +126,11 @@ pub(crate) fn codegen_and_compile_fn<'tcx>(
     module: &mut dyn Module,
     instance: Instance<'tcx>,
 ) {
+    if tcx.codegen_fn_attrs(instance.def_id()).flags.contains(CodegenFnAttrFlags::NAKED) {
+        tcx.dcx()
+            .span_fatal(tcx.def_span(instance.def_id()), "Naked asm is not supported in JIT mode");
+    }
+
     cranelift_codegen::timing::set_thread_profiler(Box::new(super::MeasuremeProfiler(
         tcx.prof.clone(),
     )));
@@ -135,16 +140,15 @@ pub(crate) fn codegen_and_compile_fn<'tcx>(
             crate::PrintOnPanic(|| format!("{:?} {}", instance, tcx.symbol_name(instance).name));
 
         let cached_func = std::mem::replace(&mut cached_context.func, Function::new());
-        if let Some(codegened_func) = crate::base::codegen_fn(
+        let codegened_func = crate::base::codegen_fn(
             tcx,
             cx,
             &mut TypeDebugContext::default(),
             cached_func,
             module,
             instance,
-        ) {
-            crate::base::compile_fn(cx, &tcx.prof, cached_context, module, codegened_func);
-        }
+        );
+        crate::base::compile_fn(cx, &tcx.prof, cached_context, module, codegened_func);
     });
 }
 
diff --git a/compiler/rustc_codegen_cranelift/src/global_asm.rs b/compiler/rustc_codegen_cranelift/src/global_asm.rs
index 79cefb05de3..203b443269f 100644
--- a/compiler/rustc_codegen_cranelift/src/global_asm.rs
+++ b/compiler/rustc_codegen_cranelift/src/global_asm.rs
@@ -7,102 +7,139 @@ use std::process::{Command, Stdio};
 use std::sync::Arc;
 
 use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece};
-use rustc_hir::{InlineAsmOperand, ItemId};
-use rustc_middle::mir::interpret::ErrorHandled;
+use rustc_codegen_ssa::traits::{AsmCodegenMethods, GlobalAsmOperandRef};
+use rustc_middle::ty::TyCtxt;
+use rustc_middle::ty::layout::{
+    FnAbiError, FnAbiOfHelpers, FnAbiRequest, HasTyCtxt, HasTypingEnv, LayoutError, LayoutOfHelpers,
+};
 use rustc_session::config::{OutputFilenames, OutputType};
 use rustc_target::asm::InlineAsmArch;
 
 use crate::prelude::*;
 
-pub(crate) fn codegen_global_asm_item(tcx: TyCtxt<'_>, global_asm: &mut String, item_id: ItemId) {
-    let item = tcx.hir_item(item_id);
-    if let rustc_hir::ItemKind::GlobalAsm { asm, .. } = item.kind {
-        let is_x86 =
-            matches!(tcx.sess.asm_arch.unwrap(), InlineAsmArch::X86 | InlineAsmArch::X86_64);
-
-        if is_x86 {
-            if !asm.options.contains(InlineAsmOptions::ATT_SYNTAX) {
-                global_asm.push_str("\n.intel_syntax noprefix\n");
-            } else {
-                global_asm.push_str("\n.att_syntax\n");
-            }
+pub(crate) struct GlobalAsmContext<'a, 'tcx> {
+    pub tcx: TyCtxt<'tcx>,
+    pub global_asm: &'a mut String,
+}
+
+impl<'tcx> AsmCodegenMethods<'tcx> for GlobalAsmContext<'_, 'tcx> {
+    fn codegen_global_asm(
+        &mut self,
+        template: &[InlineAsmTemplatePiece],
+        operands: &[GlobalAsmOperandRef<'tcx>],
+        options: InlineAsmOptions,
+        _line_spans: &[Span],
+    ) {
+        codegen_global_asm_inner(self.tcx, self.global_asm, template, operands, options);
+    }
+
+    fn mangled_name(&self, instance: Instance<'tcx>) -> String {
+        let symbol_name = self.tcx.symbol_name(instance).name.to_owned();
+        if self.tcx.sess.target.is_like_darwin { format!("_{symbol_name}") } else { symbol_name }
+    }
+}
+
+impl<'tcx> LayoutOfHelpers<'tcx> for GlobalAsmContext<'_, 'tcx> {
+    #[inline]
+    fn handle_layout_err(&self, err: LayoutError<'tcx>, span: Span, ty: Ty<'tcx>) -> ! {
+        if let LayoutError::SizeOverflow(_) | LayoutError::ReferencesError(_) = err {
+            self.tcx.sess.dcx().span_fatal(span, err.to_string())
+        } else {
+            self.tcx
+                .sess
+                .dcx()
+                .span_fatal(span, format!("failed to get layout for `{}`: {}", ty, err))
         }
-        for piece in asm.template {
-            match *piece {
-                InlineAsmTemplatePiece::String(ref s) => global_asm.push_str(s),
-                InlineAsmTemplatePiece::Placeholder { operand_idx, modifier: _, span: op_sp } => {
-                    match asm.operands[operand_idx].0 {
-                        InlineAsmOperand::Const { ref anon_const } => {
-                            match tcx.const_eval_poly(anon_const.def_id.to_def_id()) {
-                                Ok(const_value) => {
-                                    let ty = tcx
-                                        .typeck_body(anon_const.body)
-                                        .node_type(anon_const.hir_id);
-                                    let string = rustc_codegen_ssa::common::asm_const_to_str(
-                                        tcx,
-                                        op_sp,
-                                        const_value,
-                                        FullyMonomorphizedLayoutCx(tcx).layout_of(ty),
-                                    );
-                                    global_asm.push_str(&string);
-                                }
-                                Err(ErrorHandled::Reported { .. }) => {
-                                    // An error has already been reported and compilation is
-                                    // guaranteed to fail if execution hits this path.
-                                }
-                                Err(ErrorHandled::TooGeneric(_)) => {
-                                    span_bug!(op_sp, "asm const cannot be resolved; too generic");
-                                }
-                            }
-                        }
-                        InlineAsmOperand::SymFn { expr } => {
-                            if cfg!(not(feature = "inline_asm_sym")) {
-                                tcx.dcx().span_err(
-                                    item.span,
-                                    "asm! and global_asm! sym operands are not yet supported",
-                                );
-                            }
-
-                            let ty = tcx.typeck(item_id.owner_id).expr_ty(expr);
-                            let instance = match ty.kind() {
-                                &ty::FnDef(def_id, args) => Instance::new(def_id, args),
-                                _ => span_bug!(op_sp, "asm sym is not a function"),
-                            };
-                            let symbol = tcx.symbol_name(instance);
-                            // FIXME handle the case where the function was made private to the
-                            // current codegen unit
-                            global_asm.push_str(symbol.name);
-                        }
-                        InlineAsmOperand::SymStatic { path: _, def_id } => {
-                            if cfg!(not(feature = "inline_asm_sym")) {
-                                tcx.dcx().span_err(
-                                    item.span,
-                                    "asm! and global_asm! sym operands are not yet supported",
-                                );
-                            }
-
-                            let instance = Instance::mono(tcx, def_id);
-                            let symbol = tcx.symbol_name(instance);
-                            global_asm.push_str(symbol.name);
+    }
+}
+
+impl<'tcx> FnAbiOfHelpers<'tcx> for GlobalAsmContext<'_, 'tcx> {
+    #[inline]
+    fn handle_fn_abi_err(
+        &self,
+        err: FnAbiError<'tcx>,
+        span: Span,
+        fn_abi_request: FnAbiRequest<'tcx>,
+    ) -> ! {
+        FullyMonomorphizedLayoutCx(self.tcx).handle_fn_abi_err(err, span, fn_abi_request)
+    }
+}
+
+impl<'tcx> HasTyCtxt<'tcx> for GlobalAsmContext<'_, 'tcx> {
+    fn tcx<'b>(&'b self) -> TyCtxt<'tcx> {
+        self.tcx
+    }
+}
+
+impl<'tcx> rustc_abi::HasDataLayout for GlobalAsmContext<'_, 'tcx> {
+    fn data_layout(&self) -> &rustc_abi::TargetDataLayout {
+        &self.tcx.data_layout
+    }
+}
+
+impl<'tcx> HasTypingEnv<'tcx> for GlobalAsmContext<'_, 'tcx> {
+    fn typing_env(&self) -> ty::TypingEnv<'tcx> {
+        ty::TypingEnv::fully_monomorphized()
+    }
+}
+
+fn codegen_global_asm_inner<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    global_asm: &mut String,
+    template: &[InlineAsmTemplatePiece],
+    operands: &[GlobalAsmOperandRef<'tcx>],
+    options: InlineAsmOptions,
+) {
+    let is_x86 = matches!(tcx.sess.asm_arch.unwrap(), InlineAsmArch::X86 | InlineAsmArch::X86_64);
+
+    if is_x86 {
+        if !options.contains(InlineAsmOptions::ATT_SYNTAX) {
+            global_asm.push_str("\n.intel_syntax noprefix\n");
+        } else {
+            global_asm.push_str("\n.att_syntax\n");
+        }
+    }
+    for piece in template {
+        match *piece {
+            InlineAsmTemplatePiece::String(ref s) => global_asm.push_str(s),
+            InlineAsmTemplatePiece::Placeholder { operand_idx, modifier: _, span } => {
+                match operands[operand_idx] {
+                    GlobalAsmOperandRef::Const { ref string } => {
+                        global_asm.push_str(string);
+                    }
+                    GlobalAsmOperandRef::SymFn { instance } => {
+                        if cfg!(not(feature = "inline_asm_sym")) {
+                            tcx.dcx().span_err(
+                                span,
+                                "asm! and global_asm! sym operands are not yet supported",
+                            );
                         }
-                        InlineAsmOperand::In { .. }
-                        | InlineAsmOperand::Out { .. }
-                        | InlineAsmOperand::InOut { .. }
-                        | InlineAsmOperand::SplitInOut { .. }
-                        | InlineAsmOperand::Label { .. } => {
-                            span_bug!(op_sp, "invalid operand type for global_asm!")
+
+                        let symbol = tcx.symbol_name(instance);
+                        // FIXME handle the case where the function was made private to the
+                        // current codegen unit
+                        global_asm.push_str(symbol.name);
+                    }
+                    GlobalAsmOperandRef::SymStatic { def_id } => {
+                        if cfg!(not(feature = "inline_asm_sym")) {
+                            tcx.dcx().span_err(
+                                span,
+                                "asm! and global_asm! sym operands are not yet supported",
+                            );
                         }
+
+                        let instance = Instance::mono(tcx, def_id);
+                        let symbol = tcx.symbol_name(instance);
+                        global_asm.push_str(symbol.name);
                     }
                 }
             }
         }
+    }
 
-        global_asm.push('\n');
-        if is_x86 {
-            global_asm.push_str(".att_syntax\n\n");
-        }
-    } else {
-        bug!("Expected GlobalAsm found {:?}", item);
+    global_asm.push('\n');
+    if is_x86 {
+        global_asm.push_str(".att_syntax\n\n");
     }
 }
 
diff --git a/compiler/rustc_codegen_cranelift/src/inline_asm.rs b/compiler/rustc_codegen_cranelift/src/inline_asm.rs
index fbc33a64285..afee5095549 100644
--- a/compiler/rustc_codegen_cranelift/src/inline_asm.rs
+++ b/compiler/rustc_codegen_cranelift/src/inline_asm.rs
@@ -161,7 +161,6 @@ pub(crate) fn codegen_inline_asm_inner<'tcx>(
         stack_slots_input: Vec::new(),
         stack_slots_output: Vec::new(),
         stack_slot_size: Size::from_bytes(0),
-        is_naked: false,
     };
     asm_gen.allocate_registers();
     asm_gen.allocate_stack_slots();
@@ -201,114 +200,6 @@ pub(crate) fn codegen_inline_asm_inner<'tcx>(
     call_inline_asm(fx, &asm_name, asm_gen.stack_slot_size, inputs, outputs);
 }
 
-pub(crate) fn codegen_naked_asm<'tcx>(
-    tcx: TyCtxt<'tcx>,
-    cx: &mut crate::CodegenCx,
-    module: &mut dyn Module,
-    instance: Instance<'tcx>,
-    span: Span,
-    symbol_name: &str,
-    template: &[InlineAsmTemplatePiece],
-    operands: &[InlineAsmOperand<'tcx>],
-    options: InlineAsmOptions,
-) {
-    // FIXME add .eh_frame unwind info directives
-
-    let operands = operands
-        .iter()
-        .map(|operand| match *operand {
-            InlineAsmOperand::In { .. }
-            | InlineAsmOperand::Out { .. }
-            | InlineAsmOperand::InOut { .. } => {
-                span_bug!(span, "invalid operand type for naked asm")
-            }
-            InlineAsmOperand::Const { ref value } => {
-                let cv = instance.instantiate_mir_and_normalize_erasing_regions(
-                    tcx,
-                    ty::TypingEnv::fully_monomorphized(),
-                    ty::EarlyBinder::bind(value.const_),
-                );
-                let const_value = cv
-                    .eval(tcx, ty::TypingEnv::fully_monomorphized(), value.span)
-                    .expect("erroneous constant missed by mono item collection");
-
-                let value = rustc_codegen_ssa::common::asm_const_to_str(
-                    tcx,
-                    span,
-                    const_value,
-                    FullyMonomorphizedLayoutCx(tcx).layout_of(cv.ty()),
-                );
-                CInlineAsmOperand::Const { value }
-            }
-            InlineAsmOperand::SymFn { ref value } => {
-                if cfg!(not(feature = "inline_asm_sym")) {
-                    tcx.dcx()
-                        .span_err(span, "asm! and global_asm! sym operands are not yet supported");
-                }
-
-                let const_ = instance.instantiate_mir_and_normalize_erasing_regions(
-                    tcx,
-                    ty::TypingEnv::fully_monomorphized(),
-                    ty::EarlyBinder::bind(value.const_),
-                );
-                if let ty::FnDef(def_id, args) = *const_.ty().kind() {
-                    let instance = ty::Instance::resolve_for_fn_ptr(
-                        tcx,
-                        ty::TypingEnv::fully_monomorphized(),
-                        def_id,
-                        args,
-                    )
-                    .unwrap();
-                    let symbol = tcx.symbol_name(instance);
-
-                    // Pass a wrapper rather than the function itself as the function itself may not
-                    // be exported from the main codegen unit and may thus be unreachable from the
-                    // object file created by an external assembler.
-                    let wrapper_name = format!(
-                        "__inline_asm_{}_wrapper_n{}",
-                        cx.cgu_name.as_str().replace('.', "__").replace('-', "_"),
-                        cx.inline_asm_index
-                    );
-                    cx.inline_asm_index += 1;
-                    let sig =
-                        get_function_sig(tcx, module.target_config().default_call_conv, instance);
-                    create_wrapper_function(module, sig, &wrapper_name, symbol.name);
-
-                    CInlineAsmOperand::Symbol { symbol: wrapper_name }
-                } else {
-                    span_bug!(span, "invalid type for asm sym (fn)");
-                }
-            }
-            InlineAsmOperand::SymStatic { def_id } => {
-                assert!(tcx.is_static(def_id));
-                let instance = Instance::mono(tcx, def_id);
-                CInlineAsmOperand::Symbol { symbol: tcx.symbol_name(instance).name.to_owned() }
-            }
-            InlineAsmOperand::Label { .. } => {
-                span_bug!(span, "asm! label operands are not yet supported");
-            }
-        })
-        .collect::<Vec<_>>();
-
-    let asm_gen = InlineAssemblyGenerator {
-        tcx,
-        arch: tcx.sess.asm_arch.unwrap(),
-        enclosing_def_id: instance.def_id(),
-        template,
-        operands: &operands,
-        options,
-        registers: Vec::new(),
-        stack_slots_clobber: Vec::new(),
-        stack_slots_input: Vec::new(),
-        stack_slots_output: Vec::new(),
-        stack_slot_size: Size::from_bytes(0),
-        is_naked: true,
-    };
-
-    let generated_asm = asm_gen.generate_asm_wrapper(symbol_name);
-    cx.global_asm.push_str(&generated_asm);
-}
-
 struct InlineAssemblyGenerator<'a, 'tcx> {
     tcx: TyCtxt<'tcx>,
     arch: InlineAsmArch,
@@ -321,13 +212,10 @@ struct InlineAssemblyGenerator<'a, 'tcx> {
     stack_slots_input: Vec<Option<Size>>,
     stack_slots_output: Vec<Option<Size>>,
     stack_slot_size: Size,
-    is_naked: bool,
 }
 
 impl<'tcx> InlineAssemblyGenerator<'_, 'tcx> {
     fn allocate_registers(&mut self) {
-        assert!(!self.is_naked);
-
         let sess = self.tcx.sess;
         let map = allocatable_registers(
             self.arch,
@@ -451,8 +339,6 @@ impl<'tcx> InlineAssemblyGenerator<'_, 'tcx> {
     }
 
     fn allocate_stack_slots(&mut self) {
-        assert!(!self.is_naked);
-
         let mut slot_size = Size::from_bytes(0);
         let mut slots_clobber = vec![None; self.operands.len()];
         let mut slots_input = vec![None; self.operands.len()];
@@ -582,32 +468,31 @@ impl<'tcx> InlineAssemblyGenerator<'_, 'tcx> {
         if is_x86 {
             generated_asm.push_str(".intel_syntax noprefix\n");
         }
-        if !self.is_naked {
-            Self::prologue(&mut generated_asm, self.arch);
-
-            // Save clobbered registers
-            if !self.options.contains(InlineAsmOptions::NORETURN) {
-                for (reg, slot) in self
-                    .registers
-                    .iter()
-                    .zip(self.stack_slots_clobber.iter().copied())
-                    .filter_map(|(r, s)| r.zip(s))
-                {
-                    Self::save_register(&mut generated_asm, self.arch, reg, slot);
-                }
-            }
 
-            // Write input registers
+        Self::prologue(&mut generated_asm, self.arch);
+
+        // Save clobbered registers
+        if !self.options.contains(InlineAsmOptions::NORETURN) {
             for (reg, slot) in self
                 .registers
                 .iter()
-                .zip(self.stack_slots_input.iter().copied())
+                .zip(self.stack_slots_clobber.iter().copied())
                 .filter_map(|(r, s)| r.zip(s))
             {
-                Self::restore_register(&mut generated_asm, self.arch, reg, slot);
+                Self::save_register(&mut generated_asm, self.arch, reg, slot);
             }
         }
 
+        // Write input registers
+        for (reg, slot) in self
+            .registers
+            .iter()
+            .zip(self.stack_slots_input.iter().copied())
+            .filter_map(|(r, s)| r.zip(s))
+        {
+            Self::restore_register(&mut generated_asm, self.arch, reg, slot);
+        }
+
         if is_x86 && self.options.contains(InlineAsmOptions::ATT_SYNTAX) {
             generated_asm.push_str(".att_syntax\n");
         }
@@ -701,32 +586,30 @@ impl<'tcx> InlineAssemblyGenerator<'_, 'tcx> {
             generated_asm.push_str(".intel_syntax noprefix\n");
         }
 
-        if !self.is_naked {
-            if !self.options.contains(InlineAsmOptions::NORETURN) {
-                // Read output registers
-                for (reg, slot) in self
-                    .registers
-                    .iter()
-                    .zip(self.stack_slots_output.iter().copied())
-                    .filter_map(|(r, s)| r.zip(s))
-                {
-                    Self::save_register(&mut generated_asm, self.arch, reg, slot);
-                }
-
-                // Restore clobbered registers
-                for (reg, slot) in self
-                    .registers
-                    .iter()
-                    .zip(self.stack_slots_clobber.iter().copied())
-                    .filter_map(|(r, s)| r.zip(s))
-                {
-                    Self::restore_register(&mut generated_asm, self.arch, reg, slot);
-                }
+        if !self.options.contains(InlineAsmOptions::NORETURN) {
+            // Read output registers
+            for (reg, slot) in self
+                .registers
+                .iter()
+                .zip(self.stack_slots_output.iter().copied())
+                .filter_map(|(r, s)| r.zip(s))
+            {
+                Self::save_register(&mut generated_asm, self.arch, reg, slot);
+            }
 
-                Self::epilogue(&mut generated_asm, self.arch);
-            } else {
-                Self::epilogue_noreturn(&mut generated_asm, self.arch);
+            // Restore clobbered registers
+            for (reg, slot) in self
+                .registers
+                .iter()
+                .zip(self.stack_slots_clobber.iter().copied())
+                .filter_map(|(r, s)| r.zip(s))
+            {
+                Self::restore_register(&mut generated_asm, self.arch, reg, slot);
             }
+
+            Self::epilogue(&mut generated_asm, self.arch);
+        } else {
+            Self::epilogue_noreturn(&mut generated_asm, self.arch);
         }
 
         if is_x86 {
diff --git a/compiler/rustc_codegen_gcc/src/asm.rs b/compiler/rustc_codegen_gcc/src/asm.rs
index 396c6d57950..c35337ae7ce 100644
--- a/compiler/rustc_codegen_gcc/src/asm.rs
+++ b/compiler/rustc_codegen_gcc/src/asm.rs
@@ -829,7 +829,7 @@ fn dummy_output_type<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, reg: InlineAsmRegCl
 
 impl<'gcc, 'tcx> AsmCodegenMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
     fn codegen_global_asm(
-        &self,
+        &mut self,
         template: &[InlineAsmTemplatePiece],
         operands: &[GlobalAsmOperandRef<'tcx>],
         options: InlineAsmOptions,
diff --git a/compiler/rustc_codegen_gcc/src/base.rs b/compiler/rustc_codegen_gcc/src/base.rs
index 9b495174a3f..a9d7808c833 100644
--- a/compiler/rustc_codegen_gcc/src/base.rs
+++ b/compiler/rustc_codegen_gcc/src/base.rs
@@ -206,7 +206,7 @@ pub fn compile_codegen_unit(
             let f128_type_supported = target_info.supports_target_dependent_type(CType::Float128);
             let u128_type_supported = target_info.supports_target_dependent_type(CType::UInt128t);
             // TODO: improve this to avoid passing that many arguments.
-            let cx = CodegenCx::new(
+            let mut cx = CodegenCx::new(
                 &context,
                 cgu,
                 tcx,
@@ -223,8 +223,8 @@ pub fn compile_codegen_unit(
             }
 
             // ... and now that we have everything pre-defined, fill out those definitions.
-            for &(mono_item, _) in &mono_items {
-                mono_item.define::<Builder<'_, '_, '_>>(&cx);
+            for &(mono_item, item_data) in &mono_items {
+                mono_item.define::<Builder<'_, '_, '_>>(&mut cx, item_data);
             }
 
             // If this codegen unit contains the main function, also create the
diff --git a/compiler/rustc_codegen_gcc/src/builder.rs b/compiler/rustc_codegen_gcc/src/builder.rs
index 5c70f4a7df9..6720f6186d1 100644
--- a/compiler/rustc_codegen_gcc/src/builder.rs
+++ b/compiler/rustc_codegen_gcc/src/builder.rs
@@ -45,7 +45,7 @@ enum ExtremumOperation {
     Min,
 }
 
-pub struct Builder<'a: 'gcc, 'gcc, 'tcx> {
+pub struct Builder<'a, 'gcc, 'tcx> {
     pub cx: &'a CodegenCx<'gcc, 'tcx>,
     pub block: Block<'gcc>,
     pub location: Option<Location<'gcc>>,
diff --git a/compiler/rustc_codegen_llvm/src/asm.rs b/compiler/rustc_codegen_llvm/src/asm.rs
index 88daa025740..e481b99afcc 100644
--- a/compiler/rustc_codegen_llvm/src/asm.rs
+++ b/compiler/rustc_codegen_llvm/src/asm.rs
@@ -376,7 +376,7 @@ impl<'ll, 'tcx> AsmBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
 
 impl<'tcx> AsmCodegenMethods<'tcx> for CodegenCx<'_, 'tcx> {
     fn codegen_global_asm(
-        &self,
+        &mut self,
         template: &[InlineAsmTemplatePiece],
         operands: &[GlobalAsmOperandRef<'tcx>],
         options: InlineAsmOptions,
diff --git a/compiler/rustc_codegen_llvm/src/base.rs b/compiler/rustc_codegen_llvm/src/base.rs
index 6bd27914dbd..e4fac35aa44 100644
--- a/compiler/rustc_codegen_llvm/src/base.rs
+++ b/compiler/rustc_codegen_llvm/src/base.rs
@@ -83,15 +83,15 @@ pub(crate) fn compile_codegen_unit(
         // Instantiate monomorphizations without filling out definitions yet...
         let llvm_module = ModuleLlvm::new(tcx, cgu_name.as_str());
         {
-            let cx = CodegenCx::new(tcx, cgu, &llvm_module);
+            let mut cx = CodegenCx::new(tcx, cgu, &llvm_module);
             let mono_items = cx.codegen_unit.items_in_deterministic_order(cx.tcx);
             for &(mono_item, data) in &mono_items {
                 mono_item.predefine::<Builder<'_, '_, '_>>(&cx, data.linkage, data.visibility);
             }
 
             // ... and now that we have everything pre-defined, fill out those definitions.
-            for &(mono_item, _) in &mono_items {
-                mono_item.define::<Builder<'_, '_, '_>>(&cx);
+            for &(mono_item, item_data) in &mono_items {
+                mono_item.define::<Builder<'_, '_, '_>>(&mut cx, item_data);
             }
 
             // If this codegen unit contains the main function, also create the
diff --git a/compiler/rustc_codegen_llvm/src/llvm_util.rs b/compiler/rustc_codegen_llvm/src/llvm_util.rs
index ae1bdac1655..6412a537a79 100644
--- a/compiler/rustc_codegen_llvm/src/llvm_util.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm_util.rs
@@ -466,13 +466,9 @@ fn update_target_reliable_float_cfg(sess: &Session, cfg: &mut TargetConfig) {
         _ => true,
     };
 
-    cfg.has_reliable_f16_math = match (target_arch, target_os) {
-        // x86 has a crash for `powi`: <https://github.com/llvm/llvm-project/issues/105747>
-        ("x86" | "x86_64", _) => false,
-        // Assume that working `f16` means working `f16` math for most platforms, since
-        // operations just go through `f32`.
-        _ => true,
-    } && cfg.has_reliable_f16;
+    // Assume that working `f16` means working `f16` math for most platforms, since
+    // operations just go through `f32`.
+    cfg.has_reliable_f16_math = cfg.has_reliable_f16;
 
     cfg.has_reliable_f128_math = match (target_arch, target_os) {
         // LLVM lowers `fp128` math to `long double` symbols even on platforms where
diff --git a/compiler/rustc_codegen_ssa/src/base.rs b/compiler/rustc_codegen_ssa/src/base.rs
index 12b7a487455..f5480da2808 100644
--- a/compiler/rustc_codegen_ssa/src/base.rs
+++ b/compiler/rustc_codegen_ssa/src/base.rs
@@ -12,19 +12,21 @@ use rustc_data_structures::fx::{FxHashMap, FxIndexSet};
 use rustc_data_structures::profiling::{get_resident_set_size, print_time_passes_entry};
 use rustc_data_structures::sync::{IntoDynSyncSend, par_map};
 use rustc_data_structures::unord::UnordMap;
+use rustc_hir::ItemId;
 use rustc_hir::def_id::{DefId, LOCAL_CRATE};
 use rustc_hir::lang_items::LangItem;
 use rustc_metadata::EncodedMetadata;
-use rustc_middle::bug;
 use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrs;
 use rustc_middle::middle::debugger_visualizer::{DebuggerVisualizerFile, DebuggerVisualizerType};
 use rustc_middle::middle::exported_symbols::SymbolExportKind;
 use rustc_middle::middle::{exported_symbols, lang_items};
 use rustc_middle::mir::BinOp;
+use rustc_middle::mir::interpret::ErrorHandled;
 use rustc_middle::mir::mono::{CodegenUnit, CodegenUnitNameBuilder, MonoItem, MonoItemPartitions};
 use rustc_middle::query::Providers;
 use rustc_middle::ty::layout::{HasTyCtxt, HasTypingEnv, LayoutOf, TyAndLayout};
 use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
+use rustc_middle::{bug, span_bug};
 use rustc_session::Session;
 use rustc_session::config::{self, CrateType, EntryFnType, OutputType};
 use rustc_span::{DUMMY_SP, Symbol, sym};
@@ -417,6 +419,69 @@ pub(crate) fn codegen_instance<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
     mir::codegen_mir::<Bx>(cx, instance);
 }
 
+pub fn codegen_global_asm<'tcx, Cx>(cx: &mut Cx, item_id: ItemId)
+where
+    Cx: LayoutOf<'tcx, LayoutOfResult = TyAndLayout<'tcx>> + AsmCodegenMethods<'tcx>,
+{
+    let item = cx.tcx().hir_item(item_id);
+    if let rustc_hir::ItemKind::GlobalAsm { asm, .. } = item.kind {
+        let operands: Vec<_> = asm
+            .operands
+            .iter()
+            .map(|(op, op_sp)| match *op {
+                rustc_hir::InlineAsmOperand::Const { ref anon_const } => {
+                    match cx.tcx().const_eval_poly(anon_const.def_id.to_def_id()) {
+                        Ok(const_value) => {
+                            let ty =
+                                cx.tcx().typeck_body(anon_const.body).node_type(anon_const.hir_id);
+                            let string = common::asm_const_to_str(
+                                cx.tcx(),
+                                *op_sp,
+                                const_value,
+                                cx.layout_of(ty),
+                            );
+                            GlobalAsmOperandRef::Const { string }
+                        }
+                        Err(ErrorHandled::Reported { .. }) => {
+                            // An error has already been reported and
+                            // compilation is guaranteed to fail if execution
+                            // hits this path. So an empty string instead of
+                            // a stringified constant value will suffice.
+                            GlobalAsmOperandRef::Const { string: String::new() }
+                        }
+                        Err(ErrorHandled::TooGeneric(_)) => {
+                            span_bug!(*op_sp, "asm const cannot be resolved; too generic")
+                        }
+                    }
+                }
+                rustc_hir::InlineAsmOperand::SymFn { expr } => {
+                    let ty = cx.tcx().typeck(item_id.owner_id).expr_ty(expr);
+                    let instance = match ty.kind() {
+                        &ty::FnDef(def_id, args) => Instance::new(def_id, args),
+                        _ => span_bug!(*op_sp, "asm sym is not a function"),
+                    };
+
+                    GlobalAsmOperandRef::SymFn { instance }
+                }
+                rustc_hir::InlineAsmOperand::SymStatic { path: _, def_id } => {
+                    GlobalAsmOperandRef::SymStatic { def_id }
+                }
+                rustc_hir::InlineAsmOperand::In { .. }
+                | rustc_hir::InlineAsmOperand::Out { .. }
+                | rustc_hir::InlineAsmOperand::InOut { .. }
+                | rustc_hir::InlineAsmOperand::SplitInOut { .. }
+                | rustc_hir::InlineAsmOperand::Label { .. } => {
+                    span_bug!(*op_sp, "invalid operand type for global_asm!")
+                }
+            })
+            .collect();
+
+        cx.codegen_global_asm(asm.template, &operands, asm.options, asm.line_spans);
+    } else {
+        span_bug!(item.span, "Mismatch between hir::Item type and MonoItem type")
+    }
+}
+
 /// Creates the `main` function which will initialize the rust runtime and call
 /// users main function.
 pub fn maybe_create_entry_wrapper<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
diff --git a/compiler/rustc_codegen_ssa/src/codegen_attrs.rs b/compiler/rustc_codegen_ssa/src/codegen_attrs.rs
index b0c53ec93ce..5d09e62f274 100644
--- a/compiler/rustc_codegen_ssa/src/codegen_attrs.rs
+++ b/compiler/rustc_codegen_ssa/src/codegen_attrs.rs
@@ -87,6 +87,7 @@ fn codegen_fn_attrs(tcx: TyCtxt<'_>, did: LocalDefId) -> CodegenFnAttrs {
     let mut link_ordinal_span = None;
     let mut no_sanitize_span = None;
     let mut mixed_export_name_no_mangle_lint_state = MixedExportNameAndNoMangleState::default();
+    let mut no_mangle_span = None;
 
     for attr in attrs.iter() {
         // In some cases, attribute are only valid on functions, but it's the `check_attr`
@@ -139,6 +140,7 @@ fn codegen_fn_attrs(tcx: TyCtxt<'_>, did: LocalDefId) -> CodegenFnAttrs {
             }
             sym::naked => codegen_fn_attrs.flags |= CodegenFnAttrFlags::NAKED,
             sym::no_mangle => {
+                no_mangle_span = Some(attr.span());
                 if tcx.opt_item_name(did.to_def_id()).is_some() {
                     codegen_fn_attrs.flags |= CodegenFnAttrFlags::NO_MANGLE;
                     mixed_export_name_no_mangle_lint_state.track_no_mangle(
@@ -621,6 +623,34 @@ fn codegen_fn_attrs(tcx: TyCtxt<'_>, did: LocalDefId) -> CodegenFnAttrs {
     }
     check_link_name_xor_ordinal(tcx, &codegen_fn_attrs, link_ordinal_span);
 
+    if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::RUSTC_STD_INTERNAL_SYMBOL)
+        && codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::NO_MANGLE)
+    {
+        let lang_item =
+            lang_items::extract(attrs).map_or(None, |(name, _span)| LangItem::from_name(name));
+        let mut err = tcx
+            .dcx()
+            .struct_span_err(
+                no_mangle_span.unwrap_or_default(),
+                "`#[no_mangle]` cannot be used on internal language items",
+            )
+            .with_note("Rustc requires this item to have a specific mangled name.")
+            .with_span_label(tcx.def_span(did), "should be the internal language item");
+        if let Some(lang_item) = lang_item {
+            if let Some(link_name) = lang_item.link_name() {
+                err = err
+                    .with_note("If you are trying to prevent mangling to ease debugging, many")
+                    .with_note(format!(
+                        "debuggers support a command such as `rbreak {link_name}` to"
+                    ))
+                    .with_note(format!(
+                        "match `.*{link_name}.*` instead of `break {link_name}` on a specific name"
+                    ))
+            }
+        }
+        err.emit();
+    }
+
     // Any linkage to LLVM intrinsics for now forcibly marks them all as never
     // unwinds since LLVM sometimes can't handle codegen which `invoke`s
     // intrinsic functions.
diff --git a/compiler/rustc_codegen_ssa/src/lib.rs b/compiler/rustc_codegen_ssa/src/lib.rs
index b67c871cac9..bfec208c4ae 100644
--- a/compiler/rustc_codegen_ssa/src/lib.rs
+++ b/compiler/rustc_codegen_ssa/src/lib.rs
@@ -14,6 +14,7 @@
 #![feature(string_from_utf8_lossy_owned)]
 #![feature(trait_alias)]
 #![feature(try_blocks)]
+#![recursion_limit = "256"]
 // tidy-alphabetical-end
 
 //! This crate contains codegen code that is used by all codegen backends (LLVM and others).
diff --git a/compiler/rustc_codegen_ssa/src/mir/mod.rs b/compiler/rustc_codegen_ssa/src/mir/mod.rs
index 6a37889217a..96a04473aba 100644
--- a/compiler/rustc_codegen_ssa/src/mir/mod.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/mod.rs
@@ -20,7 +20,7 @@ mod coverageinfo;
 pub mod debuginfo;
 mod intrinsic;
 mod locals;
-mod naked_asm;
+pub mod naked_asm;
 pub mod operand;
 pub mod place;
 mod rvalue;
@@ -178,11 +178,6 @@ pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
     let fn_abi = cx.fn_abi_of_instance(instance, ty::List::empty());
     debug!("fn_abi: {:?}", fn_abi);
 
-    if tcx.codegen_fn_attrs(instance.def_id()).flags.contains(CodegenFnAttrFlags::NAKED) {
-        crate::mir::naked_asm::codegen_naked_asm::<Bx>(cx, &mir, instance);
-        return;
-    }
-
     if tcx.features().ergonomic_clones() {
         let monomorphized_mir = instance.instantiate_mir_and_normalize_erasing_regions(
             tcx,
diff --git a/compiler/rustc_codegen_ssa/src/mir/naked_asm.rs b/compiler/rustc_codegen_ssa/src/mir/naked_asm.rs
index 3a6b1f8d4ef..0301ef437c0 100644
--- a/compiler/rustc_codegen_ssa/src/mir/naked_asm.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/naked_asm.rs
@@ -1,23 +1,33 @@
 use rustc_abi::{BackendRepr, Float, Integer, Primitive, RegKind};
 use rustc_attr_parsing::InstructionSetAttr;
 use rustc_hir::def_id::DefId;
-use rustc_middle::mir::mono::{Linkage, MonoItem, MonoItemData, Visibility};
-use rustc_middle::mir::{Body, InlineAsmOperand};
-use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt, HasTypingEnv, LayoutOf};
-use rustc_middle::ty::{Instance, Ty, TyCtxt};
+use rustc_middle::mir::mono::{Linkage, MonoItemData, Visibility};
+use rustc_middle::mir::{InlineAsmOperand, START_BLOCK};
+use rustc_middle::ty::layout::{FnAbiOf, LayoutOf, TyAndLayout};
+use rustc_middle::ty::{Instance, Ty, TyCtxt, TypeVisitableExt};
 use rustc_middle::{bug, span_bug, ty};
 use rustc_span::sym;
 use rustc_target::callconv::{ArgAbi, FnAbi, PassMode};
 use rustc_target::spec::{BinaryFormat, WasmCAbi};
 
 use crate::common;
-use crate::traits::{AsmCodegenMethods, BuilderMethods, GlobalAsmOperandRef, MiscCodegenMethods};
-
-pub(crate) fn codegen_naked_asm<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
-    cx: &'a Bx::CodegenCx,
-    mir: &Body<'tcx>,
+use crate::mir::AsmCodegenMethods;
+use crate::traits::GlobalAsmOperandRef;
+
+pub fn codegen_naked_asm<
+    'a,
+    'tcx,
+    Cx: LayoutOf<'tcx, LayoutOfResult = TyAndLayout<'tcx>>
+        + FnAbiOf<'tcx, FnAbiOfResult = &'tcx FnAbi<'tcx, Ty<'tcx>>>
+        + AsmCodegenMethods<'tcx>,
+>(
+    cx: &'a mut Cx,
     instance: Instance<'tcx>,
+    item_data: MonoItemData,
 ) {
+    assert!(!instance.args.has_infer());
+    let mir = cx.tcx().instance_mir(instance.def);
+
     let rustc_middle::mir::TerminatorKind::InlineAsm {
         asm_macro: _,
         template,
@@ -26,15 +36,14 @@ pub(crate) fn codegen_naked_asm<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
         line_spans,
         targets: _,
         unwind: _,
-    } = mir.basic_blocks.iter().next().unwrap().terminator().kind
+    } = mir.basic_blocks[START_BLOCK].terminator().kind
     else {
         bug!("#[naked] functions should always terminate with an asm! block")
     };
 
     let operands: Vec<_> =
-        operands.iter().map(|op| inline_to_global_operand::<Bx>(cx, instance, op)).collect();
+        operands.iter().map(|op| inline_to_global_operand::<Cx>(cx, instance, op)).collect();
 
-    let item_data = cx.codegen_unit().items().get(&MonoItem::Fn(instance)).unwrap();
     let name = cx.mangled_name(instance);
     let fn_abi = cx.fn_abi_of_instance(instance, ty::List::empty());
     let (begin, end) = prefix_and_suffix(cx.tcx(), instance, &name, item_data, fn_abi);
@@ -47,8 +56,8 @@ pub(crate) fn codegen_naked_asm<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
     cx.codegen_global_asm(&template_vec, &operands, options, line_spans);
 }
 
-fn inline_to_global_operand<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
-    cx: &'a Bx::CodegenCx,
+fn inline_to_global_operand<'a, 'tcx, Cx: LayoutOf<'tcx, LayoutOfResult = TyAndLayout<'tcx>>>(
+    cx: &'a Cx,
     instance: Instance<'tcx>,
     op: &InlineAsmOperand<'tcx>,
 ) -> GlobalAsmOperandRef<'tcx> {
@@ -108,7 +117,7 @@ fn prefix_and_suffix<'tcx>(
     tcx: TyCtxt<'tcx>,
     instance: Instance<'tcx>,
     asm_name: &str,
-    item_data: &MonoItemData,
+    item_data: MonoItemData,
     fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
 ) -> (String, String) {
     use std::fmt::Write;
@@ -210,8 +219,10 @@ fn prefix_and_suffix<'tcx>(
             writeln!(begin, ".pushsection {section},\"ax\", {progbits}").unwrap();
             writeln!(begin, ".balign {align_bytes}").unwrap();
             write_linkage(&mut begin).unwrap();
-            if let Visibility::Hidden = item_data.visibility {
-                writeln!(begin, ".hidden {asm_name}").unwrap();
+            match item_data.visibility {
+                Visibility::Default => {}
+                Visibility::Protected => writeln!(begin, ".protected {asm_name}").unwrap(),
+                Visibility::Hidden => writeln!(begin, ".hidden {asm_name}").unwrap(),
             }
             writeln!(begin, ".type {asm_name}, {function}").unwrap();
             if !arch_prefix.is_empty() {
@@ -231,8 +242,9 @@ fn prefix_and_suffix<'tcx>(
             writeln!(begin, ".pushsection {},regular,pure_instructions", section).unwrap();
             writeln!(begin, ".balign {align_bytes}").unwrap();
             write_linkage(&mut begin).unwrap();
-            if let Visibility::Hidden = item_data.visibility {
-                writeln!(begin, ".private_extern {asm_name}").unwrap();
+            match item_data.visibility {
+                Visibility::Default | Visibility::Protected => {}
+                Visibility::Hidden => writeln!(begin, ".private_extern {asm_name}").unwrap(),
             }
             writeln!(begin, "{asm_name}:").unwrap();
 
diff --git a/compiler/rustc_codegen_ssa/src/mono_item.rs b/compiler/rustc_codegen_ssa/src/mono_item.rs
index f6af889fd6e..c2067e52afe 100644
--- a/compiler/rustc_codegen_ssa/src/mono_item.rs
+++ b/compiler/rustc_codegen_ssa/src/mono_item.rs
@@ -1,17 +1,18 @@
-use rustc_hir as hir;
 use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
-use rustc_middle::mir::interpret::ErrorHandled;
-use rustc_middle::mir::mono::{Linkage, MonoItem, Visibility};
-use rustc_middle::ty::Instance;
-use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf};
-use rustc_middle::{span_bug, ty};
+use rustc_middle::mir::mono::{Linkage, MonoItem, MonoItemData, Visibility};
+use rustc_middle::ty::layout::HasTyCtxt;
 use tracing::debug;
 
+use crate::base;
+use crate::mir::naked_asm;
 use crate::traits::*;
-use crate::{base, common};
 
 pub trait MonoItemExt<'a, 'tcx> {
-    fn define<Bx: BuilderMethods<'a, 'tcx>>(&self, cx: &'a Bx::CodegenCx);
+    fn define<Bx: BuilderMethods<'a, 'tcx>>(
+        &self,
+        cx: &'a mut Bx::CodegenCx,
+        item_data: MonoItemData,
+    );
     fn predefine<Bx: BuilderMethods<'a, 'tcx>>(
         &self,
         cx: &'a Bx::CodegenCx,
@@ -22,7 +23,11 @@ pub trait MonoItemExt<'a, 'tcx> {
 }
 
 impl<'a, 'tcx: 'a> MonoItemExt<'a, 'tcx> for MonoItem<'tcx> {
-    fn define<Bx: BuilderMethods<'a, 'tcx>>(&self, cx: &'a Bx::CodegenCx) {
+    fn define<Bx: BuilderMethods<'a, 'tcx>>(
+        &self,
+        cx: &'a mut Bx::CodegenCx,
+        item_data: MonoItemData,
+    ) {
         debug!(
             "BEGIN IMPLEMENTING '{} ({})' in cgu {}",
             self,
@@ -35,71 +40,19 @@ impl<'a, 'tcx: 'a> MonoItemExt<'a, 'tcx> for MonoItem<'tcx> {
                 cx.codegen_static(def_id);
             }
             MonoItem::GlobalAsm(item_id) => {
-                let item = cx.tcx().hir_item(item_id);
-                if let hir::ItemKind::GlobalAsm { asm, .. } = item.kind {
-                    let operands: Vec<_> = asm
-                        .operands
-                        .iter()
-                        .map(|(op, op_sp)| match *op {
-                            hir::InlineAsmOperand::Const { ref anon_const } => {
-                                match cx.tcx().const_eval_poly(anon_const.def_id.to_def_id()) {
-                                    Ok(const_value) => {
-                                        let ty = cx
-                                            .tcx()
-                                            .typeck_body(anon_const.body)
-                                            .node_type(anon_const.hir_id);
-                                        let string = common::asm_const_to_str(
-                                            cx.tcx(),
-                                            *op_sp,
-                                            const_value,
-                                            cx.layout_of(ty),
-                                        );
-                                        GlobalAsmOperandRef::Const { string }
-                                    }
-                                    Err(ErrorHandled::Reported { .. }) => {
-                                        // An error has already been reported and
-                                        // compilation is guaranteed to fail if execution
-                                        // hits this path. So an empty string instead of
-                                        // a stringified constant value will suffice.
-                                        GlobalAsmOperandRef::Const { string: String::new() }
-                                    }
-                                    Err(ErrorHandled::TooGeneric(_)) => {
-                                        span_bug!(
-                                            *op_sp,
-                                            "asm const cannot be resolved; too generic"
-                                        )
-                                    }
-                                }
-                            }
-                            hir::InlineAsmOperand::SymFn { expr } => {
-                                let ty = cx.tcx().typeck(item_id.owner_id).expr_ty(expr);
-                                let instance = match ty.kind() {
-                                    &ty::FnDef(def_id, args) => Instance::new(def_id, args),
-                                    _ => span_bug!(*op_sp, "asm sym is not a function"),
-                                };
-
-                                GlobalAsmOperandRef::SymFn { instance }
-                            }
-                            hir::InlineAsmOperand::SymStatic { path: _, def_id } => {
-                                GlobalAsmOperandRef::SymStatic { def_id }
-                            }
-                            hir::InlineAsmOperand::In { .. }
-                            | hir::InlineAsmOperand::Out { .. }
-                            | hir::InlineAsmOperand::InOut { .. }
-                            | hir::InlineAsmOperand::SplitInOut { .. }
-                            | hir::InlineAsmOperand::Label { .. } => {
-                                span_bug!(*op_sp, "invalid operand type for global_asm!")
-                            }
-                        })
-                        .collect();
-
-                    cx.codegen_global_asm(asm.template, &operands, asm.options, asm.line_spans);
-                } else {
-                    span_bug!(item.span, "Mismatch between hir::Item type and MonoItem type")
-                }
+                base::codegen_global_asm(cx, item_id);
             }
             MonoItem::Fn(instance) => {
-                base::codegen_instance::<Bx>(cx, instance);
+                if cx
+                    .tcx()
+                    .codegen_fn_attrs(instance.def_id())
+                    .flags
+                    .contains(CodegenFnAttrFlags::NAKED)
+                {
+                    naked_asm::codegen_naked_asm::<Bx::CodegenCx>(cx, instance, item_data);
+                } else {
+                    base::codegen_instance::<Bx>(cx, instance);
+                }
             }
         }
 
diff --git a/compiler/rustc_codegen_ssa/src/traits/asm.rs b/compiler/rustc_codegen_ssa/src/traits/asm.rs
index 7767bffbfbf..cc7a6a3f19e 100644
--- a/compiler/rustc_codegen_ssa/src/traits/asm.rs
+++ b/compiler/rustc_codegen_ssa/src/traits/asm.rs
@@ -62,7 +62,7 @@ pub trait AsmBuilderMethods<'tcx>: BackendTypes {
 
 pub trait AsmCodegenMethods<'tcx> {
     fn codegen_global_asm(
-        &self,
+        &mut self,
         template: &[InlineAsmTemplatePiece],
         operands: &[GlobalAsmOperandRef<'tcx>],
         options: InlineAsmOptions,
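
On the receiver change to `&mut self`: backends that build global assembly as plain text, such as the `GlobalAsmContext` added earlier in this patch, want to append directly to an owned buffer. A minimal sketch of that shape with a simplified trait (not rustc's actual `AsmCodegenMethods`):

// Simplified, illustrative trait; the real one takes template pieces,
// operands, options, and line spans.
trait AsmSink {
    fn codegen_global_asm(&mut self, text: &str);
}

struct TextAsm {
    global_asm: String,
}

impl AsmSink for TextAsm {
    fn codegen_global_asm(&mut self, text: &str) {
        // Appending requires mutable access, hence `&mut self`.
        self.global_asm.push_str(text);
        self.global_asm.push('\n');
    }
}

fn main() {
    let mut sink = TextAsm { global_asm: String::new() };
    sink.codegen_global_asm(".globl my_symbol");
    sink.codegen_global_asm("my_symbol:");
    print!("{}", sink.global_asm);
}
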
diff --git a/compiler/rustc_codegen_ssa/src/traits/backend.rs b/compiler/rustc_codegen_ssa/src/traits/backend.rs
index e6b50cc7c0c..e2f1458d062 100644
--- a/compiler/rustc_codegen_ssa/src/traits/backend.rs
+++ b/compiler/rustc_codegen_ssa/src/traits/backend.rs
@@ -45,15 +45,14 @@ pub trait CodegenBackend {
 
     fn print(&self, _req: &PrintRequest, _out: &mut String, _sess: &Session) {}
 
-    /// Returns two feature sets:
-    /// - The first has the features that should be set in `cfg(target_features)`.
-    /// - The second is like the first, but also includes unstable features.
-    ///
-    /// RUSTC_SPECIFIC_FEATURES should be skipped here, those are handled outside codegen.
+    /// Collect target-specific options that should be set in `cfg(...)`, including
+    /// `target_feature` and support for unstable float types.
     fn target_config(&self, _sess: &Session) -> TargetConfig {
         TargetConfig {
             target_features: vec![],
             unstable_target_features: vec![],
+            // `true` is used as a default so backends need to acknowledge when they do not
+            // support the float types, rather than accidentally quietly skipping all tests.
             has_reliable_f16: true,
             has_reliable_f16_math: true,
             has_reliable_f128: true,
diff --git a/compiler/rustc_const_eval/src/interpret/intrinsics.rs b/compiler/rustc_const_eval/src/interpret/intrinsics.rs
index 40c63f2b250..d67b547ba1a 100644
--- a/compiler/rustc_const_eval/src/interpret/intrinsics.rs
+++ b/compiler/rustc_const_eval/src/interpret/intrinsics.rs
@@ -61,16 +61,21 @@ pub(crate) fn eval_nullary_intrinsic<'tcx>(
             ensure_monomorphic_enough(tcx, tp_ty)?;
             ConstValue::from_u128(tcx.type_id_hash(tp_ty).as_u128())
         }
-        sym::variant_count => match tp_ty.kind() {
+        sym::variant_count => match match tp_ty.kind() {
+            // Pattern types have the same number of variants as their base type.
+            // Even if we restrict e.g. which variants are valid, the variants are essentially just uninhabited.
+            // And `Result<(), !>` still has two variants according to `variant_count`.
+            ty::Pat(base, _) => *base,
+            _ => tp_ty,
+        }
+        .kind()
+        {
             // Correctly handles non-monomorphic calls, so there is no need for ensure_monomorphic_enough.
             ty::Adt(adt, _) => ConstValue::from_target_usize(adt.variants().len() as u64, &tcx),
             ty::Alias(..) | ty::Param(_) | ty::Placeholder(_) | ty::Infer(_) => {
                 throw_inval!(TooGeneric)
             }
-            ty::Pat(_, pat) => match **pat {
-                ty::PatternKind::Range { .. } => ConstValue::from_target_usize(0u64, &tcx),
-                // Future pattern kinds may have more variants
-            },
+            ty::Pat(..) => unreachable!(),
             ty::Bound(_, _) => bug!("bound ty during ctfe"),
             ty::Bool
             | ty::Char
@@ -178,8 +183,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
 
                 let res = self.binary_op(op, &a, &b)?;
                 // `binary_op` already called `generate_nan` if needed.
-
-                // FIXME: Miri should add some non-determinism to the result here to catch any dependences on exact computations. This has previously been done, but the behaviour was removed as part of constification.
+                let res = M::apply_float_nondet(self, res)?;
                 self.write_immediate(*res, dest)?;
             }
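
The `variant_count` comment above leans on the fact that uninhabited variants still count. A standalone, nightly-only illustration (it assumes the unstable feature gate is still named `variant_count`):

#![feature(variant_count)]

use std::mem::variant_count;

fn main() {
    // `Infallible` is uninhabited, but the `Err` variant still counts,
    // mirroring the `Result<(), !>` remark in the comment above.
    let n = variant_count::<Result<(), std::convert::Infallible>>();
    assert_eq!(n, 2);
    println!("Result<(), Infallible> has {n} variants");
}
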
 
diff --git a/compiler/rustc_const_eval/src/interpret/machine.rs b/compiler/rustc_const_eval/src/interpret/machine.rs
index e5026eff21f..a1386b4e1be 100644
--- a/compiler/rustc_const_eval/src/interpret/machine.rs
+++ b/compiler/rustc_const_eval/src/interpret/machine.rs
@@ -276,6 +276,14 @@ pub trait Machine<'tcx>: Sized {
         F2::NAN
     }
 
+    /// Apply non-determinism to float operations that do not return a precise result.
+    fn apply_float_nondet(
+        _ecx: &mut InterpCx<'tcx, Self>,
+        val: ImmTy<'tcx, Self::Provenance>,
+    ) -> InterpResult<'tcx, ImmTy<'tcx, Self::Provenance>> {
+        interp_ok(val)
+    }
+
     /// Determines the result of `min`/`max` on floats when the arguments are equal.
     fn equal_float_min_max<F: Float>(_ecx: &InterpCx<'tcx, Self>, a: F, _b: F) -> F {
         // By default, we pick the left argument.
diff --git a/compiler/rustc_const_eval/src/interpret/validity.rs b/compiler/rustc_const_eval/src/interpret/validity.rs
index fb7ba6d7ef5..c86af5a9a4b 100644
--- a/compiler/rustc_const_eval/src/interpret/validity.rs
+++ b/compiler/rustc_const_eval/src/interpret/validity.rs
@@ -1248,6 +1248,14 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValueVisitor<'tcx, M> for ValidityVisitor<'rt,
                     // Range patterns are precisely reflected into `valid_range` and thus
                     // handled fully by `visit_scalar` (called below).
                     ty::PatternKind::Range { .. } => {},
+
+                    // FIXME(pattern_types): check that the value is covered by one of the variants.
+                    // For now, we rely on layout computation setting the scalar's `valid_range` to
+                    // match the pattern. However, this cannot always work; the layout may
+                    // pessimistically cover actually illegal ranges and Miri would miss that UB.
+                    // The consolation here is that codegen also will miss that UB, so at least
+                    // we won't see optimizations actually breaking such programs.
+                    ty::PatternKind::Or(_patterns) => {}
                 }
             }
             _ => {
diff --git a/compiler/rustc_expand/src/build.rs b/compiler/rustc_expand/src/build.rs
index 6d616cf84bb..14b8cc90d97 100644
--- a/compiler/rustc_expand/src/build.rs
+++ b/compiler/rustc_expand/src/build.rs
@@ -1,8 +1,10 @@
 use rustc_ast::ptr::P;
+use rustc_ast::token::Delimiter;
+use rustc_ast::tokenstream::TokenStream;
 use rustc_ast::util::literal;
 use rustc_ast::{
     self as ast, AnonConst, AttrVec, BlockCheckMode, Expr, LocalKind, MatchKind, PatKind, UnOp,
-    attr, token,
+    attr, token, tokenstream,
 };
 use rustc_span::source_map::Spanned;
 use rustc_span::{DUMMY_SP, Ident, Span, Symbol, kw, sym};
@@ -55,13 +57,13 @@ impl<'a> ExtCtxt<'a> {
         &self,
         span: Span,
         path: ast::Path,
-        delim: ast::token::Delimiter,
-        tokens: ast::tokenstream::TokenStream,
+        delim: Delimiter,
+        tokens: TokenStream,
     ) -> P<ast::MacCall> {
         P(ast::MacCall {
             path,
             args: P(ast::DelimArgs {
-                dspan: ast::tokenstream::DelimSpan { open: span, close: span },
+                dspan: tokenstream::DelimSpan { open: span, close: span },
                 delim,
                 tokens,
             }),
@@ -480,8 +482,8 @@ impl<'a> ExtCtxt<'a> {
                     span,
                     [sym::std, sym::unreachable].map(|s| Ident::new(s, span)).to_vec(),
                 ),
-                ast::token::Delimiter::Parenthesis,
-                ast::tokenstream::TokenStream::default(),
+                Delimiter::Parenthesis,
+                TokenStream::default(),
             ),
         )
     }
diff --git a/compiler/rustc_expand/src/config.rs b/compiler/rustc_expand/src/config.rs
index d2e45d717d9..2df3281568b 100644
--- a/compiler/rustc_expand/src/config.rs
+++ b/compiler/rustc_expand/src/config.rs
@@ -162,7 +162,7 @@ pub(crate) fn attr_into_trace(mut attr: Attribute, trace_name: Symbol) -> Attrib
             let NormalAttr { item, tokens } = &mut **normal;
             item.path.segments[0].ident.name = trace_name;
             // This makes the trace attributes unobservable to token-based proc macros.
-            *tokens = Some(LazyAttrTokenStream::new(AttrTokenStream::default()));
+            *tokens = Some(LazyAttrTokenStream::new_direct(AttrTokenStream::default()));
         }
         AttrKind::DocComment(..) => unreachable!(),
     }
@@ -192,7 +192,7 @@ impl<'a> StripUnconfigured<'a> {
         if self.config_tokens {
             if let Some(Some(tokens)) = node.tokens_mut() {
                 let attr_stream = tokens.to_attr_token_stream();
-                *tokens = LazyAttrTokenStream::new(self.configure_tokens(&attr_stream));
+                *tokens = LazyAttrTokenStream::new_direct(self.configure_tokens(&attr_stream));
             }
         }
     }
@@ -223,7 +223,7 @@ impl<'a> StripUnconfigured<'a> {
                     target.attrs.flat_map_in_place(|attr| self.process_cfg_attr(&attr));
 
                     if self.in_cfg(&target.attrs) {
-                        target.tokens = LazyAttrTokenStream::new(
+                        target.tokens = LazyAttrTokenStream::new_direct(
                             self.configure_tokens(&target.tokens.to_attr_token_stream()),
                         );
                         Some(AttrTokenTree::AttrsTarget(target))
@@ -361,7 +361,7 @@ impl<'a> StripUnconfigured<'a> {
                 .to_attr_token_stream(),
         ));
 
-        let tokens = Some(LazyAttrTokenStream::new(AttrTokenStream::new(trees)));
+        let tokens = Some(LazyAttrTokenStream::new_direct(AttrTokenStream::new(trees)));
         let attr = ast::attr::mk_attr_from_item(
             &self.sess.psess.attr_id_generator,
             item,
diff --git a/compiler/rustc_expand/src/mbe/transcribe.rs b/compiler/rustc_expand/src/mbe/transcribe.rs
index 39186319b1c..2d3fd7702da 100644
--- a/compiler/rustc_expand/src/mbe/transcribe.rs
+++ b/compiler/rustc_expand/src/mbe/transcribe.rs
@@ -1,6 +1,5 @@
 use std::mem;
 
-use rustc_ast::mut_visit::{self, MutVisitor};
 use rustc_ast::token::{
     self, Delimiter, IdentIsRaw, InvisibleOrigin, Lit, LitKind, MetaVarKind, Token, TokenKind,
 };
@@ -29,10 +28,8 @@ use crate::mbe::{self, KleeneOp, MetaVarExpr};
 // A Marker adds the given mark to the syntax context.
 struct Marker(LocalExpnId, Transparency, FxHashMap<SyntaxContext, SyntaxContext>);
 
-impl MutVisitor for Marker {
-    const VISIT_TOKENS: bool = true;
-
-    fn visit_span(&mut self, span: &mut Span) {
+impl Marker {
+    fn mark_span(&mut self, span: &mut Span) {
         // `apply_mark` is a relatively expensive operation, both due to taking hygiene lock, and
         // by itself. All tokens in a macro body typically have the same syntactic context, unless
         // it's some advanced case with macro-generated macros. So if we cache the marked version
@@ -292,7 +289,7 @@ pub(super) fn transcribe<'a>(
 
                         // Emit as a token stream within `Delimiter::Invisible` to maintain
                         // parsing priorities.
-                        marker.visit_span(&mut sp);
+                        marker.mark_span(&mut sp);
                         with_metavar_spans(|mspans| mspans.insert(mk_span, sp));
                         // Both the open delim and close delim get the same span, which covers the
                         // `$foo` in the decl macro RHS.
@@ -312,13 +309,13 @@ pub(super) fn transcribe<'a>(
                             maybe_use_metavar_location(psess, &stack, sp, tt, &mut marker)
                         }
                         MatchedSingle(ParseNtResult::Ident(ident, is_raw)) => {
-                            marker.visit_span(&mut sp);
+                            marker.mark_span(&mut sp);
                             with_metavar_spans(|mspans| mspans.insert(ident.span, sp));
                             let kind = token::NtIdent(*ident, *is_raw);
                             TokenTree::token_alone(kind, sp)
                         }
                         MatchedSingle(ParseNtResult::Lifetime(ident, is_raw)) => {
-                            marker.visit_span(&mut sp);
+                            marker.mark_span(&mut sp);
                             with_metavar_spans(|mspans| mspans.insert(ident.span, sp));
                             let kind = token::NtLifetime(*ident, *is_raw);
                             TokenTree::token_alone(kind, sp)
@@ -400,8 +397,8 @@ pub(super) fn transcribe<'a>(
                 } else {
                     // If we aren't able to match the meta-var, we push it back into the result but
                     // with modified syntax context. (I believe this supports nested macros).
-                    marker.visit_span(&mut sp);
-                    marker.visit_ident(&mut original_ident);
+                    marker.mark_span(&mut sp);
+                    marker.mark_span(&mut original_ident.span);
                     result.push(TokenTree::token_joint_hidden(token::Dollar, sp));
                     result.push(TokenTree::Token(
                         Token::from_ast_ident(original_ident),
@@ -430,16 +427,19 @@ pub(super) fn transcribe<'a>(
             // jump back out of the Delimited, pop the result_stack and add the new results back to
             // the previous results (from outside the Delimited).
             &mbe::TokenTree::Delimited(mut span, ref spacing, ref delimited) => {
-                mut_visit::visit_delim_span(&mut marker, &mut span);
+                marker.mark_span(&mut span.open);
+                marker.mark_span(&mut span.close);
                 stack.push(Frame::new_delimited(delimited, span, *spacing));
                 result_stack.push(mem::take(&mut result));
             }
 
             // Nothing much to do here. Just push the token to the result, being careful to
             // preserve syntax context.
-            mbe::TokenTree::Token(token) => {
-                let mut token = *token;
-                mut_visit::visit_token(&mut marker, &mut token);
+            &mbe::TokenTree::Token(mut token) => {
+                marker.mark_span(&mut token.span);
+                if let token::NtIdent(ident, _) | token::NtLifetime(ident, _) = &mut token.kind {
+                    marker.mark_span(&mut ident.span);
+                }
                 let tt = TokenTree::Token(token, Spacing::Alone);
                 result.push(tt);
             }
@@ -504,7 +504,7 @@ fn maybe_use_metavar_location(
         return orig_tt.clone();
     }
 
-    marker.visit_span(&mut metavar_span);
+    marker.mark_span(&mut metavar_span);
     let no_collision = match orig_tt {
         TokenTree::Token(token, ..) => {
             with_metavar_spans(|mspans| mspans.insert(token.span, metavar_span))
@@ -774,7 +774,7 @@ fn transcribe_metavar_expr<'a>(
 ) -> PResult<'a, ()> {
     let mut visited_span = || {
         let mut span = sp.entire();
-        marker.visit_span(&mut span);
+        marker.mark_span(&mut span);
         span
     };
     match *expr {
diff --git a/compiler/rustc_hir/src/hir.rs b/compiler/rustc_hir/src/hir.rs
index 2f8a8534247..af587ee5bdc 100644
--- a/compiler/rustc_hir/src/hir.rs
+++ b/compiler/rustc_hir/src/hir.rs
@@ -1813,6 +1813,9 @@ pub enum TyPatKind<'hir> {
     /// A range pattern (e.g., `1..=2` or `1..2`).
     Range(&'hir ConstArg<'hir>, &'hir ConstArg<'hir>),
 
+    /// A list of patterns where only one needs to be satisfied.
+    Or(&'hir [TyPat<'hir>]),
+
     /// A placeholder for a pattern that wasn't well formed in some way.
     Err(ErrorGuaranteed),
 }
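
To connect the new `TyPatKind::Or` variant to surface syntax, here is a rough type-level sketch. It assumes the unstable `pattern_types` and `pattern_type_macro` features and the `std::pat::pattern_type!` macro; the `|` form is exactly what this change introduces, so treat it as illustrative rather than guaranteed-to-compile syntax.

#![feature(pattern_types, pattern_type_macro)]
use std::pat::pattern_type;

// A single range lowers to `TyPatKind::Range`.
type Digit = pattern_type!(char is '0'..='9');

// An or-pattern lowers to `TyPatKind::Or`, holding one sub-pattern per arm;
// a value is valid if any arm matches.
type HexDigit = pattern_type!(char is '0'..='9' | 'a'..='f' | 'A'..='F');
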
diff --git a/compiler/rustc_hir/src/intravisit.rs b/compiler/rustc_hir/src/intravisit.rs
index 3c2897ef1d9..a60de4b1fc3 100644
--- a/compiler/rustc_hir/src/intravisit.rs
+++ b/compiler/rustc_hir/src/intravisit.rs
@@ -710,6 +710,7 @@ pub fn walk_ty_pat<'v, V: Visitor<'v>>(visitor: &mut V, pattern: &'v TyPat<'v>)
             try_visit!(visitor.visit_const_arg_unambig(lower_bound));
             try_visit!(visitor.visit_const_arg_unambig(upper_bound));
         }
+        TyPatKind::Or(patterns) => walk_list!(visitor, visit_pattern_type_pattern, patterns),
         TyPatKind::Err(_) => (),
     }
     V::Result::output()
diff --git a/compiler/rustc_hir/src/lib.rs b/compiler/rustc_hir/src/lib.rs
index 32064f96dd6..5533920aee4 100644
--- a/compiler/rustc_hir/src/lib.rs
+++ b/compiler/rustc_hir/src/lib.rs
@@ -14,6 +14,7 @@
 #![feature(never_type)]
 #![feature(rustc_attrs)]
 #![feature(variant_count)]
+#![recursion_limit = "256"]
 // tidy-alphabetical-end
 
 extern crate self as rustc_hir;
diff --git a/compiler/rustc_hir_analysis/src/collect/type_of.rs b/compiler/rustc_hir_analysis/src/collect/type_of.rs
index 694c1228859..c20b14df770 100644
--- a/compiler/rustc_hir_analysis/src/collect/type_of.rs
+++ b/compiler/rustc_hir_analysis/src/collect/type_of.rs
@@ -94,10 +94,12 @@ fn const_arg_anon_type_of<'tcx>(icx: &ItemCtxt<'tcx>, arg_hir_id: HirId, span: S
         }
 
         Node::TyPat(pat) => {
-            let hir::TyKind::Pat(ty, p) = tcx.parent_hir_node(pat.hir_id).expect_ty().kind else {
-                bug!()
+            let node = match tcx.parent_hir_node(pat.hir_id) {
+                // Or patterns can be nested one level deep
+                Node::TyPat(p) => tcx.parent_hir_node(p.hir_id),
+                other => other,
             };
-            assert_eq!(p.hir_id, pat.hir_id);
+            let hir::TyKind::Pat(ty, _) = node.expect_ty().kind else { bug!() };
             icx.lower_ty(ty)
         }
 
diff --git a/compiler/rustc_hir_analysis/src/hir_ty_lowering/mod.rs b/compiler/rustc_hir_analysis/src/hir_ty_lowering/mod.rs
index 8153e6f87f8..fcb7382549f 100644
--- a/compiler/rustc_hir_analysis/src/hir_ty_lowering/mod.rs
+++ b/compiler/rustc_hir_analysis/src/hir_ty_lowering/mod.rs
@@ -2715,30 +2715,9 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ {
             hir::TyKind::Pat(ty, pat) => {
                 let ty_span = ty.span;
                 let ty = self.lower_ty(ty);
-                let pat_ty = match pat.kind {
-                    hir::TyPatKind::Range(start, end) => {
-                        let (ty, start, end) = match ty.kind() {
-                            // Keep this list of types in sync with the list of types that
-                            // the `RangePattern` trait is implemented for.
-                            ty::Int(_) | ty::Uint(_) | ty::Char => {
-                                let start = self.lower_const_arg(start, FeedConstTy::No);
-                                let end = self.lower_const_arg(end, FeedConstTy::No);
-                                (ty, start, end)
-                            }
-                            _ => {
-                                let guar = self.dcx().span_delayed_bug(
-                                    ty_span,
-                                    "invalid base type for range pattern",
-                                );
-                                let errc = ty::Const::new_error(tcx, guar);
-                                (Ty::new_error(tcx, guar), errc, errc)
-                            }
-                        };
-
-                        let pat = tcx.mk_pat(ty::PatternKind::Range { start, end });
-                        Ty::new_pat(tcx, ty, pat)
-                    }
-                    hir::TyPatKind::Err(e) => Ty::new_error(tcx, e),
+                let pat_ty = match self.lower_pat_ty_pat(ty, ty_span, pat) {
+                    Ok(kind) => Ty::new_pat(tcx, ty, tcx.mk_pat(kind)),
+                    Err(guar) => Ty::new_error(tcx, guar),
                 };
                 self.record_ty(pat.hir_id, ty, pat.span);
                 pat_ty
@@ -2750,6 +2729,39 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ {
         result_ty
     }
 
+    fn lower_pat_ty_pat(
+        &self,
+        ty: Ty<'tcx>,
+        ty_span: Span,
+        pat: &hir::TyPat<'tcx>,
+    ) -> Result<ty::PatternKind<'tcx>, ErrorGuaranteed> {
+        let tcx = self.tcx();
+        match pat.kind {
+            hir::TyPatKind::Range(start, end) => {
+                match ty.kind() {
+                    // Keep this list of types in sync with the list of types that
+                    // the `RangePattern` trait is implemented for.
+                    ty::Int(_) | ty::Uint(_) | ty::Char => {
+                        let start = self.lower_const_arg(start, FeedConstTy::No);
+                        let end = self.lower_const_arg(end, FeedConstTy::No);
+                        Ok(ty::PatternKind::Range { start, end })
+                    }
+                    _ => Err(self
+                        .dcx()
+                        .span_delayed_bug(ty_span, "invalid base type for range pattern")),
+                }
+            }
+            hir::TyPatKind::Or(patterns) => {
+                self.tcx()
+                    .mk_patterns_from_iter(patterns.iter().map(|pat| {
+                        self.lower_pat_ty_pat(ty, ty_span, pat).map(|pat| tcx.mk_pat(pat))
+                    }))
+                    .map(ty::PatternKind::Or)
+            }
+            hir::TyPatKind::Err(e) => Err(e),
+        }
+    }
+
     /// Lower an opaque type (i.e., an existential impl-Trait type) from the HIR.
     #[instrument(level = "debug", skip(self), ret)]
     fn lower_opaque_ty(&self, def_id: LocalDefId, in_trait: bool) -> Ty<'tcx> {
diff --git a/compiler/rustc_hir_analysis/src/variance/constraints.rs b/compiler/rustc_hir_analysis/src/variance/constraints.rs
index dc3ce1dd76c..92cfece77c4 100644
--- a/compiler/rustc_hir_analysis/src/variance/constraints.rs
+++ b/compiler/rustc_hir_analysis/src/variance/constraints.rs
@@ -251,12 +251,7 @@ impl<'a, 'tcx> ConstraintContext<'a, 'tcx> {
             }
 
             ty::Pat(typ, pat) => {
-                match *pat {
-                    ty::PatternKind::Range { start, end } => {
-                        self.add_constraints_from_const(current, start, variance);
-                        self.add_constraints_from_const(current, end, variance);
-                    }
-                }
+                self.add_constraints_from_pat(current, variance, pat);
                 self.add_constraints_from_ty(current, typ, variance);
             }
 
@@ -334,6 +329,25 @@ impl<'a, 'tcx> ConstraintContext<'a, 'tcx> {
         }
     }
 
+    fn add_constraints_from_pat(
+        &mut self,
+        current: &CurrentItem,
+        variance: VarianceTermPtr<'a>,
+        pat: ty::Pattern<'tcx>,
+    ) {
+        match *pat {
+            ty::PatternKind::Range { start, end } => {
+                self.add_constraints_from_const(current, start, variance);
+                self.add_constraints_from_const(current, end, variance);
+            }
+            ty::PatternKind::Or(patterns) => {
+                for pat in patterns {
+                    self.add_constraints_from_pat(current, variance, pat)
+                }
+            }
+        }
+    }
+
     /// Adds constraints appropriate for a nominal type (enum, struct,
     /// object, etc) appearing in a context with ambient variance `variance`
     fn add_constraints_from_args(
diff --git a/compiler/rustc_hir_pretty/src/lib.rs b/compiler/rustc_hir_pretty/src/lib.rs
index b878147522d..15e997aebcb 100644
--- a/compiler/rustc_hir_pretty/src/lib.rs
+++ b/compiler/rustc_hir_pretty/src/lib.rs
@@ -146,6 +146,7 @@ impl<'a> State<'a> {
                     false,
                     None,
                     *delim,
+                    None,
                     &tokens,
                     true,
                     span,
@@ -1882,6 +1883,19 @@ impl<'a> State<'a> {
                 self.word("..=");
                 self.print_const_arg(end);
             }
+            TyPatKind::Or(patterns) => {
+                self.popen();
+                let mut first = true;
+                for pat in patterns {
+                    if first {
+                        first = false;
+                    } else {
+                        self.word(" | ");
+                    }
+                    self.print_ty_pat(pat);
+                }
+                self.pclose();
+            }
             TyPatKind::Err(_) => {
                 self.popen();
                 self.word("/*ERROR*/");
diff --git a/compiler/rustc_hir_typeck/src/callee.rs b/compiler/rustc_hir_typeck/src/callee.rs
index 5f5e9e45612..f555d116c52 100644
--- a/compiler/rustc_hir_typeck/src/callee.rs
+++ b/compiler/rustc_hir_typeck/src/callee.rs
@@ -14,7 +14,7 @@ use rustc_middle::ty::adjustment::{
 use rustc_middle::ty::{self, GenericArgsRef, Ty, TyCtxt, TypeVisitableExt};
 use rustc_middle::{bug, span_bug};
 use rustc_span::def_id::LocalDefId;
-use rustc_span::{Ident, Span, sym};
+use rustc_span::{Span, sym};
 use rustc_trait_selection::error_reporting::traits::DefIdOrName;
 use rustc_trait_selection::infer::InferCtxtExt as _;
 use rustc_trait_selection::traits::query::evaluate_obligation::InferCtxtExt as _;
@@ -87,14 +87,29 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
 
         let output = match result {
             None => {
-                // this will report an error since original_callee_ty is not a fn
-                self.confirm_builtin_call(
-                    call_expr,
-                    callee_expr,
-                    original_callee_ty,
-                    arg_exprs,
-                    expected,
-                )
+                // Check all of the arg expressions, but with no expectations
+                // since we don't have a signature to compare them to.
+                for arg in arg_exprs {
+                    self.check_expr(arg);
+                }
+
+                if let hir::ExprKind::Path(hir::QPath::Resolved(_, path)) = &callee_expr.kind
+                    && let [segment] = path.segments
+                {
+                    self.dcx().try_steal_modify_and_emit_err(
+                        segment.ident.span,
+                        StashKey::CallIntoMethod,
+                        |err| {
+                            // Try suggesting `foo(a)` -> `a.foo()` if possible.
+                            self.suggest_call_as_method(
+                                err, segment, arg_exprs, call_expr, expected,
+                            );
+                        },
+                    );
+                }
+
+                let guar = self.report_invalid_callee(call_expr, callee_expr, expr_ty, arg_exprs);
+                Ty::new_error(self.tcx, guar)
             }
 
             Some(CallStep::Builtin(callee_ty)) => {
@@ -296,9 +311,9 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
                 Ty::new_tup_from_iter(self.tcx, arg_exprs.iter().map(|e| self.next_ty_var(e.span)))
             });
 
-            if let Some(ok) = self.lookup_method_in_trait(
+            if let Some(ok) = self.lookup_method_for_operator(
                 self.misc(call_expr.span),
-                Ident::with_dummy_span(method_name),
+                method_name,
                 trait_def_id,
                 adjusted_ty,
                 opt_input_type,
@@ -461,32 +476,11 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
                 }
                 (fn_sig, Some(def_id))
             }
+
             // FIXME(const_trait_impl): these arms should error because we can't enforce them
             ty::FnPtr(sig_tys, hdr) => (sig_tys.with(hdr), None),
-            _ => {
-                for arg in arg_exprs {
-                    self.check_expr(arg);
-                }
 
-                if let hir::ExprKind::Path(hir::QPath::Resolved(_, path)) = &callee_expr.kind
-                    && let [segment] = path.segments
-                {
-                    self.dcx().try_steal_modify_and_emit_err(
-                        segment.ident.span,
-                        StashKey::CallIntoMethod,
-                        |err| {
-                            // Try suggesting `foo(a)` -> `a.foo()` if possible.
-                            self.suggest_call_as_method(
-                                err, segment, arg_exprs, call_expr, expected,
-                            );
-                        },
-                    );
-                }
-
-                let err = self.report_invalid_callee(call_expr, callee_expr, callee_ty, arg_exprs);
-
-                return Ty::new_error(self.tcx, err);
-            }
+            _ => unreachable!(),
         };
 
         // Replace any late-bound regions that appear in the function
@@ -908,19 +902,23 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
         call_expr: &'tcx hir::Expr<'tcx>,
         arg_exprs: &'tcx [hir::Expr<'tcx>],
         expected: Expectation<'tcx>,
-        method_callee: MethodCallee<'tcx>,
+        method: MethodCallee<'tcx>,
     ) -> Ty<'tcx> {
-        let output_type = self.check_method_argument_types(
+        self.check_argument_types(
             call_expr.span,
             call_expr,
-            Ok(method_callee),
+            &method.sig.inputs()[1..],
+            method.sig.output(),
+            expected,
             arg_exprs,
+            method.sig.c_variadic,
             TupleArgumentsFlag::TupleArguments,
-            expected,
+            Some(method.def_id),
         );
 
-        self.write_method_call_and_enforce_effects(call_expr.hir_id, call_expr.span, method_callee);
-        output_type
+        self.write_method_call_and_enforce_effects(call_expr.hir_id, call_expr.span, method);
+
+        method.sig.output()
     }
 }
 
diff --git a/compiler/rustc_hir_typeck/src/expr.rs b/compiler/rustc_hir_typeck/src/expr.rs
index 9c6d4ee096f..db2650ed357 100644
--- a/compiler/rustc_hir_typeck/src/expr.rs
+++ b/compiler/rustc_hir_typeck/src/expr.rs
@@ -40,7 +40,6 @@ use tracing::{debug, instrument, trace};
 use {rustc_ast as ast, rustc_hir as hir};
 
 use crate::Expectation::{self, ExpectCastableToType, ExpectHasType, NoExpectation};
-use crate::TupleArgumentsFlag::DontTupleArguments;
 use crate::coercion::{CoerceMany, DynamicCoerceMany};
 use crate::errors::{
     AddressOfTemporaryTaken, BaseExpressionDoubleDot, BaseExpressionDoubleDotAddExpr,
@@ -51,8 +50,8 @@ use crate::errors::{
     YieldExprOutsideOfCoroutine,
 };
 use crate::{
-    BreakableCtxt, CoroutineTypes, Diverges, FnCtxt, Needs, cast, fatally_break_rust,
-    report_unexpected_variant_res, type_error_struct,
+    BreakableCtxt, CoroutineTypes, Diverges, FnCtxt, Needs, TupleArgumentsFlag, cast,
+    fatally_break_rust, report_unexpected_variant_res, type_error_struct,
 };
 
 impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
@@ -1591,28 +1590,45 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
         // no need to check for bot/err -- callee does that
         let rcvr_t = self.structurally_resolve_type(rcvr.span, rcvr_t);
 
-        let method = match self.lookup_method(rcvr_t, segment, segment.ident.span, expr, rcvr, args)
-        {
+        match self.lookup_method(rcvr_t, segment, segment.ident.span, expr, rcvr, args) {
             Ok(method) => {
-                // We could add a "consider `foo::<params>`" suggestion here, but I wasn't able to
-                // trigger this codepath causing `structurally_resolve_type` to emit an error.
                 self.write_method_call_and_enforce_effects(expr.hir_id, expr.span, method);
-                Ok(method)
+
+                self.check_argument_types(
+                    segment.ident.span,
+                    expr,
+                    &method.sig.inputs()[1..],
+                    method.sig.output(),
+                    expected,
+                    args,
+                    method.sig.c_variadic,
+                    TupleArgumentsFlag::DontTupleArguments,
+                    Some(method.def_id),
+                );
+
+                method.sig.output()
             }
             Err(error) => {
-                Err(self.report_method_error(expr.hir_id, rcvr_t, error, expected, false))
-            }
-        };
+                let guar = self.report_method_error(expr.hir_id, rcvr_t, error, expected, false);
 
-        // Call the generic checker.
-        self.check_method_argument_types(
-            segment.ident.span,
-            expr,
-            method,
-            args,
-            DontTupleArguments,
-            expected,
-        )
+                let err_inputs = self.err_args(args.len(), guar);
+                let err_output = Ty::new_error(self.tcx, guar);
+
+                self.check_argument_types(
+                    segment.ident.span,
+                    expr,
+                    &err_inputs,
+                    err_output,
+                    NoExpectation,
+                    args,
+                    false,
+                    TupleArgumentsFlag::DontTupleArguments,
+                    None,
+                );
+
+                err_output
+            }
+        }
     }
 
     /// Checks use `x.use`.
diff --git a/compiler/rustc_hir_typeck/src/fn_ctxt/checks.rs b/compiler/rustc_hir_typeck/src/fn_ctxt/checks.rs
index e70aa1a7064..c804dc5e7fb 100644
--- a/compiler/rustc_hir_typeck/src/fn_ctxt/checks.rs
+++ b/compiler/rustc_hir_typeck/src/fn_ctxt/checks.rs
@@ -33,7 +33,6 @@ use crate::fn_ctxt::arg_matrix::{ArgMatrix, Compatibility, Error, ExpectedIdx, P
 use crate::fn_ctxt::infer::FnCall;
 use crate::gather_locals::Declaration;
 use crate::inline_asm::InlineAsmCtxt;
-use crate::method::MethodCallee;
 use crate::method::probe::IsSuggestion;
 use crate::method::probe::Mode::MethodCall;
 use crate::method::probe::ProbeScope::TraitsInScope;
@@ -127,61 +126,6 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
         }
     }
 
-    pub(in super::super) fn check_method_argument_types(
-        &self,
-        sp: Span,
-        expr: &'tcx hir::Expr<'tcx>,
-        method: Result<MethodCallee<'tcx>, ErrorGuaranteed>,
-        args_no_rcvr: &'tcx [hir::Expr<'tcx>],
-        tuple_arguments: TupleArgumentsFlag,
-        expected: Expectation<'tcx>,
-    ) -> Ty<'tcx> {
-        let has_error = match method {
-            Ok(method) => method.args.error_reported().and(method.sig.error_reported()),
-            Err(guar) => Err(guar),
-        };
-        if let Err(guar) = has_error {
-            let err_inputs = self.err_args(
-                method.map_or(args_no_rcvr.len(), |method| method.sig.inputs().len() - 1),
-                guar,
-            );
-            let err_output = Ty::new_error(self.tcx, guar);
-
-            let err_inputs = match tuple_arguments {
-                DontTupleArguments => err_inputs,
-                TupleArguments => vec![Ty::new_tup(self.tcx, &err_inputs)],
-            };
-
-            self.check_argument_types(
-                sp,
-                expr,
-                &err_inputs,
-                err_output,
-                NoExpectation,
-                args_no_rcvr,
-                false,
-                tuple_arguments,
-                method.ok().map(|method| method.def_id),
-            );
-            return err_output;
-        }
-
-        let method = method.unwrap();
-        self.check_argument_types(
-            sp,
-            expr,
-            &method.sig.inputs()[1..],
-            method.sig.output(),
-            expected,
-            args_no_rcvr,
-            method.sig.c_variadic,
-            tuple_arguments,
-            Some(method.def_id),
-        );
-
-        method.sig.output()
-    }
-
     /// Generic function that factors out common logic from function calls,
     /// method calls and overloaded operators.
     pub(in super::super) fn check_argument_types(
diff --git a/compiler/rustc_hir_typeck/src/method/mod.rs b/compiler/rustc_hir_typeck/src/method/mod.rs
index 1b67e2306aa..34bbb7d7c05 100644
--- a/compiler/rustc_hir_typeck/src/method/mod.rs
+++ b/compiler/rustc_hir_typeck/src/method/mod.rs
@@ -19,7 +19,7 @@ use rustc_middle::ty::{
     self, GenericArgs, GenericArgsRef, GenericParamDefKind, Ty, TypeVisitableExt,
 };
 use rustc_middle::{bug, span_bug};
-use rustc_span::{ErrorGuaranteed, Ident, Span};
+use rustc_span::{ErrorGuaranteed, Ident, Span, Symbol};
 use rustc_trait_selection::traits::query::evaluate_obligation::InferCtxtExt;
 use rustc_trait_selection::traits::{self, NormalizeExt};
 use tracing::{debug, instrument};
@@ -329,10 +329,10 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
     /// an obligation for a particular trait with the given self type and checks
     /// whether that trait is implemented.
     #[instrument(level = "debug", skip(self))]
-    pub(super) fn lookup_method_in_trait(
+    pub(super) fn lookup_method_for_operator(
         &self,
         cause: ObligationCause<'tcx>,
-        m_name: Ident,
+        method_name: Symbol,
         trait_def_id: DefId,
         self_ty: Ty<'tcx>,
         opt_rhs_ty: Option<Ty<'tcx>>,
@@ -374,13 +374,20 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
         // Trait must have a method named `m_name` and it should not have
         // type parameters or early-bound regions.
         let tcx = self.tcx;
-        let Some(method_item) = self.associated_value(trait_def_id, m_name) else {
+        // We use `Ident::with_dummy_span` since no built-in operator methods have
+        // any macro-specific hygiene, so the span's context doesn't really matter.
+        let Some(method_item) =
+            self.associated_value(trait_def_id, Ident::with_dummy_span(method_name))
+        else {
             bug!("expected associated item for operator trait")
         };
 
         let def_id = method_item.def_id;
         if !method_item.is_fn() {
-            span_bug!(tcx.def_span(def_id), "expected `{m_name}` to be an associated function");
+            span_bug!(
+                tcx.def_span(def_id),
+                "expected `{method_name}` to be an associated function"
+            );
         }
 
         debug!("lookup_in_trait_adjusted: method_item={:?}", method_item);
diff --git a/compiler/rustc_hir_typeck/src/method/probe.rs b/compiler/rustc_hir_typeck/src/method/probe.rs
index 1d3a081cbb8..bda051f1560 100644
--- a/compiler/rustc_hir_typeck/src/method/probe.rs
+++ b/compiler/rustc_hir_typeck/src/method/probe.rs
@@ -1213,7 +1213,7 @@ impl<'a, 'tcx> ProbeContext<'a, 'tcx> {
                 debug!("pick_all_method: step={:?}", step);
                 // skip types that are from a type error or that would require dereferencing
                 // a raw pointer
-                !step.self_ty.references_error() && !step.from_unsafe_deref
+                !step.self_ty.value.references_error() && !step.from_unsafe_deref
             })
             .find_map(|step| {
                 let InferOk { value: self_ty, obligations: _ } = self
diff --git a/compiler/rustc_hir_typeck/src/op.rs b/compiler/rustc_hir_typeck/src/op.rs
index b86991f81ad..7f7921b66b5 100644
--- a/compiler/rustc_hir_typeck/src/op.rs
+++ b/compiler/rustc_hir_typeck/src/op.rs
@@ -12,7 +12,7 @@ use rustc_middle::ty::print::with_no_trimmed_paths;
 use rustc_middle::ty::{self, IsSuggestable, Ty, TyCtxt, TypeVisitableExt};
 use rustc_session::errors::ExprParenthesesNeeded;
 use rustc_span::source_map::Spanned;
-use rustc_span::{Ident, Span, Symbol, sym};
+use rustc_span::{Span, Symbol, sym};
 use rustc_trait_selection::infer::InferCtxtExt;
 use rustc_trait_selection::traits::{FulfillmentError, Obligation, ObligationCtxt};
 use tracing::debug;
@@ -975,7 +975,6 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
             lhs_ty, opname, trait_did
         );
 
-        let opname = Ident::with_dummy_span(opname);
         let (opt_rhs_expr, opt_rhs_ty) = opt_rhs.unzip();
         let cause = self.cause(
             span,
@@ -990,7 +989,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
         );
 
         let method =
-            self.lookup_method_in_trait(cause.clone(), opname, trait_did, lhs_ty, opt_rhs_ty);
+            self.lookup_method_for_operator(cause.clone(), opname, trait_did, lhs_ty, opt_rhs_ty);
         match method {
             Some(ok) => {
                 let method = self.register_infer_ok_obligations(ok);
diff --git a/compiler/rustc_hir_typeck/src/place_op.rs b/compiler/rustc_hir_typeck/src/place_op.rs
index 4fc903cf68b..fedc75abe49 100644
--- a/compiler/rustc_hir_typeck/src/place_op.rs
+++ b/compiler/rustc_hir_typeck/src/place_op.rs
@@ -8,7 +8,7 @@ use rustc_middle::ty::adjustment::{
     PointerCoercion,
 };
 use rustc_middle::ty::{self, Ty};
-use rustc_span::{Ident, Span, sym};
+use rustc_span::{Span, sym};
 use tracing::debug;
 use {rustc_ast as ast, rustc_hir as hir};
 
@@ -211,13 +211,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
             return None;
         };
 
-        self.lookup_method_in_trait(
-            self.misc(span),
-            Ident::with_dummy_span(imm_op),
-            imm_tr,
-            base_ty,
-            opt_rhs_ty,
-        )
+        self.lookup_method_for_operator(self.misc(span), imm_op, imm_tr, base_ty, opt_rhs_ty)
     }
 
     fn try_mutable_overloaded_place_op(
@@ -237,13 +231,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
             return None;
         };
 
-        self.lookup_method_in_trait(
-            self.misc(span),
-            Ident::with_dummy_span(mut_op),
-            mut_tr,
-            base_ty,
-            opt_rhs_ty,
-        )
+        self.lookup_method_for_operator(self.misc(span), mut_op, mut_tr, base_ty, opt_rhs_ty)
     }
 
     /// Convert auto-derefs, indices, etc of an expression from `Deref` and `Index`
diff --git a/compiler/rustc_hir_typeck/src/writeback.rs b/compiler/rustc_hir_typeck/src/writeback.rs
index c5000171ad7..4a171a08ef7 100644
--- a/compiler/rustc_hir_typeck/src/writeback.rs
+++ b/compiler/rustc_hir_typeck/src/writeback.rs
@@ -1,6 +1,12 @@
-// Type resolution: the phase that finds all the types in the AST with
-// unresolved type variables and replaces "ty_var" types with their
-// generic parameters.
+//! During type inference, partially inferred terms are
+//! represented using inference variables (ty::Infer). These don't appear in
+//! the final [`ty::TypeckResults`] since all of the types should have been
+//! inferred once typeck is done.
+//!
+//! When type inference is running however, having to update the typeck results
+//! every time a new type is inferred would be unreasonably slow, so instead all
+//! of the replacement happens at the end in [`FnCtxt::resolve_type_vars_in_body`],
+//! which creates a new `TypeckResults` which doesn't contain any inference variables.
 
 use std::mem;
 
@@ -9,7 +15,6 @@ use rustc_errors::ErrorGuaranteed;
 use rustc_hir::intravisit::{self, InferKind, Visitor};
 use rustc_hir::{self as hir, AmbigArg, HirId};
 use rustc_infer::traits::solve::Goal;
-use rustc_middle::span_bug;
 use rustc_middle::traits::ObligationCause;
 use rustc_middle::ty::adjustment::{Adjust, Adjustment, PointerCoercion};
 use rustc_middle::ty::{
@@ -27,15 +32,6 @@ use crate::FnCtxt;
 ///////////////////////////////////////////////////////////////////////////
 // Entry point
 
-// During type inference, partially inferred types are
-// represented using Type variables (ty::Infer). These don't appear in
-// the final TypeckResults since all of the types should have been
-// inferred once typeck is done.
-// When type inference is running however, having to update the typeck
-// typeck results every time a new type is inferred would be unreasonably slow,
-// so instead all of the replacement happens at the end in
-// resolve_type_vars_in_body, which creates a new TypeTables which
-// doesn't contain any inference types.
 impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
     pub(crate) fn resolve_type_vars_in_body(
         &self,
@@ -90,14 +86,11 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
     }
 }
 
-///////////////////////////////////////////////////////////////////////////
-// The Writeback context. This visitor walks the HIR, checking the
-// fn-specific typeck results to find references to types or regions. It
-// resolves those regions to remove inference variables and writes the
-// final result back into the master typeck results in the tcx. Here and
-// there, it applies a few ad-hoc checks that were not convenient to
-// do elsewhere.
-
+/// The Writeback context. This visitor walks the HIR, checking the
+/// fn-specific typeck results to find inference variables. It resolves
+/// those inference variables and writes the final result into the
+/// `TypeckResults`. It also applies a few ad-hoc checks that were not
+/// convenient to do elsewhere.
 struct WritebackCx<'cx, 'tcx> {
     fcx: &'cx FnCtxt<'cx, 'tcx>,
 
@@ -513,15 +506,6 @@ impl<'cx, 'tcx> WritebackCx<'cx, 'tcx> {
         self.typeck_results.user_provided_types_mut().extend(
             fcx_typeck_results.user_provided_types().items().map(|(local_id, c_ty)| {
                 let hir_id = HirId { owner: common_hir_owner, local_id };
-
-                if cfg!(debug_assertions) && c_ty.has_infer() {
-                    span_bug!(
-                        hir_id.to_span(self.fcx.tcx),
-                        "writeback: `{:?}` has inference variables",
-                        c_ty
-                    );
-                };
-
                 (hir_id, *c_ty)
             }),
         );
@@ -532,17 +516,7 @@ impl<'cx, 'tcx> WritebackCx<'cx, 'tcx> {
         assert_eq!(fcx_typeck_results.hir_owner, self.typeck_results.hir_owner);
 
         self.typeck_results.user_provided_sigs.extend_unord(
-            fcx_typeck_results.user_provided_sigs.items().map(|(&def_id, c_sig)| {
-                if cfg!(debug_assertions) && c_sig.has_infer() {
-                    span_bug!(
-                        self.fcx.tcx.def_span(def_id),
-                        "writeback: `{:?}` has inference variables",
-                        c_sig
-                    );
-                };
-
-                (def_id, *c_sig)
-            }),
+            fcx_typeck_results.user_provided_sigs.items().map(|(def_id, c_sig)| (*def_id, *c_sig)),
         );
     }
 
@@ -897,7 +871,7 @@ impl<'cx, 'tcx> Resolver<'cx, 'tcx> {
             let cause = ObligationCause::misc(self.span.to_span(tcx), body_id);
             let at = self.fcx.at(&cause, self.fcx.param_env);
             let universes = vec![None; outer_exclusive_binder(value).as_usize()];
-            match solve::deeply_normalize_with_skipped_universes_and_ambiguous_goals(
+            match solve::deeply_normalize_with_skipped_universes_and_ambiguous_coroutine_goals(
                 at, value, universes,
             ) {
                 Ok((value, goals)) => {
diff --git a/compiler/rustc_lint/src/nonstandard_style.rs b/compiler/rustc_lint/src/nonstandard_style.rs
index a3e7c84584d..d1138e8f1fa 100644
--- a/compiler/rustc_lint/src/nonstandard_style.rs
+++ b/compiler/rustc_lint/src/nonstandard_style.rs
@@ -422,6 +422,16 @@ impl<'tcx> LateLintPass<'tcx> for NonSnakeCase {
         }
     }
 
+    fn check_ty(&mut self, cx: &LateContext<'_>, ty: &hir::Ty<'_, hir::AmbigArg>) {
+        if let hir::TyKind::BareFn(hir::BareFnTy { param_idents, .. }) = &ty.kind {
+            for param_ident in *param_idents {
+                if let Some(param_ident) = param_ident {
+                    self.check_snake_case(cx, "variable", param_ident);
+                }
+            }
+        }
+    }
+
     fn check_trait_item(&mut self, cx: &LateContext<'_>, item: &hir::TraitItem<'_>) {
         if let hir::TraitItemKind::Fn(_, hir::TraitFn::Required(param_idents)) = item.kind {
             self.check_snake_case(cx, "trait method", &item.ident);
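
A small sketch of what the new `check_ty` hook covers (names below are made up): parameter names written inside a bare function pointer type are now checked for snake case like ordinary bindings.

// Expected to trigger `non_snake_case`: variable `EventCount` should have a
// snake case name such as `event_count`.
type OnEvent = fn(EventCount: usize);

// Fine: already snake case.
type OnTick = fn(tick_count: usize);
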
diff --git a/compiler/rustc_lint/src/types.rs b/compiler/rustc_lint/src/types.rs
index 7b6a723b0b4..f1c06dfe6ce 100644
--- a/compiler/rustc_lint/src/types.rs
+++ b/compiler/rustc_lint/src/types.rs
@@ -755,10 +755,10 @@ declare_lint! {
     /// *subsequent* fields of the associated structs to use an alignment value
     /// where the floating-point type is aligned on a 4-byte boundary.
     ///
-    /// The power alignment rule for structs needed for C compatibility is
-    /// unimplementable within `repr(C)` in the compiler without building in
-    /// handling of references to packed fields and infectious nested layouts,
-    /// so a warning is produced in these situations.
+    /// Effectively, subsequent floating-point fields act as if they were `repr(packed(4))`. This
+    /// would be unsound to do in a `repr(C)` type without all the restrictions that come with
+    /// `repr(packed)`. Rust instead chooses a layout that maintains soundness of Rust code, at the
+    /// expense of incompatibility with C code.
     ///
     /// ### Example
     ///
@@ -790,8 +790,10 @@ declare_lint! {
     ///  - offset_of!(Floats, a) == 0
     ///  - offset_of!(Floats, b) == 8
     ///  - offset_of!(Floats, c) == 12
-    /// However, rust currently aligns `c` at offset_of!(Floats, c) == 16.
-    /// Thus, a warning should be produced for the above struct in this case.
+    ///
+    /// However, Rust currently aligns `c` at `offset_of!(Floats, c) == 16`.
+    /// Using offset 12 would be unsound since `f64` generally must be 8-aligned on this target.
+    /// Thus, a warning is produced for the above struct.
     USES_POWER_ALIGNMENT,
     Warn,
     "Structs do not follow the power alignment rule under repr(C)"
@@ -878,25 +880,36 @@ fn ty_is_known_nonnull<'tcx>(
         }
         ty::Pat(base, pat) => {
             ty_is_known_nonnull(tcx, typing_env, *base, mode)
-                || Option::unwrap_or_default(
-                    try {
-                        match **pat {
-                            ty::PatternKind::Range { start, end } => {
-                                let start = start.try_to_value()?.try_to_bits(tcx, typing_env)?;
-                                let end = end.try_to_value()?.try_to_bits(tcx, typing_env)?;
-
-                                // This also works for negative numbers, as we just need
-                                // to ensure we aren't wrapping over zero.
-                                start > 0 && end >= start
-                            }
-                        }
-                    },
-                )
+                || pat_ty_is_known_nonnull(tcx, typing_env, *pat)
         }
         _ => false,
     }
 }
 
+fn pat_ty_is_known_nonnull<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    typing_env: ty::TypingEnv<'tcx>,
+    pat: ty::Pattern<'tcx>,
+) -> bool {
+    Option::unwrap_or_default(
+        try {
+            match *pat {
+                ty::PatternKind::Range { start, end } => {
+                    let start = start.try_to_value()?.try_to_bits(tcx, typing_env)?;
+                    let end = end.try_to_value()?.try_to_bits(tcx, typing_env)?;
+
+                    // This also works for negative numbers, as we just need
+                    // to ensure we aren't wrapping over zero.
+                    start > 0 && end >= start
+                }
+                ty::PatternKind::Or(patterns) => {
+                    patterns.iter().all(|pat| pat_ty_is_known_nonnull(tcx, typing_env, pat))
+                }
+            }
+        },
+    )
+}
+
 /// Given a non-null scalar (or transparent) type `ty`, return the nullable version of that type.
 /// If the type passed in was not scalar, returns None.
 fn get_nullable_type<'tcx>(
@@ -1038,13 +1051,29 @@ pub(crate) fn repr_nullable_ptr<'tcx>(
             }
             None
         }
-        ty::Pat(base, pat) => match **pat {
-            ty::PatternKind::Range { .. } => get_nullable_type(tcx, typing_env, *base),
-        },
+        ty::Pat(base, pat) => get_nullable_type_from_pat(tcx, typing_env, *base, *pat),
         _ => None,
     }
 }
 
+fn get_nullable_type_from_pat<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    typing_env: ty::TypingEnv<'tcx>,
+    base: Ty<'tcx>,
+    pat: ty::Pattern<'tcx>,
+) -> Option<Ty<'tcx>> {
+    match *pat {
+        ty::PatternKind::Range { .. } => get_nullable_type(tcx, typing_env, base),
+        ty::PatternKind::Or(patterns) => {
+            let first = get_nullable_type_from_pat(tcx, typing_env, base, patterns[0])?;
+            for &pat in &patterns[1..] {
+                assert_eq!(first, get_nullable_type_from_pat(tcx, typing_env, base, pat)?);
+            }
+            Some(first)
+        }
+    }
+}
+
 impl<'a, 'tcx> ImproperCTypesVisitor<'a, 'tcx> {
     /// Check if the type is array and emit an unsafe type lint.
     fn check_for_array_ty(&mut self, sp: Span, ty: Ty<'tcx>) -> bool {
@@ -1628,15 +1657,13 @@ impl ImproperCTypesDefinitions {
         cx: &LateContext<'tcx>,
         ty: Ty<'tcx>,
     ) -> bool {
+        assert!(cx.tcx.sess.target.os == "aix");
         // Structs (under repr(C)) follow the power alignment rule if:
         //   - the first field of the struct is a floating-point type that
         //     is greater than 4-bytes, or
         //   - the first field of the struct is an aggregate whose
         //     recursively first field is a floating-point type greater than
         //     4 bytes.
-        if cx.tcx.sess.target.os != "aix" {
-            return false;
-        }
         if ty.is_floating_point() && ty.primitive_size(cx.tcx).bytes() > 4 {
             return true;
         } else if let Adt(adt_def, _) = ty.kind()
@@ -1674,21 +1701,14 @@ impl ImproperCTypesDefinitions {
             && !adt_def.all_fields().next().is_none()
         {
             let struct_variant_data = item.expect_struct().1;
-            for (index, ..) in struct_variant_data.fields().iter().enumerate() {
+            for field_def in struct_variant_data.fields().iter().skip(1) {
                 // Struct fields (after the first field) are checked for the
                 // power alignment rule, as fields after the first are likely
                 // to be the fields that are misaligned.
-                if index != 0 {
-                    let first_field_def = struct_variant_data.fields()[index];
-                    let def_id = first_field_def.def_id;
-                    let ty = cx.tcx.type_of(def_id).instantiate_identity();
-                    if self.check_arg_for_power_alignment(cx, ty) {
-                        cx.emit_span_lint(
-                            USES_POWER_ALIGNMENT,
-                            first_field_def.span,
-                            UsesPowerAlignment,
-                        );
-                    }
+                let def_id = field_def.def_id;
+                let ty = cx.tcx.type_of(def_id).instantiate_identity();
+                if self.check_arg_for_power_alignment(cx, ty) {
+                    cx.emit_span_lint(USES_POWER_ALIGNMENT, field_def.span, UsesPowerAlignment);
                 }
             }
         }
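
As a standalone sketch of the rule the new `pat_ty_is_known_nonnull` helper applies to or-patterns: the whole pattern is only known non-null if every arm's range excludes zero. The tuples below are plain integer stand-ins, not the compiler's `ty::Pattern` representation.

fn range_excludes_zero(start: u128, end: u128) -> bool {
    // Mirrors the range check above: the range must not wrap over zero.
    start > 0 && end >= start
}

fn or_pattern_known_nonnull(arms: &[(u128, u128)]) -> bool {
    // Every arm must exclude zero for the whole or-pattern to be non-null.
    arms.iter().all(|&(start, end)| range_excludes_zero(start, end))
}

fn main() {
    assert!(or_pattern_known_nonnull(&[(1, 10), (20, 30)]));
    assert!(!or_pattern_known_nonnull(&[(0, 10), (20, 30)]));
}
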
diff --git a/compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp b/compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp
index 8bee051dd4c..ebe8eb57f2c 100644
--- a/compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp
+++ b/compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp
@@ -1365,7 +1365,12 @@ LLVMRustCreateThinLTOData(LLVMRustThinLTOModule *modules, size_t num_modules,
   // Convert the preserved symbols set from string to GUID, this is then needed
   // for internalization.
   for (size_t i = 0; i < num_symbols; i++) {
+#if LLVM_VERSION_GE(21, 0)
+    auto GUID =
+        GlobalValue::getGUIDAssumingExternalLinkage(preserved_symbols[i]);
+#else
     auto GUID = GlobalValue::getGUID(preserved_symbols[i]);
+#endif
     Ret->GUIDPreservedSymbols.insert(GUID);
   }
 
@@ -1685,11 +1690,11 @@ extern "C" void LLVMRustComputeLTOCacheKey(RustStringRef KeyOut,
   // Based on the 'InProcessThinBackend' constructor in LLVM
 #if LLVM_VERSION_GE(21, 0)
   for (auto &Name : Data->Index.cfiFunctionDefs().symbols())
-    CfiFunctionDefs.insert(
-        GlobalValue::getGUID(GlobalValue::dropLLVMManglingEscape(Name)));
+    CfiFunctionDefs.insert(GlobalValue::getGUIDAssumingExternalLinkage(
+        GlobalValue::dropLLVMManglingEscape(Name)));
   for (auto &Name : Data->Index.cfiFunctionDecls().symbols())
-    CfiFunctionDecls.insert(
-        GlobalValue::getGUID(GlobalValue::dropLLVMManglingEscape(Name)));
+    CfiFunctionDecls.insert(GlobalValue::getGUIDAssumingExternalLinkage(
+        GlobalValue::dropLLVMManglingEscape(Name)));
 #else
   for (auto &Name : Data->Index.cfiFunctionDefs())
     CfiFunctionDefs.insert(
diff --git a/compiler/rustc_macros/src/query.rs b/compiler/rustc_macros/src/query.rs
index 62bf34ad5ad..33fb13e23bf 100644
--- a/compiler/rustc_macros/src/query.rs
+++ b/compiler/rustc_macros/src/query.rs
@@ -407,11 +407,23 @@ pub(super) fn rustc_queries(input: TokenStream) -> TokenStream {
     }
 
     TokenStream::from(quote! {
+        /// Higher-order macro that invokes the specified macro with a prepared
+        /// list of all query signatures (including modifiers).
+        ///
+        /// This allows multiple simpler macros to each have access to the list
+        /// of queries.
         #[macro_export]
-        macro_rules! rustc_query_append {
-            ($macro:ident! $( [$($other:tt)*] )?) => {
+        macro_rules! rustc_with_all_queries {
+            (
+                // The macro to invoke once, on all queries (plus extras).
+                $macro:ident!
+
+                // Within [], an optional list of extra "query" signatures to
+                // pass to the given macro, in addition to the actual queries.
+                $( [$($extra_fake_queries:tt)*] )?
+            ) => {
                 $macro! {
-                    $( $($other)* )?
+                    $( $($extra_fake_queries)* )?
                     #query_stream
                 }
             }
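
The renamed `rustc_with_all_queries!` macro is an instance of the "higher-order macro" pattern the new doc comment describes: one macro owns the full token list and hands it to whichever callback macro the caller names. A self-contained sketch with made-up item names (not the real query list):

// The "owner" macro: forwards a fixed list of signatures, plus any extras the
// caller supplies in `[...]`, to the named callback macro.
macro_rules! with_all_items {
    ($callback:ident! $( [$($extra:tt)*] )?) => {
        $callback! {
            $( $($extra)* )?
            fn width() -> u32,
            fn height() -> u32,
        }
    };
}

// One possible callback: count how many signatures it was handed.
macro_rules! count_items {
    ($(fn $name:ident() -> $ret:ty,)*) => {
        const ITEM_COUNT: usize = [$(stringify!($name)),*].len();
    };
}

with_all_items!(count_items!);

fn main() {
    assert_eq!(ITEM_COUNT, 2);
}
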
diff --git a/compiler/rustc_middle/src/dep_graph/dep_node.rs b/compiler/rustc_middle/src/dep_graph/dep_node.rs
index 644cdac5d55..0c998a2cbb3 100644
--- a/compiler/rustc_middle/src/dep_graph/dep_node.rs
+++ b/compiler/rustc_middle/src/dep_graph/dep_node.rs
@@ -13,8 +13,11 @@ use crate::ty::TyCtxt;
 
 macro_rules! define_dep_nodes {
     (
-     $($(#[$attr:meta])*
-        [$($modifiers:tt)*] fn $variant:ident($($K:tt)*) -> $V:ty,)*) => {
+        $(
+            $(#[$attr:meta])*
+            [$($modifiers:tt)*] fn $variant:ident($($K:tt)*) -> $V:ty,
+        )*
+    ) => {
 
         #[macro_export]
         macro_rules! make_dep_kind_array {
@@ -83,7 +86,9 @@ macro_rules! define_dep_nodes {
     };
 }
 
-rustc_query_append!(define_dep_nodes![
+// Create various data structures for each query, and also for a few things
+// that aren't queries.
+rustc_with_all_queries!(define_dep_nodes![
     /// We use this for most things when incr. comp. is turned off.
     [] fn Null() -> (),
     /// We use this to create a forever-red node.
diff --git a/compiler/rustc_middle/src/lib.rs b/compiler/rustc_middle/src/lib.rs
index df025aeebf0..711036865bb 100644
--- a/compiler/rustc_middle/src/lib.rs
+++ b/compiler/rustc_middle/src/lib.rs
@@ -61,6 +61,7 @@
 #![feature(try_trait_v2_yeet)]
 #![feature(type_alias_impl_trait)]
 #![feature(yeet_expr)]
+#![recursion_limit = "256"]
 // tidy-alphabetical-end
 
 #[cfg(test)]
diff --git a/compiler/rustc_middle/src/mir/interpret/queries.rs b/compiler/rustc_middle/src/mir/interpret/queries.rs
index 4222a68e544..e5d1dda3aa0 100644
--- a/compiler/rustc_middle/src/mir/interpret/queries.rs
+++ b/compiler/rustc_middle/src/mir/interpret/queries.rs
@@ -115,15 +115,16 @@ impl<'tcx> TyCtxt<'tcx> {
                     // @lcnr believes that successfully evaluating even though there are
                     // used generic parameters is a bug of evaluation, so checking for it
                     // here does feel somewhat sensible.
-                    if !self.features().generic_const_exprs() && ct.args.has_non_region_param() {
-                        let def_kind = self.def_kind(instance.def_id());
-                        assert!(
-                            matches!(
-                                def_kind,
-                                DefKind::InlineConst | DefKind::AnonConst | DefKind::AssocConst
-                            ),
-                            "{cid:?} is {def_kind:?}",
-                        );
+                    if !self.features().generic_const_exprs()
+                        && ct.args.has_non_region_param()
+                        // We only FCW anon consts, since repeat expression counts with anon consts are the
+                        // only place we have a back-compat hack for. We don't need to check that this is a
+                        // const argument, as only anon consts used as const args should get evaluated "for the type system".
+                        //
+                        // If we don't *only* FCW anon consts we can wind up incorrectly FCW'ing uses of assoc
+                        // consts in pattern positions. #140447
+                        && self.def_kind(instance.def_id()) == DefKind::AnonConst
+                    {
                         let mir_body = self.mir_for_ctfe(instance.def_id());
                         if mir_body.is_polymorphic {
                             let Some(local_def_id) = ct.def.as_local() else { return };
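
For context, the repeat-count back-compat hack referred to above is the `const_evaluatable_unchecked` future-compatibility warning; roughly this kind of code still compiles today but receives the FCW:

fn with_generic_repeat_count<T>() {
    // The repeat count mentions `T`, yet evaluates successfully; this is
    // accepted today but flagged by `const_evaluatable_unchecked`.
    let _ = [0u8; std::mem::size_of::<*mut T>()];
}

fn main() {
    with_generic_repeat_count::<String>();
}
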
diff --git a/compiler/rustc_middle/src/query/mod.rs b/compiler/rustc_middle/src/query/mod.rs
index b55e4d7d831..88f4c4ae4d3 100644
--- a/compiler/rustc_middle/src/query/mod.rs
+++ b/compiler/rustc_middle/src/query/mod.rs
@@ -2578,5 +2578,5 @@ rustc_queries! {
     }
 }
 
-rustc_query_append! { define_callbacks! }
+rustc_with_all_queries! { define_callbacks! }
 rustc_feedable_queries! { define_feedable! }
diff --git a/compiler/rustc_middle/src/query/plumbing.rs b/compiler/rustc_middle/src/query/plumbing.rs
index 69b6f88d72b..769df1ffd6f 100644
--- a/compiler/rustc_middle/src/query/plumbing.rs
+++ b/compiler/rustc_middle/src/query/plumbing.rs
@@ -313,8 +313,11 @@ macro_rules! separate_provide_extern_default {
 
 macro_rules! define_callbacks {
     (
-     $($(#[$attr:meta])*
-        [$($modifiers:tt)*] fn $name:ident($($K:tt)*) -> $V:ty,)*) => {
+        $(
+            $(#[$attr:meta])*
+            [$($modifiers:tt)*] fn $name:ident($($K:tt)*) -> $V:ty,
+        )*
+    ) => {
 
         #[allow(unused_lifetimes)]
         pub mod queries {
diff --git a/compiler/rustc_middle/src/ty/codec.rs b/compiler/rustc_middle/src/ty/codec.rs
index 23927c112bc..5ff87959a80 100644
--- a/compiler/rustc_middle/src/ty/codec.rs
+++ b/compiler/rustc_middle/src/ty/codec.rs
@@ -442,6 +442,15 @@ impl<'tcx, D: TyDecoder<'tcx>> RefDecodable<'tcx, D> for ty::List<ty::BoundVaria
     }
 }
 
+impl<'tcx, D: TyDecoder<'tcx>> RefDecodable<'tcx, D> for ty::List<ty::Pattern<'tcx>> {
+    fn decode(decoder: &mut D) -> &'tcx Self {
+        let len = decoder.read_usize();
+        decoder.interner().mk_patterns_from_iter(
+            (0..len).map::<ty::Pattern<'tcx>, _>(|_| Decodable::decode(decoder)),
+        )
+    }
+}
+
 impl<'tcx, D: TyDecoder<'tcx>> RefDecodable<'tcx, D> for ty::List<ty::Const<'tcx>> {
     fn decode(decoder: &mut D) -> &'tcx Self {
         let len = decoder.read_usize();
@@ -503,6 +512,7 @@ impl_decodable_via_ref! {
     &'tcx mir::Body<'tcx>,
     &'tcx mir::ConcreteOpaqueTypes<'tcx>,
     &'tcx ty::List<ty::BoundVariableKind>,
+    &'tcx ty::List<ty::Pattern<'tcx>>,
     &'tcx ty::ListWithCachedTypeInfo<ty::Clause<'tcx>>,
     &'tcx ty::List<FieldIdx>,
     &'tcx ty::List<(VariantIdx, FieldIdx)>,
diff --git a/compiler/rustc_middle/src/ty/context.rs b/compiler/rustc_middle/src/ty/context.rs
index 1efd0d1d14b..0865e378bf3 100644
--- a/compiler/rustc_middle/src/ty/context.rs
+++ b/compiler/rustc_middle/src/ty/context.rs
@@ -136,6 +136,7 @@ impl<'tcx> Interner for TyCtxt<'tcx> {
 
     type AllocId = crate::mir::interpret::AllocId;
     type Pat = Pattern<'tcx>;
+    type PatList = &'tcx List<Pattern<'tcx>>;
     type Safety = hir::Safety;
     type Abi = ExternAbi;
     type Const = ty::Const<'tcx>;
@@ -842,6 +843,7 @@ pub struct CtxtInterners<'tcx> {
     captures: InternedSet<'tcx, List<&'tcx ty::CapturedPlace<'tcx>>>,
     offset_of: InternedSet<'tcx, List<(VariantIdx, FieldIdx)>>,
     valtree: InternedSet<'tcx, ty::ValTreeKind<'tcx>>,
+    patterns: InternedSet<'tcx, List<ty::Pattern<'tcx>>>,
 }
 
 impl<'tcx> CtxtInterners<'tcx> {
@@ -878,6 +880,7 @@ impl<'tcx> CtxtInterners<'tcx> {
             captures: InternedSet::with_capacity(N),
             offset_of: InternedSet::with_capacity(N),
             valtree: InternedSet::with_capacity(N),
+            patterns: InternedSet::with_capacity(N),
         }
     }
 
@@ -2662,6 +2665,7 @@ slice_interners!(
     local_def_ids: intern_local_def_ids(LocalDefId),
     captures: intern_captures(&'tcx ty::CapturedPlace<'tcx>),
     offset_of: pub mk_offset_of((VariantIdx, FieldIdx)),
+    patterns: pub mk_patterns(Pattern<'tcx>),
 );
 
 impl<'tcx> TyCtxt<'tcx> {
@@ -2935,6 +2939,14 @@ impl<'tcx> TyCtxt<'tcx> {
         self.intern_local_def_ids(def_ids)
     }
 
+    pub fn mk_patterns_from_iter<I, T>(self, iter: I) -> T::Output
+    where
+        I: Iterator<Item = T>,
+        T: CollectAndApply<ty::Pattern<'tcx>, &'tcx List<ty::Pattern<'tcx>>>,
+    {
+        T::collect_and_apply(iter, |xs| self.mk_patterns(xs))
+    }
+
     pub fn mk_local_def_ids_from_iter<I, T>(self, iter: I) -> T::Output
     where
         I: Iterator<Item = T>,
diff --git a/compiler/rustc_middle/src/ty/mod.rs b/compiler/rustc_middle/src/ty/mod.rs
index eba6d61ba7d..2f4c03f0953 100644
--- a/compiler/rustc_middle/src/ty/mod.rs
+++ b/compiler/rustc_middle/src/ty/mod.rs
@@ -50,6 +50,7 @@ use rustc_session::lint::LintBuffer;
 pub use rustc_session::lint::RegisteredTools;
 use rustc_span::hygiene::MacroKind;
 use rustc_span::{DUMMY_SP, ExpnId, ExpnKind, Ident, Span, Symbol, kw, sym};
+pub use rustc_type_ir::data_structures::DelayedSet;
 pub use rustc_type_ir::relate::VarianceDiagInfo;
 pub use rustc_type_ir::*;
 use tracing::{debug, instrument};
diff --git a/compiler/rustc_middle/src/ty/pattern.rs b/compiler/rustc_middle/src/ty/pattern.rs
index 758adc42e3e..5af9b17dd77 100644
--- a/compiler/rustc_middle/src/ty/pattern.rs
+++ b/compiler/rustc_middle/src/ty/pattern.rs
@@ -23,6 +23,13 @@ impl<'tcx> Flags for Pattern<'tcx> {
                 FlagComputation::for_const_kind(&start.kind()).flags
                     | FlagComputation::for_const_kind(&end.kind()).flags
             }
+            ty::PatternKind::Or(pats) => {
+                let mut flags = pats[0].flags();
+                for pat in pats[1..].iter() {
+                    flags |= pat.flags();
+                }
+                flags
+            }
         }
     }
 
@@ -31,6 +38,13 @@ impl<'tcx> Flags for Pattern<'tcx> {
             ty::PatternKind::Range { start, end } => {
                 start.outer_exclusive_binder().max(end.outer_exclusive_binder())
             }
+            ty::PatternKind::Or(pats) => {
+                let mut idx = pats[0].outer_exclusive_binder();
+                for pat in pats[1..].iter() {
+                    idx = idx.max(pat.outer_exclusive_binder());
+                }
+                idx
+            }
         }
     }
 }
@@ -77,6 +91,19 @@ impl<'tcx> IrPrint<PatternKind<'tcx>> for TyCtxt<'tcx> {
 
                 write!(f, "..={end}")
             }
+            PatternKind::Or(patterns) => {
+                write!(f, "(")?;
+                let mut first = true;
+                for pat in patterns {
+                    if first {
+                        first = false
+                    } else {
+                        write!(f, " | ")?;
+                    }
+                    write!(f, "{pat:?}")?;
+                }
+                write!(f, ")")
+            }
         }
     }
 
diff --git a/compiler/rustc_middle/src/ty/relate.rs b/compiler/rustc_middle/src/ty/relate.rs
index b1dfcb80bde..6ad4e5276b2 100644
--- a/compiler/rustc_middle/src/ty/relate.rs
+++ b/compiler/rustc_middle/src/ty/relate.rs
@@ -49,6 +49,7 @@ impl<'tcx> Relate<TyCtxt<'tcx>> for ty::Pattern<'tcx> {
         a: Self,
         b: Self,
     ) -> RelateResult<'tcx, Self> {
+        let tcx = relation.cx();
         match (&*a, &*b) {
             (
                 &ty::PatternKind::Range { start: start_a, end: end_a },
@@ -56,8 +57,17 @@ impl<'tcx> Relate<TyCtxt<'tcx>> for ty::Pattern<'tcx> {
             ) => {
                 let start = relation.relate(start_a, start_b)?;
                 let end = relation.relate(end_a, end_b)?;
-                Ok(relation.cx().mk_pat(ty::PatternKind::Range { start, end }))
+                Ok(tcx.mk_pat(ty::PatternKind::Range { start, end }))
+            }
+            (&ty::PatternKind::Or(a), &ty::PatternKind::Or(b)) => {
+                if a.len() != b.len() {
+                    return Err(TypeError::Mismatch);
+                }
+                let v = iter::zip(a, b).map(|(a, b)| relation.relate(a, b));
+                let patterns = tcx.mk_patterns_from_iter(v)?;
+                Ok(tcx.mk_pat(ty::PatternKind::Or(patterns)))
             }
+            (ty::PatternKind::Range { .. } | ty::PatternKind::Or(_), _) => Err(TypeError::Mismatch),
         }
     }
 }
diff --git a/compiler/rustc_middle/src/ty/structural_impls.rs b/compiler/rustc_middle/src/ty/structural_impls.rs
index 26861666c1d..58f7bc75054 100644
--- a/compiler/rustc_middle/src/ty/structural_impls.rs
+++ b/compiler/rustc_middle/src/ty/structural_impls.rs
@@ -13,7 +13,6 @@ use rustc_type_ir::{ConstKind, TypeFolder, VisitorResult, try_visit};
 
 use super::print::PrettyPrinter;
 use super::{GenericArg, GenericArgKind, Pattern, Region};
-use crate::infer::canonical::CanonicalVarInfos;
 use crate::mir::PlaceElem;
 use crate::ty::print::{FmtPrinter, Printer, with_no_trimmed_paths};
 use crate::ty::{
@@ -779,5 +778,5 @@ list_fold! {
     ty::Clauses<'tcx> : mk_clauses,
     &'tcx ty::List<ty::PolyExistentialPredicate<'tcx>> : mk_poly_existential_predicates,
     &'tcx ty::List<PlaceElem<'tcx>> : mk_place_elems,
-    CanonicalVarInfos<'tcx> : mk_canonical_var_infos,
+    &'tcx ty::List<ty::Pattern<'tcx>> : mk_patterns,
 }
diff --git a/compiler/rustc_middle/src/ty/typeck_results.rs b/compiler/rustc_middle/src/ty/typeck_results.rs
index 8c5827d36df..c6a45f84686 100644
--- a/compiler/rustc_middle/src/ty/typeck_results.rs
+++ b/compiler/rustc_middle/src/ty/typeck_results.rs
@@ -716,6 +716,8 @@ pub type CanonicalUserTypeAnnotations<'tcx> =
 
 #[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable, TypeFoldable, TypeVisitable)]
 pub struct CanonicalUserTypeAnnotation<'tcx> {
+    #[type_foldable(identity)]
+    #[type_visitable(ignore)]
     pub user_ty: Box<CanonicalUserType<'tcx>>,
     pub span: Span,
     pub inferred_ty: Ty<'tcx>,
diff --git a/compiler/rustc_mir_transform/src/elaborate_drop.rs b/compiler/rustc_mir_transform/src/elaborate_drop.rs
index 6f867f8105d..73a58160a6a 100644
--- a/compiler/rustc_mir_transform/src/elaborate_drop.rs
+++ b/compiler/rustc_mir_transform/src/elaborate_drop.rs
@@ -376,7 +376,7 @@ where
         if self.tcx().features().async_drop()
             && self.elaborator.body().coroutine.is_some()
             && self.elaborator.allow_async_drops()
-            && !self.elaborator.body()[bb].is_cleanup
+            && !self.elaborator.patch_ref().block(self.elaborator.body(), bb).is_cleanup
             && drop_ty.needs_async_drop(self.tcx(), self.elaborator.typing_env())
         {
             self.build_async_drop(
diff --git a/compiler/rustc_mir_transform/src/jump_threading.rs b/compiler/rustc_mir_transform/src/jump_threading.rs
index 9732225e48d..ada2c0b76cf 100644
--- a/compiler/rustc_mir_transform/src/jump_threading.rs
+++ b/compiler/rustc_mir_transform/src/jump_threading.rs
@@ -177,16 +177,8 @@ impl<'a> ConditionSet<'a> {
         arena: &'a DroplessArena,
         f: impl Fn(Condition) -> Option<Condition>,
     ) -> Option<ConditionSet<'a>> {
-        let mut all_ok = true;
-        let set = arena.alloc_from_iter(self.iter().map_while(|c| {
-            if let Some(c) = f(c) {
-                Some(c)
-            } else {
-                all_ok = false;
-                None
-            }
-        }));
-        all_ok.then_some(ConditionSet(set))
+        let set = arena.try_alloc_from_iter(self.iter().map(|c| f(c).ok_or(()))).ok()?;
+        Some(ConditionSet(set))
     }
 }
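The replacement above relies on the arena's fallible allocation instead of tracking an `all_ok` flag by hand: if `f` returns `None` for any condition, the whole allocation is abandoned and `ok()?` bails out. The same Option-to-Result short-circuiting, sketched with a plain `Vec` rather than the `DroplessArena` API (illustrative only):

    // Collecting through `Result` stops at the first `Err`, mirroring what
    // `try_alloc_from_iter(...).ok()?` does for the arena-backed version.
    fn map_all(xs: &[i32], f: impl Fn(i32) -> Option<i32>) -> Option<Vec<i32>> {
        xs.iter().map(|&x| f(x).ok_or(())).collect::<Result<Vec<_>, ()>>().ok()
    }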
 
diff --git a/compiler/rustc_mir_transform/src/patch.rs b/compiler/rustc_mir_transform/src/patch.rs
index c7eb2a921c7..a872eae15f1 100644
--- a/compiler/rustc_mir_transform/src/patch.rs
+++ b/compiler/rustc_mir_transform/src/patch.rs
@@ -148,11 +148,20 @@ impl<'tcx> MirPatch<'tcx> {
         self.term_patch_map[bb].is_some()
     }
 
+    /// Universal getter for block data, whether it lives in the 'old' blocks of `body` or in the newly patched ones.
+    pub(crate) fn block<'a>(
+        &'a self,
+        body: &'a Body<'tcx>,
+        bb: BasicBlock,
+    ) -> &'a BasicBlockData<'tcx> {
+        match bb.index().checked_sub(body.basic_blocks.len()) {
+            Some(new) => &self.new_blocks[new],
+            None => &body[bb],
+        }
+    }
+
     pub(crate) fn terminator_loc(&self, body: &Body<'tcx>, bb: BasicBlock) -> Location {
-        let offset = match bb.index().checked_sub(body.basic_blocks.len()) {
-            Some(index) => self.new_blocks[index].statements.len(),
-            None => body[bb].statements.len(),
-        };
+        let offset = self.block(body, bb).statements.len();
         Location { block: bb, statement_index: offset }
     }
 
@@ -284,10 +293,7 @@ impl<'tcx> MirPatch<'tcx> {
     }
 
     pub(crate) fn source_info_for_location(&self, body: &Body<'tcx>, loc: Location) -> SourceInfo {
-        let data = match loc.block.index().checked_sub(body.basic_blocks.len()) {
-            Some(new) => &self.new_blocks[new],
-            None => &body[loc.block],
-        };
+        let data = self.block(body, loc.block);
         Self::source_info_for_index(data, loc)
     }
 }
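The new `block` helper centralizes the index arithmetic its two callers previously duplicated: indices below `body.basic_blocks.len()` resolve into the original body, anything at or past that length into the patch's `new_blocks`. A worked example under assumed sizes (illustrative only):

    // Suppose `body` has 10 basic blocks and the patch has appended 2 new ones:
    //   bb3:  3.checked_sub(10)  == None     => &body[bb3]
    //   bb11: 11.checked_sub(10) == Some(1)  => &self.new_blocks[1]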
diff --git a/compiler/rustc_next_trait_solver/src/solve/assembly/structural_traits.rs b/compiler/rustc_next_trait_solver/src/solve/assembly/structural_traits.rs
index 035bfff89b5..b16f74cd8e4 100644
--- a/compiler/rustc_next_trait_solver/src/solve/assembly/structural_traits.rs
+++ b/compiler/rustc_next_trait_solver/src/solve/assembly/structural_traits.rs
@@ -724,6 +724,9 @@ pub(in crate::solve) fn const_conditions_for_destruct<I: Interner>(
     let destruct_def_id = cx.require_lang_item(TraitSolverLangItem::Destruct);
 
     match self_ty.kind() {
+        // `ManuallyDrop` is trivially `~const Destruct` as we do not run any drop glue on it.
+        ty::Adt(adt_def, _) if adt_def.is_manually_drop() => Ok(vec![]),
+
         // An ADT is `~const Destruct` only if all of the fields are,
         // *and* if there is a `Drop` impl, that `Drop` impl is also `~const`.
         ty::Adt(adt_def, args) => {
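The new `ManuallyDrop` arm returns an empty set of conditions because dropping a `ManuallyDrop<T>` never runs `T`'s drop glue, so there is nothing for `~const Destruct` to constrain. A small standalone illustration of that property (a sketch resting on that assumption, not code from this patch):

    use std::mem::ManuallyDrop;

    struct Loud;
    impl Drop for Loud {
        fn drop(&mut self) { /* non-const side effects */ }
    }

    // Compiles: the wrapper has no drop glue, so letting it fall out of scope
    // in a const context never invokes `Loud::drop`.
    const _: () = {
        let _guard = ManuallyDrop::new(Loud);
    };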
diff --git a/compiler/rustc_parse/src/lib.rs b/compiler/rustc_parse/src/lib.rs
index e73d68e2037..d06922f1e04 100644
--- a/compiler/rustc_parse/src/lib.rs
+++ b/compiler/rustc_parse/src/lib.rs
@@ -5,13 +5,13 @@
 #![allow(rustc::diagnostic_outside_of_impl)]
 #![allow(rustc::untranslatable_diagnostic)]
 #![cfg_attr(bootstrap, feature(let_chains))]
-#![feature(array_windows)]
 #![feature(assert_matches)]
 #![feature(box_patterns)]
 #![feature(debug_closure_helpers)]
 #![feature(if_let_guard)]
 #![feature(iter_intersperse)]
 #![feature(string_from_utf8_lossy_owned)]
+#![recursion_limit = "256"]
 // tidy-alphabetical-end
 
 use std::path::{Path, PathBuf};
diff --git a/compiler/rustc_parse/src/parser/attr.rs b/compiler/rustc_parse/src/parser/attr.rs
index 53614049f08..41d3889c448 100644
--- a/compiler/rustc_parse/src/parser/attr.rs
+++ b/compiler/rustc_parse/src/parser/attr.rs
@@ -1,5 +1,6 @@
 use rustc_ast as ast;
 use rustc_ast::token::{self, MetaVarKind};
+use rustc_ast::tokenstream::ParserRange;
 use rustc_ast::{Attribute, attr};
 use rustc_errors::codes::*;
 use rustc_errors::{Diag, PResult};
@@ -8,8 +9,7 @@ use thin_vec::ThinVec;
 use tracing::debug;
 
 use super::{
-    AttrWrapper, Capturing, FnParseMode, ForceCollect, Parser, ParserRange, PathStyle, Trailing,
-    UsePreAttrPos,
+    AttrWrapper, Capturing, FnParseMode, ForceCollect, Parser, PathStyle, Trailing, UsePreAttrPos,
 };
 use crate::{errors, exp, fluent_generated as fluent};
 
diff --git a/compiler/rustc_parse/src/parser/attr_wrapper.rs b/compiler/rustc_parse/src/parser/attr_wrapper.rs
index 6061c9cb485..44fdf146f9c 100644
--- a/compiler/rustc_parse/src/parser/attr_wrapper.rs
+++ b/compiler/rustc_parse/src/parser/attr_wrapper.rs
@@ -1,21 +1,18 @@
 use std::borrow::Cow;
-use std::{iter, mem};
+use std::mem;
 
-use rustc_ast::token::{Delimiter, Token};
+use rustc_ast::token::Token;
 use rustc_ast::tokenstream::{
-    AttrTokenStream, AttrTokenTree, AttrsTarget, DelimSpacing, DelimSpan, LazyAttrTokenStream,
-    Spacing, ToAttrTokenStream,
+    AttrsTarget, LazyAttrTokenStream, NodeRange, ParserRange, Spacing, TokenCursor,
 };
 use rustc_ast::{self as ast, AttrVec, Attribute, HasAttrs, HasTokens};
 use rustc_data_structures::fx::FxHashSet;
 use rustc_errors::PResult;
 use rustc_session::parse::ParseSess;
-use rustc_span::{DUMMY_SP, Span, sym};
+use rustc_span::{DUMMY_SP, sym};
+use thin_vec::ThinVec;
 
-use super::{
-    Capturing, FlatToken, ForceCollect, NodeRange, NodeReplacement, Parser, ParserRange,
-    TokenCursor, Trailing,
-};
+use super::{Capturing, ForceCollect, Parser, Trailing};
 
 // When collecting tokens, this fully captures the start point. Usually its
 // just after outer attributes, but occasionally it's before.
@@ -94,95 +91,6 @@ fn has_cfg_or_cfg_attr(attrs: &[Attribute]) -> bool {
     })
 }
 
-// From a value of this type we can reconstruct the `TokenStream` seen by the
-// `f` callback passed to a call to `Parser::collect_tokens`, by
-// replaying the getting of the tokens. This saves us producing a `TokenStream`
-// if it is never needed, e.g. a captured `macro_rules!` argument that is never
-// passed to a proc macro. In practice, token stream creation happens rarely
-// compared to calls to `collect_tokens` (see some statistics in #78736) so we
-// are doing as little up-front work as possible.
-//
-// This also makes `Parser` very cheap to clone, since
-// there is no intermediate collection buffer to clone.
-struct LazyAttrTokenStreamImpl {
-    start_token: (Token, Spacing),
-    cursor_snapshot: TokenCursor,
-    num_calls: u32,
-    break_last_token: u32,
-    node_replacements: Box<[NodeReplacement]>,
-}
-
-impl ToAttrTokenStream for LazyAttrTokenStreamImpl {
-    fn to_attr_token_stream(&self) -> AttrTokenStream {
-        // The token produced by the final call to `{,inlined_}next` was not
-        // actually consumed by the callback. The combination of chaining the
-        // initial token and using `take` produces the desired result - we
-        // produce an empty `TokenStream` if no calls were made, and omit the
-        // final token otherwise.
-        let mut cursor_snapshot = self.cursor_snapshot.clone();
-        let tokens = iter::once(FlatToken::Token(self.start_token))
-            .chain(iter::repeat_with(|| FlatToken::Token(cursor_snapshot.next())))
-            .take(self.num_calls as usize);
-
-        if self.node_replacements.is_empty() {
-            make_attr_token_stream(tokens, self.break_last_token)
-        } else {
-            let mut tokens: Vec<_> = tokens.collect();
-            let mut node_replacements = self.node_replacements.to_vec();
-            node_replacements.sort_by_key(|(range, _)| range.0.start);
-
-            #[cfg(debug_assertions)]
-            for [(node_range, tokens), (next_node_range, next_tokens)] in
-                node_replacements.array_windows()
-            {
-                assert!(
-                    node_range.0.end <= next_node_range.0.start
-                        || node_range.0.end >= next_node_range.0.end,
-                    "Node ranges should be disjoint or nested: ({:?}, {:?}) ({:?}, {:?})",
-                    node_range,
-                    tokens,
-                    next_node_range,
-                    next_tokens,
-                );
-            }
-
-            // Process the replace ranges, starting from the highest start
-            // position and working our way back. If have tokens like:
-            //
-            // `#[cfg(FALSE)] struct Foo { #[cfg(FALSE)] field: bool }`
-            //
-            // Then we will generate replace ranges for both
-            // the `#[cfg(FALSE)] field: bool` and the entire
-            // `#[cfg(FALSE)] struct Foo { #[cfg(FALSE)] field: bool }`
-            //
-            // By starting processing from the replace range with the greatest
-            // start position, we ensure that any (outer) replace range which
-            // encloses another (inner) replace range will fully overwrite the
-            // inner range's replacement.
-            for (node_range, target) in node_replacements.into_iter().rev() {
-                assert!(
-                    !node_range.0.is_empty(),
-                    "Cannot replace an empty node range: {:?}",
-                    node_range.0
-                );
-
-                // Replace the tokens in range with zero or one `FlatToken::AttrsTarget`s, plus
-                // enough `FlatToken::Empty`s to fill up the rest of the range. This keeps the
-                // total length of `tokens` constant throughout the replacement process, allowing
-                // us to do all replacements without adjusting indices.
-                let target_len = target.is_some() as usize;
-                tokens.splice(
-                    (node_range.0.start as usize)..(node_range.0.end as usize),
-                    target.into_iter().map(|target| FlatToken::AttrsTarget(target)).chain(
-                        iter::repeat(FlatToken::Empty).take(node_range.0.len() - target_len),
-                    ),
-                );
-            }
-            make_attr_token_stream(tokens.into_iter(), self.break_last_token)
-        }
-    }
-}
-
 impl<'a> Parser<'a> {
     pub(super) fn collect_pos(&self) -> CollectPos {
         CollectPos {
@@ -387,10 +295,10 @@ impl<'a> Parser<'a> {
 
         // This is hot enough for `deep-vector` that checking the conditions for an empty iterator
         // is measurably faster than actually executing the iterator.
-        let node_replacements: Box<[_]> = if parser_replacements_start == parser_replacements_end
+        let node_replacements = if parser_replacements_start == parser_replacements_end
             && inner_attr_parser_replacements.is_empty()
         {
-            Box::new([])
+            ThinVec::new()
         } else {
             // Grab any replace ranges that occur *inside* the current AST node. Convert them
             // from `ParserRange` form to `NodeRange` form. We will perform the actual
@@ -429,13 +337,13 @@ impl<'a> Parser<'a> {
         //     - `attrs`: includes the outer and the inner attr.
         //     - `tokens`: lazy tokens for `g` (with its inner attr deleted).
 
-        let tokens = LazyAttrTokenStream::new(LazyAttrTokenStreamImpl {
-            start_token: collect_pos.start_token,
-            cursor_snapshot: collect_pos.cursor_snapshot,
+        let tokens = LazyAttrTokenStream::new_pending(
+            collect_pos.start_token,
+            collect_pos.cursor_snapshot,
             num_calls,
-            break_last_token: self.break_last_token,
+            self.break_last_token,
             node_replacements,
-        });
+        );
         let mut tokens_used = false;
 
         // If in "definite capture mode" we need to register a replace range
@@ -483,71 +391,6 @@ impl<'a> Parser<'a> {
     }
 }
 
-/// Converts a flattened iterator of tokens (including open and close delimiter tokens) into an
-/// `AttrTokenStream`, creating an `AttrTokenTree::Delimited` for each matching pair of open and
-/// close delims.
-fn make_attr_token_stream(
-    iter: impl Iterator<Item = FlatToken>,
-    break_last_token: u32,
-) -> AttrTokenStream {
-    #[derive(Debug)]
-    struct FrameData {
-        // This is `None` for the first frame, `Some` for all others.
-        open_delim_sp: Option<(Delimiter, Span, Spacing)>,
-        inner: Vec<AttrTokenTree>,
-    }
-    // The stack always has at least one element. Storing it separately makes for shorter code.
-    let mut stack_top = FrameData { open_delim_sp: None, inner: vec![] };
-    let mut stack_rest = vec![];
-    for flat_token in iter {
-        match flat_token {
-            FlatToken::Token((token @ Token { kind, span }, spacing)) => {
-                if let Some(delim) = kind.open_delim() {
-                    stack_rest.push(mem::replace(
-                        &mut stack_top,
-                        FrameData { open_delim_sp: Some((delim, span, spacing)), inner: vec![] },
-                    ));
-                } else if let Some(delim) = kind.close_delim() {
-                    let frame_data = mem::replace(&mut stack_top, stack_rest.pop().unwrap());
-                    let (open_delim, open_sp, open_spacing) = frame_data.open_delim_sp.unwrap();
-                    assert!(
-                        open_delim.eq_ignoring_invisible_origin(&delim),
-                        "Mismatched open/close delims: open={open_delim:?} close={span:?}"
-                    );
-                    let dspan = DelimSpan::from_pair(open_sp, span);
-                    let dspacing = DelimSpacing::new(open_spacing, spacing);
-                    let stream = AttrTokenStream::new(frame_data.inner);
-                    let delimited = AttrTokenTree::Delimited(dspan, dspacing, delim, stream);
-                    stack_top.inner.push(delimited);
-                } else {
-                    stack_top.inner.push(AttrTokenTree::Token(token, spacing))
-                }
-            }
-            FlatToken::AttrsTarget(target) => {
-                stack_top.inner.push(AttrTokenTree::AttrsTarget(target))
-            }
-            FlatToken::Empty => {}
-        }
-    }
-
-    if break_last_token > 0 {
-        let last_token = stack_top.inner.pop().unwrap();
-        if let AttrTokenTree::Token(last_token, spacing) = last_token {
-            let (unglued, _) = last_token.kind.break_two_token_op(break_last_token).unwrap();
-
-            // Tokens are always ASCII chars, so we can use byte arithmetic here.
-            let mut first_span = last_token.span.shrink_to_lo();
-            first_span =
-                first_span.with_hi(first_span.lo() + rustc_span::BytePos(break_last_token));
-
-            stack_top.inner.push(AttrTokenTree::Token(Token::new(unglued, first_span), spacing));
-        } else {
-            panic!("Unexpected last token {last_token:?}")
-        }
-    }
-    AttrTokenStream::new(stack_top.inner)
-}
-
 /// Tokens are needed if:
 /// - any non-single-segment attributes (other than doc comments) are present,
 ///   e.g. `rustfmt::skip`; or
@@ -562,14 +405,3 @@ fn needs_tokens(attrs: &[ast::Attribute]) -> bool {
         }
     })
 }
-
-// Some types are used a lot. Make sure they don't unintentionally get bigger.
-#[cfg(target_pointer_width = "64")]
-mod size_asserts {
-    use rustc_data_structures::static_assert_size;
-
-    use super::*;
-    // tidy-alphabetical-start
-    static_assert_size!(LazyAttrTokenStreamImpl, 96);
-    // tidy-alphabetical-end
-}
diff --git a/compiler/rustc_parse/src/parser/mod.rs b/compiler/rustc_parse/src/parser/mod.rs
index 48df8b59d55..968376678f3 100644
--- a/compiler/rustc_parse/src/parser/mod.rs
+++ b/compiler/rustc_parse/src/parser/mod.rs
@@ -12,7 +12,6 @@ pub mod token_type;
 mod ty;
 
 use std::assert_matches::debug_assert_matches;
-use std::ops::Range;
 use std::{fmt, mem, slice};
 
 use attr_wrapper::{AttrWrapper, UsePreAttrPos};
@@ -25,7 +24,9 @@ use rustc_ast::ptr::P;
 use rustc_ast::token::{
     self, IdentIsRaw, InvisibleOrigin, MetaVarKind, NtExprKind, NtPatKind, Token, TokenKind,
 };
-use rustc_ast::tokenstream::{AttrsTarget, Spacing, TokenStream, TokenTree};
+use rustc_ast::tokenstream::{
+    ParserRange, ParserReplacement, Spacing, TokenCursor, TokenStream, TokenTree, TokenTreeCursor,
+};
 use rustc_ast::util::case::Case;
 use rustc_ast::{
     self as ast, AnonConst, AttrArgs, AttrId, ByRef, Const, CoroutineKind, DUMMY_NODE_ID,
@@ -37,7 +38,7 @@ use rustc_data_structures::fx::FxHashMap;
 use rustc_errors::{Applicability, Diag, FatalError, MultiSpan, PResult};
 use rustc_index::interval::IntervalSet;
 use rustc_session::parse::ParseSess;
-use rustc_span::{DUMMY_SP, Ident, Span, Symbol, kw, sym};
+use rustc_span::{Ident, Span, Symbol, kw, sym};
 use thin_vec::ThinVec;
 use token_type::TokenTypeSet;
 pub use token_type::{ExpKeywordPair, ExpTokenPair, TokenType};
@@ -55,19 +56,64 @@ mod tests;
 mod tokenstream {
     mod tests;
 }
-#[cfg(test)]
-mod mut_visit {
-    mod tests;
-}
 
 bitflags::bitflags! {
+    /// Restrictions applied while parsing.
+    ///
+    /// The parser maintains a bitset of restrictions it will honor while
+    /// parsing. This is essentially used as a way of tracking state of what
+    /// is being parsed and to change behavior based on that.
     #[derive(Clone, Copy, Debug)]
     struct Restrictions: u8 {
+        /// Restricts expressions for use in statement position.
+        ///
+        /// When expressions are used in various places, like statements or
+        /// match arms, this is used to stop parsing once certain tokens are
+        /// reached.
+        ///
+        /// For example, `if true {} & 1` with `STMT_EXPR` in effect is parsed
+        /// as two separate expression statements (`if` and a reference to 1).
+        /// Otherwise it is parsed as a bitwise AND where `if` is on the left
+        /// and 1 is on the right.
         const STMT_EXPR         = 1 << 0;
+        /// Do not allow struct literals.
+        ///
+        /// There are several places in the grammar where we don't want to
+        /// allow struct literals because they can require lookahead, or
+        /// otherwise could be ambiguous or cause confusion. For example,
+        /// in `if Foo {} {}` it isn't clear whether `Foo {}` is a struct
+        /// literal used as the condition, or whether just `Foo` is the
+        /// condition followed by a consequent block and then an empty block.
+        ///
+        /// See [RFC 92](https://rust-lang.github.io/rfcs/0092-struct-grammar.html).
         const NO_STRUCT_LITERAL = 1 << 1;
+        /// Used to provide better error messages for const generic arguments.
+        ///
+        /// An un-braced const generic argument is limited to a very small
+        /// subset of expressions. This is used to detect the situation where
+        /// an expression outside of that subset is used, and to suggest
+        /// wrapping the expression in braces.
         const CONST_EXPR        = 1 << 2;
+        /// Allows `let` expressions.
+        ///
+        /// `let pattern = scrutinee` is parsed as an expression, but it is
+        /// only allowed in let chains (`if` and `while` conditions).
+        /// Otherwise it is not an expression (note that `let` in statement
+        /// positions is treated as a `StmtKind::Let` statement, which has a
+        /// slightly different grammar).
         const ALLOW_LET         = 1 << 3;
+        /// Used to detect a missing `=>` in a match guard.
+        ///
+        /// This is used for error handling in a match guard to give a better
+        /// error message if the `=>` is missing. It is set when parsing the
+        /// guard expression.
         const IN_IF_GUARD       = 1 << 4;
+        /// Used to detect the incorrect use of expressions in patterns.
+        ///
+        /// This is used for error handling while parsing a pattern. During
+        /// error recovery, this will be set to try to parse the pattern as an
+        /// expression, but halts parsing the expression when reaching certain
+        /// tokens like `=`.
         const IS_PAT            = 1 << 5;
     }
 }
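Since these new doc comments describe how each flag steers parsing decisions, here is a rough sketch of the bitflags mechanics they sit on (illustrative only; the real parser threads `Restrictions` through its expression-parsing methods rather than building them ad hoc like this):

    // Combine the restrictions active in some parsing context, then consult
    // them before committing to a particular parse.
    let restrictions = Restrictions::STMT_EXPR | Restrictions::NO_STRUCT_LITERAL;
    if restrictions.contains(Restrictions::NO_STRUCT_LITERAL) {
        // refuse to treat `Foo { .. }` as a struct literal in this position
    }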
@@ -187,57 +233,6 @@ struct ClosureSpans {
     body: Span,
 }
 
-/// A token range within a `Parser`'s full token stream.
-#[derive(Clone, Debug)]
-struct ParserRange(Range<u32>);
-
-/// A token range within an individual AST node's (lazy) token stream, i.e.
-/// relative to that node's first token. Distinct from `ParserRange` so the two
-/// kinds of range can't be mixed up.
-#[derive(Clone, Debug)]
-struct NodeRange(Range<u32>);
-
-/// Indicates a range of tokens that should be replaced by an `AttrsTarget`
-/// (replacement) or be replaced by nothing (deletion). This is used in two
-/// places during token collection.
-///
-/// 1. Replacement. During the parsing of an AST node that may have a
-///    `#[derive]` attribute, when we parse a nested AST node that has `#[cfg]`
-///    or `#[cfg_attr]`, we replace the entire inner AST node with
-///    `FlatToken::AttrsTarget`. This lets us perform eager cfg-expansion on an
-///    `AttrTokenStream`.
-///
-/// 2. Deletion. We delete inner attributes from all collected token streams,
-///    and instead track them through the `attrs` field on the AST node. This
-///    lets us manipulate them similarly to outer attributes. When we create a
-///    `TokenStream`, the inner attributes are inserted into the proper place
-///    in the token stream.
-///
-/// Each replacement starts off in `ParserReplacement` form but is converted to
-/// `NodeReplacement` form when it is attached to a single AST node, via
-/// `LazyAttrTokenStreamImpl`.
-type ParserReplacement = (ParserRange, Option<AttrsTarget>);
-
-/// See the comment on `ParserReplacement`.
-type NodeReplacement = (NodeRange, Option<AttrsTarget>);
-
-impl NodeRange {
-    // Converts a range within a parser's tokens to a range within a
-    // node's tokens beginning at `start_pos`.
-    //
-    // For example, imagine a parser with 50 tokens in its token stream, a
-    // function that spans `ParserRange(20..40)` and an inner attribute within
-    // that function that spans `ParserRange(30..35)`. We would find the inner
-    // attribute's range within the function's tokens by subtracting 20, which
-    // is the position of the function's start token. This gives
-    // `NodeRange(10..15)`.
-    fn new(ParserRange(parser_range): ParserRange, start_pos: u32) -> NodeRange {
-        assert!(!parser_range.is_empty());
-        assert!(parser_range.start >= start_pos);
-        NodeRange((parser_range.start - start_pos)..(parser_range.end - start_pos))
-    }
-}
-
 /// Controls how we capture tokens. Capturing can be expensive,
 /// so we try to avoid performing capturing in cases where
 /// we will never need an `AttrTokenStream`.
@@ -260,104 +255,6 @@ struct CaptureState {
     seen_attrs: IntervalSet<AttrId>,
 }
 
-#[derive(Clone, Debug)]
-struct TokenTreeCursor {
-    stream: TokenStream,
-    /// Points to the current token tree in the stream. In `TokenCursor::curr`,
-    /// this can be any token tree. In `TokenCursor::stack`, this is always a
-    /// `TokenTree::Delimited`.
-    index: usize,
-}
-
-impl TokenTreeCursor {
-    #[inline]
-    fn new(stream: TokenStream) -> Self {
-        TokenTreeCursor { stream, index: 0 }
-    }
-
-    #[inline]
-    fn curr(&self) -> Option<&TokenTree> {
-        self.stream.get(self.index)
-    }
-
-    fn look_ahead(&self, n: usize) -> Option<&TokenTree> {
-        self.stream.get(self.index + n)
-    }
-
-    #[inline]
-    fn bump(&mut self) {
-        self.index += 1;
-    }
-}
-
-/// A `TokenStream` cursor that produces `Token`s. It's a bit odd that
-/// we (a) lex tokens into a nice tree structure (`TokenStream`), and then (b)
-/// use this type to emit them as a linear sequence. But a linear sequence is
-/// what the parser expects, for the most part.
-#[derive(Clone, Debug)]
-struct TokenCursor {
-    // Cursor for the current (innermost) token stream. The index within the
-    // cursor can point to any token tree in the stream (or one past the end).
-    // The delimiters for this token stream are found in `self.stack.last()`;
-    // if that is `None` we are in the outermost token stream which never has
-    // delimiters.
-    curr: TokenTreeCursor,
-
-    // Token streams surrounding the current one. The index within each cursor
-    // always points to a `TokenTree::Delimited`.
-    stack: Vec<TokenTreeCursor>,
-}
-
-impl TokenCursor {
-    fn next(&mut self) -> (Token, Spacing) {
-        self.inlined_next()
-    }
-
-    /// This always-inlined version should only be used on hot code paths.
-    #[inline(always)]
-    fn inlined_next(&mut self) -> (Token, Spacing) {
-        loop {
-            // FIXME: we currently don't return `Delimiter::Invisible` open/close delims. To fix
-            // #67062 we will need to, whereupon the `delim != Delimiter::Invisible` conditions
-            // below can be removed.
-            if let Some(tree) = self.curr.curr() {
-                match tree {
-                    &TokenTree::Token(token, spacing) => {
-                        debug_assert!(!token.kind.is_delim());
-                        let res = (token, spacing);
-                        self.curr.bump();
-                        return res;
-                    }
-                    &TokenTree::Delimited(sp, spacing, delim, ref tts) => {
-                        let trees = TokenTreeCursor::new(tts.clone());
-                        self.stack.push(mem::replace(&mut self.curr, trees));
-                        if !delim.skip() {
-                            return (Token::new(delim.as_open_token_kind(), sp.open), spacing.open);
-                        }
-                        // No open delimiter to return; continue on to the next iteration.
-                    }
-                };
-            } else if let Some(parent) = self.stack.pop() {
-                // We have exhausted this token stream. Move back to its parent token stream.
-                let Some(&TokenTree::Delimited(span, spacing, delim, _)) = parent.curr() else {
-                    panic!("parent should be Delimited")
-                };
-                self.curr = parent;
-                self.curr.bump(); // move past the `Delimited`
-                if !delim.skip() {
-                    return (Token::new(delim.as_close_token_kind(), span.close), spacing.close);
-                }
-                // No close delimiter to return; continue on to the next iteration.
-            } else {
-                // We have exhausted the outermost token stream. The use of
-                // `Spacing::Alone` is arbitrary and immaterial, because the
-                // `Eof` token's spacing is never used.
-                return (Token::new(token::Eof, DUMMY_SP), Spacing::Alone);
-            }
-        }
-    }
-}
-
 /// A sequence separator.
 #[derive(Debug)]
 struct SeqSep<'a> {
@@ -1742,26 +1639,6 @@ impl<'a> Parser<'a> {
     }
 }
 
-/// A helper struct used when building an `AttrTokenStream` from
-/// a `LazyAttrTokenStream`. Both delimiter and non-delimited tokens
-/// are stored as `FlatToken::Token`. A vector of `FlatToken`s
-/// is then 'parsed' to build up an `AttrTokenStream` with nested
-/// `AttrTokenTree::Delimited` tokens.
-#[derive(Debug, Clone)]
-enum FlatToken {
-    /// A token - this holds both delimiter (e.g. '{' and '}')
-    /// and non-delimiter tokens
-    Token((Token, Spacing)),
-    /// Holds the `AttrsTarget` for an AST node. The `AttrsTarget` is inserted
-    /// directly into the constructed `AttrTokenStream` as an
-    /// `AttrTokenTree::AttrsTarget`.
-    AttrsTarget(AttrsTarget),
-    /// A special 'empty' token that is ignored during the conversion
-    /// to an `AttrTokenStream`. This is used to simplify the
-    /// handling of replace ranges.
-    Empty,
-}
-
 // Metavar captures of various kinds.
 #[derive(Clone, Debug)]
 pub enum ParseNtResult {
diff --git a/compiler/rustc_parse/src/parser/mut_visit/tests.rs b/compiler/rustc_parse/src/parser/mut_visit/tests.rs
deleted file mode 100644
index 46c678c3902..00000000000
--- a/compiler/rustc_parse/src/parser/mut_visit/tests.rs
+++ /dev/null
@@ -1,65 +0,0 @@
-use rustc_ast as ast;
-use rustc_ast::mut_visit::MutVisitor;
-use rustc_ast_pretty::pprust;
-use rustc_span::{Ident, create_default_session_globals_then};
-
-use crate::parser::tests::{matches_codepattern, string_to_crate};
-
-// This version doesn't care about getting comments or doc-strings in.
-fn print_crate_items(krate: &ast::Crate) -> String {
-    krate.items.iter().map(|i| pprust::item_to_string(i)).collect::<Vec<_>>().join(" ")
-}
-
-// Change every identifier to "zz".
-struct ToZzIdentMutVisitor;
-
-impl MutVisitor for ToZzIdentMutVisitor {
-    const VISIT_TOKENS: bool = true;
-
-    fn visit_ident(&mut self, ident: &mut Ident) {
-        *ident = Ident::from_str("zz");
-    }
-}
-
-macro_rules! assert_matches_codepattern {
-    ($a:expr , $b:expr) => {{
-        let a_val = $a;
-        let b_val = $b;
-        if !matches_codepattern(&a_val, &b_val) {
-            panic!("expected args satisfying `matches_codepattern`, got {} and {}", a_val, b_val);
-        }
-    }};
-}
-
-// Make sure idents get transformed everywhere.
-#[test]
-fn ident_transformation() {
-    create_default_session_globals_then(|| {
-        let mut zz_visitor = ToZzIdentMutVisitor;
-        let mut krate =
-            string_to_crate("#[a] mod b {fn c (d : e, f : g) {h!(i,j,k);l;m}}".to_string());
-        zz_visitor.visit_crate(&mut krate);
-        assert_matches_codepattern!(
-            print_crate_items(&krate),
-            "#[zz]mod zz{fn zz(zz:zz,zz:zz){zz!(zz,zz,zz);zz;zz}}".to_string()
-        );
-    })
-}
-
-// Make sure idents get transformed even inside macro defs.
-#[test]
-fn ident_transformation_in_defs() {
-    create_default_session_globals_then(|| {
-        let mut zz_visitor = ToZzIdentMutVisitor;
-        let mut krate = string_to_crate(
-            "macro_rules! a {(b $c:expr $(d $e:token)f+ => \
-            (g $(d $d $e)+))} "
-                .to_string(),
-        );
-        zz_visitor.visit_crate(&mut krate);
-        assert_matches_codepattern!(
-            print_crate_items(&krate),
-            "macro_rules! zz{(zz$zz:zz$(zz $zz:zz)zz+=>(zz$(zz$zz$zz)+))}".to_string()
-        );
-    })
-}
diff --git a/compiler/rustc_parse/src/parser/tests.rs b/compiler/rustc_parse/src/parser/tests.rs
index 8285070839a..2a44c90abc1 100644
--- a/compiler/rustc_parse/src/parser/tests.rs
+++ b/compiler/rustc_parse/src/parser/tests.rs
@@ -95,12 +95,6 @@ pub(crate) fn string_to_stream(source_str: String) -> TokenStream {
     ))
 }
 
-/// Parses a string, returns a crate.
-pub(crate) fn string_to_crate(source_str: String) -> ast::Crate {
-    let psess = psess();
-    with_error_checking_parse(source_str, &psess, |p| p.parse_crate_mod())
-}
-
 /// Does the given string match the pattern? whitespace in the first string
 /// may be deleted or replaced with other whitespace to match the pattern.
 /// This function is relatively Unicode-ignorant; fortunately, the careful design
diff --git a/compiler/rustc_query_impl/src/lib.rs b/compiler/rustc_query_impl/src/lib.rs
index 3c329dd0a0e..b7d8af2c995 100644
--- a/compiler/rustc_query_impl/src/lib.rs
+++ b/compiler/rustc_query_impl/src/lib.rs
@@ -234,7 +234,7 @@ pub fn query_system<'a>(
     }
 }
 
-rustc_middle::rustc_query_append! { define_queries! }
+rustc_middle::rustc_with_all_queries! { define_queries! }
 
 pub fn provide(providers: &mut rustc_middle::util::Providers) {
     providers.hooks.alloc_self_profile_query_strings = alloc_self_profile_query_strings;
diff --git a/compiler/rustc_query_impl/src/plumbing.rs b/compiler/rustc_query_impl/src/plumbing.rs
index 19ccc5587d6..d11fa8bad9b 100644
--- a/compiler/rustc_query_impl/src/plumbing.rs
+++ b/compiler/rustc_query_impl/src/plumbing.rs
@@ -575,11 +575,14 @@ where
 }
 
 // NOTE: `$V` isn't used here, but we still need to match on it so it can be passed to other macros
-// invoked by `rustc_query_append`.
+// invoked by `rustc_with_all_queries`.
 macro_rules! define_queries {
     (
-     $($(#[$attr:meta])*
-        [$($modifiers:tt)*] fn $name:ident($($K:tt)*) -> $V:ty,)*) => {
+        $(
+            $(#[$attr:meta])*
+            [$($modifiers:tt)*] fn $name:ident($($K:tt)*) -> $V:ty,
+        )*
+    ) => {
 
         pub(crate) mod query_impl { $(pub(crate) mod $name {
             use super::super::*;
diff --git a/compiler/rustc_resolve/src/late.rs b/compiler/rustc_resolve/src/late.rs
index bae2fdeecaf..faee0e7dd5f 100644
--- a/compiler/rustc_resolve/src/late.rs
+++ b/compiler/rustc_resolve/src/late.rs
@@ -958,6 +958,11 @@ impl<'ra: 'ast, 'ast, 'tcx> Visitor<'ast> for LateResolutionVisitor<'_, 'ast, 'r
                     self.resolve_anon_const(end, AnonConstKind::ConstArg(IsRepeatExpr::No));
                 }
             }
+            TyPatKind::Or(patterns) => {
+                for pat in patterns {
+                    self.visit_ty_pat(pat)
+                }
+            }
             TyPatKind::Err(_) => {}
         }
     }
diff --git a/compiler/rustc_resolve/src/lib.rs b/compiler/rustc_resolve/src/lib.rs
index 4a252a7b528..d2da3ac7d86 100644
--- a/compiler/rustc_resolve/src/lib.rs
+++ b/compiler/rustc_resolve/src/lib.rs
@@ -19,6 +19,7 @@
 #![feature(iter_intersperse)]
 #![feature(rustc_attrs)]
 #![feature(rustdoc_internals)]
+#![recursion_limit = "256"]
 // tidy-alphabetical-end
 
 use std::cell::{Cell, RefCell};
diff --git a/compiler/rustc_smir/src/rustc_smir/convert/ty.rs b/compiler/rustc_smir/src/rustc_smir/convert/ty.rs
index 76489dd0913..8bcac4c4678 100644
--- a/compiler/rustc_smir/src/rustc_smir/convert/ty.rs
+++ b/compiler/rustc_smir/src/rustc_smir/convert/ty.rs
@@ -412,6 +412,7 @@ impl<'tcx> Stable<'tcx> for ty::Pattern<'tcx> {
                 end: Some(end.stable(tables)),
                 include_end: true,
             },
+            ty::PatternKind::Or(_) => todo!(),
         }
     }
 }
diff --git a/compiler/rustc_symbol_mangling/src/v0.rs b/compiler/rustc_symbol_mangling/src/v0.rs
index 9f80f8443cb..f8f2714ee42 100644
--- a/compiler/rustc_symbol_mangling/src/v0.rs
+++ b/compiler/rustc_symbol_mangling/src/v0.rs
@@ -253,6 +253,22 @@ impl<'tcx> SymbolMangler<'tcx> {
 
         Ok(())
     }
+
+    fn print_pat(&mut self, pat: ty::Pattern<'tcx>) -> Result<(), std::fmt::Error> {
+        Ok(match *pat {
+            ty::PatternKind::Range { start, end } => {
+                let consts = [start, end];
+                for ct in consts {
+                    Ty::new_array_with_const_len(self.tcx, self.tcx.types.unit, ct).print(self)?;
+                }
+            }
+            ty::PatternKind::Or(patterns) => {
+                for pat in patterns {
+                    self.print_pat(pat)?;
+                }
+            }
+        })
+    }
 }
 
 impl<'tcx> Printer<'tcx> for SymbolMangler<'tcx> {
@@ -469,20 +485,14 @@ impl<'tcx> Printer<'tcx> for SymbolMangler<'tcx> {
                 ty.print(self)?;
             }
 
-            ty::Pat(ty, pat) => match *pat {
-                ty::PatternKind::Range { start, end } => {
-                    let consts = [start, end];
-                    // HACK: Represent as tuple until we have something better.
-                    // HACK: constants are used in arrays, even if the types don't match.
-                    self.push("T");
-                    ty.print(self)?;
-                    for ct in consts {
-                        Ty::new_array_with_const_len(self.tcx, self.tcx.types.unit, ct)
-                            .print(self)?;
-                    }
-                    self.push("E");
-                }
-            },
+            ty::Pat(ty, pat) => {
+                // HACK: Represent as tuple until we have something better.
+                // HACK: constants are used in arrays, even if the types don't match.
+                self.push("T");
+                ty.print(self)?;
+                self.print_pat(pat)?;
+                self.push("E");
+            }
 
             ty::Array(ty, len) => {
                 self.push("A");
diff --git a/compiler/rustc_target/src/spec/targets/armv5te_unknown_linux_gnueabi.rs b/compiler/rustc_target/src/spec/targets/armv5te_unknown_linux_gnueabi.rs
index 52e786de3ed..7b93672dbe0 100644
--- a/compiler/rustc_target/src/spec/targets/armv5te_unknown_linux_gnueabi.rs
+++ b/compiler/rustc_target/src/spec/targets/armv5te_unknown_linux_gnueabi.rs
@@ -20,6 +20,7 @@ pub(crate) fn target() -> Target {
             max_atomic_width: Some(32),
             mcount: "\u{1}__gnu_mcount_nc".into(),
             has_thumb_interworking: true,
+            llvm_mcount_intrinsic: Some("llvm.arm.gnu.eabi.mcount".into()),
             ..base::linux_gnu::opts()
         },
     }
diff --git a/compiler/rustc_target/src/spec/targets/armv7_unknown_linux_gnueabihf.rs b/compiler/rustc_target/src/spec/targets/armv7_unknown_linux_gnueabihf.rs
index 3b5a337b4f1..a3b35d658e9 100644
--- a/compiler/rustc_target/src/spec/targets/armv7_unknown_linux_gnueabihf.rs
+++ b/compiler/rustc_target/src/spec/targets/armv7_unknown_linux_gnueabihf.rs
@@ -22,6 +22,7 @@ pub(crate) fn target() -> Target {
             features: "+v7,+vfp3,-d32,+thumb2,-neon".into(),
             max_atomic_width: Some(64),
             mcount: "\u{1}__gnu_mcount_nc".into(),
+            llvm_mcount_intrinsic: Some("llvm.arm.gnu.eabi.mcount".into()),
             ..base::linux_gnu::opts()
         },
     }
diff --git a/compiler/rustc_trait_selection/Cargo.toml b/compiler/rustc_trait_selection/Cargo.toml
index 1c61e23362a..a5cc8d9ea01 100644
--- a/compiler/rustc_trait_selection/Cargo.toml
+++ b/compiler/rustc_trait_selection/Cargo.toml
@@ -21,7 +21,6 @@ rustc_parse_format = { path = "../rustc_parse_format" }
 rustc_session = { path = "../rustc_session" }
 rustc_span = { path = "../rustc_span" }
 rustc_transmute = { path = "../rustc_transmute", features = ["rustc"] }
-rustc_type_ir = { path = "../rustc_type_ir" }
 smallvec = { version = "1.8.1", features = ["union", "may_dangle"] }
 thin-vec = "0.2"
 tracing = "0.1"
diff --git a/compiler/rustc_trait_selection/src/error_reporting/infer/need_type_info.rs b/compiler/rustc_trait_selection/src/error_reporting/infer/need_type_info.rs
index eba195cb99c..de9a50f1962 100644
--- a/compiler/rustc_trait_selection/src/error_reporting/infer/need_type_info.rs
+++ b/compiler/rustc_trait_selection/src/error_reporting/infer/need_type_info.rs
@@ -15,10 +15,9 @@ use rustc_middle::ty::adjustment::{Adjust, Adjustment, AutoBorrow};
 use rustc_middle::ty::print::{FmtPrinter, PrettyPrinter, Print, Printer};
 use rustc_middle::ty::{
     self, GenericArg, GenericArgKind, GenericArgsRef, InferConst, IsSuggestable, Term, TermKind,
-    Ty, TyCtxt, TypeFoldable, TypeFolder, TypeSuperFoldable, TypeckResults,
+    Ty, TyCtxt, TypeFoldable, TypeFolder, TypeSuperFoldable, TypeVisitableExt, TypeckResults,
 };
 use rustc_span::{BytePos, DUMMY_SP, FileName, Ident, Span, sym};
-use rustc_type_ir::TypeVisitableExt;
 use tracing::{debug, instrument, warn};
 
 use super::nice_region_error::placeholder_error::Highlighted;
diff --git a/compiler/rustc_trait_selection/src/error_reporting/infer/region.rs b/compiler/rustc_trait_selection/src/error_reporting/infer/region.rs
index 242469a225a..b8207c4f816 100644
--- a/compiler/rustc_trait_selection/src/error_reporting/infer/region.rs
+++ b/compiler/rustc_trait_selection/src/error_reporting/infer/region.rs
@@ -11,9 +11,10 @@ use rustc_hir::{self as hir, ParamName};
 use rustc_middle::bug;
 use rustc_middle::traits::ObligationCauseCode;
 use rustc_middle::ty::error::TypeError;
-use rustc_middle::ty::{self, IsSuggestable, Region, Ty, TyCtxt, TypeVisitableExt as _};
+use rustc_middle::ty::{
+    self, IsSuggestable, Region, Ty, TyCtxt, TypeVisitableExt as _, Upcast as _,
+};
 use rustc_span::{BytePos, ErrorGuaranteed, Span, Symbol, kw};
-use rustc_type_ir::Upcast as _;
 use tracing::{debug, instrument};
 
 use super::ObligationCauseAsDiagArg;
diff --git a/compiler/rustc_trait_selection/src/error_reporting/traits/overflow.rs b/compiler/rustc_trait_selection/src/error_reporting/traits/overflow.rs
index c5ed74420d4..d929ecf68bf 100644
--- a/compiler/rustc_trait_selection/src/error_reporting/traits/overflow.rs
+++ b/compiler/rustc_trait_selection/src/error_reporting/traits/overflow.rs
@@ -5,10 +5,9 @@ use rustc_hir::def::Namespace;
 use rustc_hir::def_id::LOCAL_CRATE;
 use rustc_infer::traits::{Obligation, PredicateObligation};
 use rustc_middle::ty::print::{FmtPrinter, Print};
-use rustc_middle::ty::{self, TyCtxt};
+use rustc_middle::ty::{self, TyCtxt, Upcast};
 use rustc_session::Limit;
 use rustc_span::Span;
-use rustc_type_ir::Upcast;
 use tracing::debug;
 
 use crate::error_reporting::TypeErrCtxt;
diff --git a/compiler/rustc_trait_selection/src/solve.rs b/compiler/rustc_trait_selection/src/solve.rs
index 0c2451a80a7..5a5d16167d2 100644
--- a/compiler/rustc_trait_selection/src/solve.rs
+++ b/compiler/rustc_trait_selection/src/solve.rs
@@ -11,6 +11,6 @@ pub use fulfill::{FulfillmentCtxt, NextSolverError};
 pub(crate) use normalize::deeply_normalize_for_diagnostics;
 pub use normalize::{
     deeply_normalize, deeply_normalize_with_skipped_universes,
-    deeply_normalize_with_skipped_universes_and_ambiguous_goals,
+    deeply_normalize_with_skipped_universes_and_ambiguous_coroutine_goals,
 };
 pub use select::InferCtxtSelectExt;
diff --git a/compiler/rustc_trait_selection/src/solve/delegate.rs b/compiler/rustc_trait_selection/src/solve/delegate.rs
index a87c5ad6db9..ef64da13189 100644
--- a/compiler/rustc_trait_selection/src/solve/delegate.rs
+++ b/compiler/rustc_trait_selection/src/solve/delegate.rs
@@ -8,10 +8,10 @@ use rustc_infer::infer::canonical::{
 };
 use rustc_infer::infer::{InferCtxt, RegionVariableOrigin, TyCtxtInferExt};
 use rustc_infer::traits::solve::Goal;
-use rustc_middle::ty::{self, Ty, TyCtxt, TypeFoldable, TypeVisitableExt as _};
+use rustc_middle::traits::query::NoSolution;
+use rustc_middle::traits::solve::Certainty;
+use rustc_middle::ty::{self, Ty, TyCtxt, TypeFoldable, TypeVisitableExt as _, TypingMode};
 use rustc_span::{DUMMY_SP, ErrorGuaranteed, Span};
-use rustc_type_ir::TypingMode;
-use rustc_type_ir::solve::{Certainty, NoSolution};
 
 use crate::traits::{EvaluateConstErr, specialization_graph};
 
@@ -155,7 +155,7 @@ impl<'tcx> rustc_next_trait_solver::delegate::SolverDelegate for SolverDelegate<
 
     fn register_hidden_type_in_storage(
         &self,
-        opaque_type_key: rustc_type_ir::OpaqueTypeKey<Self::Interner>,
+        opaque_type_key: ty::OpaqueTypeKey<'tcx>,
         hidden_ty: <Self::Interner as ty::Interner>::Ty,
         span: <Self::Interner as ty::Interner>::Span,
     ) -> Option<<Self::Interner as ty::Interner>::Ty> {
diff --git a/compiler/rustc_trait_selection/src/solve/fulfill.rs b/compiler/rustc_trait_selection/src/solve/fulfill.rs
index 1f4fa5aac10..3e1cdac84df 100644
--- a/compiler/rustc_trait_selection/src/solve/fulfill.rs
+++ b/compiler/rustc_trait_selection/src/solve/fulfill.rs
@@ -10,11 +10,10 @@ use rustc_infer::traits::{
     FromSolverError, PredicateObligation, PredicateObligations, TraitEngine,
 };
 use rustc_middle::ty::{
-    self, Ty, TyCtxt, TypeSuperVisitable, TypeVisitable, TypeVisitor, TypingMode,
+    self, DelayedSet, Ty, TyCtxt, TypeSuperVisitable, TypeVisitable, TypeVisitor, TypingMode,
 };
 use rustc_next_trait_solver::solve::{GenerateProofTree, HasChanged, SolverDelegateEvalExt as _};
 use rustc_span::Span;
-use rustc_type_ir::data_structures::DelayedSet;
 use tracing::instrument;
 
 use self::derive_errors::*;
diff --git a/compiler/rustc_trait_selection/src/solve/fulfill/derive_errors.rs b/compiler/rustc_trait_selection/src/solve/fulfill/derive_errors.rs
index a024f432450..2d445dd0790 100644
--- a/compiler/rustc_trait_selection/src/solve/fulfill/derive_errors.rs
+++ b/compiler/rustc_trait_selection/src/solve/fulfill/derive_errors.rs
@@ -7,11 +7,11 @@ use rustc_infer::traits::{
     self, MismatchedProjectionTypes, Obligation, ObligationCause, ObligationCauseCode,
     PredicateObligation, SelectionError,
 };
+use rustc_middle::traits::query::NoSolution;
 use rustc_middle::ty::error::{ExpectedFound, TypeError};
 use rustc_middle::ty::{self, Ty, TyCtxt};
 use rustc_middle::{bug, span_bug};
 use rustc_next_trait_solver::solve::{GenerateProofTree, SolverDelegateEvalExt as _};
-use rustc_type_ir::solve::NoSolution;
 use tracing::{instrument, trace};
 
 use crate::solve::delegate::SolverDelegate;
diff --git a/compiler/rustc_trait_selection/src/solve/normalize.rs b/compiler/rustc_trait_selection/src/solve/normalize.rs
index 5f1e63ab225..d903f94b489 100644
--- a/compiler/rustc_trait_selection/src/solve/normalize.rs
+++ b/compiler/rustc_trait_selection/src/solve/normalize.rs
@@ -45,9 +45,11 @@ where
     T: TypeFoldable<TyCtxt<'tcx>>,
     E: FromSolverError<'tcx, NextSolverError<'tcx>>,
 {
-    let (value, goals) =
-        deeply_normalize_with_skipped_universes_and_ambiguous_goals(at, value, universes)?;
-    assert_eq!(goals, vec![]);
+    let (value, coroutine_goals) =
+        deeply_normalize_with_skipped_universes_and_ambiguous_coroutine_goals(
+            at, value, universes,
+        )?;
+    assert_eq!(coroutine_goals, vec![]);
 
     Ok(value)
 }
@@ -59,9 +61,9 @@ where
 /// entered before passing `value` to the function. This is currently needed for
 /// `normalize_erasing_regions`, which skips binders as it walks through a type.
 ///
-/// This returns a set of stalled obligations if the typing mode of the underlying infcx
-/// has any stalled coroutine def ids.
-pub fn deeply_normalize_with_skipped_universes_and_ambiguous_goals<'tcx, T, E>(
+/// This returns a set of stalled obligations involving coroutines if the typing mode of
+/// the underlying infcx has any stalled coroutine def ids.
+pub fn deeply_normalize_with_skipped_universes_and_ambiguous_coroutine_goals<'tcx, T, E>(
     at: At<'_, 'tcx>,
     value: T,
     universes: Vec<Option<UniverseIndex>>,
@@ -71,11 +73,16 @@ where
     E: FromSolverError<'tcx, NextSolverError<'tcx>>,
 {
     let fulfill_cx = FulfillmentCtxt::new(at.infcx);
-    let mut folder =
-        NormalizationFolder { at, fulfill_cx, depth: 0, universes, stalled_goals: vec![] };
+    let mut folder = NormalizationFolder {
+        at,
+        fulfill_cx,
+        depth: 0,
+        universes,
+        stalled_coroutine_goals: vec![],
+    };
     let value = value.try_fold_with(&mut folder)?;
     let errors = folder.fulfill_cx.select_all_or_error(at.infcx);
-    if errors.is_empty() { Ok((value, folder.stalled_goals)) } else { Err(errors) }
+    if errors.is_empty() { Ok((value, folder.stalled_coroutine_goals)) } else { Err(errors) }
 }
 
 struct NormalizationFolder<'me, 'tcx, E> {
@@ -83,7 +90,7 @@ struct NormalizationFolder<'me, 'tcx, E> {
     fulfill_cx: FulfillmentCtxt<'tcx, E>,
     depth: usize,
     universes: Vec<Option<UniverseIndex>>,
-    stalled_goals: Vec<Goal<'tcx, ty::Predicate<'tcx>>>,
+    stalled_coroutine_goals: Vec<Goal<'tcx, ty::Predicate<'tcx>>>,
 }
 
 impl<'tcx, E> NormalizationFolder<'_, 'tcx, E>
@@ -182,7 +189,7 @@ where
             return Err(errors);
         }
 
-        self.stalled_goals.extend(
+        self.stalled_coroutine_goals.extend(
             self.fulfill_cx
                 .drain_stalled_obligations_for_coroutines(self.at.infcx)
                 .into_iter()
@@ -298,13 +305,13 @@ impl<'tcx> TypeFolder<TyCtxt<'tcx>> for DeeplyNormalizeForDiagnosticsFolder<'_,
 
     fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> {
         let infcx = self.at.infcx;
-        let result =
-            infcx.commit_if_ok(|_| {
-                deeply_normalize_with_skipped_universes_and_ambiguous_goals::<
-                    _,
-                    ScrubbedTraitError<'tcx>,
-                >(self.at, ty, vec![None; ty.outer_exclusive_binder().as_usize()])
-            });
+        let result: Result<_, Vec<ScrubbedTraitError<'tcx>>> = infcx.commit_if_ok(|_| {
+            deeply_normalize_with_skipped_universes_and_ambiguous_coroutine_goals(
+                self.at,
+                ty,
+                vec![None; ty.outer_exclusive_binder().as_usize()],
+            )
+        });
         match result {
             Ok((ty, _)) => ty,
             Err(_) => ty.super_fold_with(self),
@@ -313,13 +320,13 @@ impl<'tcx> TypeFolder<TyCtxt<'tcx>> for DeeplyNormalizeForDiagnosticsFolder<'_,
 
     fn fold_const(&mut self, ct: ty::Const<'tcx>) -> ty::Const<'tcx> {
         let infcx = self.at.infcx;
-        let result =
-            infcx.commit_if_ok(|_| {
-                deeply_normalize_with_skipped_universes_and_ambiguous_goals::<
-                    _,
-                    ScrubbedTraitError<'tcx>,
-                >(self.at, ct, vec![None; ct.outer_exclusive_binder().as_usize()])
-            });
+        let result: Result<_, Vec<ScrubbedTraitError<'tcx>>> = infcx.commit_if_ok(|_| {
+            deeply_normalize_with_skipped_universes_and_ambiguous_coroutine_goals(
+                self.at,
+                ct,
+                vec![None; ct.outer_exclusive_binder().as_usize()],
+            )
+        });
         match result {
             Ok((ct, _)) => ct,
             Err(_) => ct.super_fold_with(self),
diff --git a/compiler/rustc_trait_selection/src/traits/dyn_compatibility.rs b/compiler/rustc_trait_selection/src/traits/dyn_compatibility.rs
index 519394685a8..220a847cc23 100644
--- a/compiler/rustc_trait_selection/src/traits/dyn_compatibility.rs
+++ b/compiler/rustc_trait_selection/src/traits/dyn_compatibility.rs
@@ -13,9 +13,9 @@ use rustc_middle::query::Providers;
 use rustc_middle::ty::{
     self, EarlyBinder, GenericArgs, Ty, TyCtxt, TypeFoldable, TypeFolder, TypeSuperFoldable,
     TypeSuperVisitable, TypeVisitable, TypeVisitableExt, TypeVisitor, TypingMode, Upcast,
+    elaborate,
 };
 use rustc_span::Span;
-use rustc_type_ir::elaborate;
 use smallvec::SmallVec;
 use tracing::{debug, instrument};
 
diff --git a/compiler/rustc_trait_selection/src/traits/effects.rs b/compiler/rustc_trait_selection/src/traits/effects.rs
index defbafac20b..cc5861b5a1f 100644
--- a/compiler/rustc_trait_selection/src/traits/effects.rs
+++ b/compiler/rustc_trait_selection/src/traits/effects.rs
@@ -4,10 +4,10 @@ use rustc_infer::traits::{
     ImplDerivedHostCause, ImplSource, Obligation, ObligationCauseCode, PredicateObligation,
 };
 use rustc_middle::span_bug;
+use rustc_middle::traits::query::NoSolution;
+use rustc_middle::ty::elaborate::elaborate;
 use rustc_middle::ty::fast_reject::DeepRejectCtxt;
 use rustc_middle::ty::{self, TypingMode};
-use rustc_type_ir::elaborate::elaborate;
-use rustc_type_ir::solve::NoSolution;
 use thin_vec::{ThinVec, thin_vec};
 
 use super::SelectionContext;
@@ -252,6 +252,9 @@ fn evaluate_host_effect_for_destruct_goal<'tcx>(
     let self_ty = obligation.predicate.self_ty();
 
     let const_conditions = match *self_ty.kind() {
+        // `ManuallyDrop` is trivially `~const Destruct` as we do not run any drop glue on it.
+        ty::Adt(adt_def, _) if adt_def.is_manually_drop() => thin_vec![],
+
         // An ADT is `~const Destruct` only if all of the fields are,
         // *and* if there is a `Drop` impl, that `Drop` impl is also `~const`.
         ty::Adt(adt_def, args) => {
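
The `ManuallyDrop` arm added above relies on the fact that no drop glue is ever generated for the contents of `ManuallyDrop`, so there are no const conditions to collect. A minimal sketch using only the standard library (not compiler internals) to illustrate that behaviour; the type names here are just for demonstration:

use std::mem::ManuallyDrop;

struct Loud;

impl Drop for Loud {
    fn drop(&mut self) {
        println!("dropping Loud");
    }
}

fn main() {
    // No drop glue runs for the contents of `ManuallyDrop`, so nothing is
    // printed when `_suppressed` goes out of scope.
    let _suppressed = ManuallyDrop::new(Loud);

    // A plain `Loud` prints "dropping Loud" when it is dropped as usual.
    let _plain = Loud;
}
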
diff --git a/compiler/rustc_trait_selection/src/traits/engine.rs b/compiler/rustc_trait_selection/src/traits/engine.rs
index 9f3178f8879..8d6e6b4a651 100644
--- a/compiler/rustc_trait_selection/src/traits/engine.rs
+++ b/compiler/rustc_trait_selection/src/traits/engine.rs
@@ -14,8 +14,8 @@ use rustc_macros::extension;
 use rustc_middle::arena::ArenaAllocatable;
 use rustc_middle::traits::query::NoSolution;
 use rustc_middle::ty::error::TypeError;
+use rustc_middle::ty::relate::Relate;
 use rustc_middle::ty::{self, Ty, TyCtxt, TypeFoldable, Upcast, Variance};
-use rustc_type_ir::relate::Relate;
 
 use super::{FromSolverError, FulfillmentContext, ScrubbedTraitError, TraitEngine};
 use crate::error_reporting::InferCtxtErrorExt;
diff --git a/compiler/rustc_trait_selection/src/traits/normalize.rs b/compiler/rustc_trait_selection/src/traits/normalize.rs
index d38ddbc825c..5f0acd46f86 100644
--- a/compiler/rustc_trait_selection/src/traits/normalize.rs
+++ b/compiler/rustc_trait_selection/src/traits/normalize.rs
@@ -260,11 +260,14 @@ impl<'a, 'b, 'tcx> TypeFolder<TyCtxt<'tcx>> for AssocTypeNormalizer<'a, 'b, 'tcx
             }
 
             ty::Projection if !data.has_escaping_bound_vars() => {
-                // This branch is *mostly* just an optimization: when we don't
-                // have escaping bound vars, we don't need to replace them with
-                // placeholders (see branch below). *Also*, we know that we can
-                // register an obligation to *later* project, since we know
-                // there won't be bound vars there.
+                // When we don't have escaping bound vars we can normalize ambiguous
+                // aliases to inference variables (done in `normalize_projection_ty`).
+                // This would be wrong if there were escaping bound vars: even if we
+                // instantiated the bound vars with placeholders, we wouldn't be able
+                // to map them back after normalization succeeded.
+                //
+                // Also, as an optimization: when we don't have escaping bound vars, we don't
+                // need to replace them with placeholders (see branch below).
                 let data = data.fold_with(self);
                 let normalized_ty = project::normalize_projection_ty(
                     self.selcx,
diff --git a/compiler/rustc_trait_selection/src/traits/query/type_op/implied_outlives_bounds.rs b/compiler/rustc_trait_selection/src/traits/query/type_op/implied_outlives_bounds.rs
index 1df69932c64..d9b57f0c67d 100644
--- a/compiler/rustc_trait_selection/src/traits/query/type_op/implied_outlives_bounds.rs
+++ b/compiler/rustc_trait_selection/src/traits/query/type_op/implied_outlives_bounds.rs
@@ -6,10 +6,10 @@ use rustc_infer::traits::query::OutlivesBound;
 use rustc_infer::traits::query::type_op::ImpliedOutlivesBounds;
 use rustc_middle::infer::canonical::CanonicalQueryResponse;
 use rustc_middle::traits::ObligationCause;
+use rustc_middle::ty::outlives::{Component, push_outlives_components};
 use rustc_middle::ty::{self, ParamEnvAnd, Ty, TyCtxt, TypeVisitable, TypeVisitor};
 use rustc_span::def_id::CRATE_DEF_ID;
 use rustc_span::{DUMMY_SP, Span, sym};
-use rustc_type_ir::outlives::{Component, push_outlives_components};
 use smallvec::{SmallVec, smallvec};
 
 use crate::traits::query::NoSolution;
diff --git a/compiler/rustc_trait_selection/src/traits/select/candidate_assembly.rs b/compiler/rustc_trait_selection/src/traits/select/candidate_assembly.rs
index 5d0b9dd41b2..10a2ba049d8 100644
--- a/compiler/rustc_trait_selection/src/traits/select/candidate_assembly.rs
+++ b/compiler/rustc_trait_selection/src/traits/select/candidate_assembly.rs
@@ -14,9 +14,8 @@ use rustc_data_structures::fx::{FxHashSet, FxIndexSet};
 use rustc_hir as hir;
 use rustc_infer::traits::{Obligation, PolyTraitObligation, SelectionError};
 use rustc_middle::ty::fast_reject::DeepRejectCtxt;
-use rustc_middle::ty::{self, Ty, TypeVisitableExt, TypingMode};
+use rustc_middle::ty::{self, Ty, TypeVisitableExt, TypingMode, elaborate};
 use rustc_middle::{bug, span_bug};
-use rustc_type_ir::elaborate;
 use tracing::{debug, instrument, trace};
 
 use super::SelectionCandidate::*;
diff --git a/compiler/rustc_trait_selection/src/traits/select/confirmation.rs b/compiler/rustc_trait_selection/src/traits/select/confirmation.rs
index d71d1e9ae0f..8008c7e4d34 100644
--- a/compiler/rustc_trait_selection/src/traits/select/confirmation.rs
+++ b/compiler/rustc_trait_selection/src/traits/select/confirmation.rs
@@ -15,10 +15,9 @@ use rustc_hir::lang_items::LangItem;
 use rustc_infer::infer::{DefineOpaqueTypes, HigherRankedType, InferOk};
 use rustc_infer::traits::ObligationCauseCode;
 use rustc_middle::traits::{BuiltinImplSource, SignatureMismatchData};
-use rustc_middle::ty::{self, GenericArgsRef, Ty, TyCtxt, Upcast};
+use rustc_middle::ty::{self, GenericArgsRef, Ty, TyCtxt, Upcast, elaborate};
 use rustc_middle::{bug, span_bug};
 use rustc_span::def_id::DefId;
-use rustc_type_ir::elaborate;
 use thin_vec::thin_vec;
 use tracing::{debug, instrument};
 
diff --git a/compiler/rustc_trait_selection/src/traits/select/mod.rs b/compiler/rustc_trait_selection/src/traits/select/mod.rs
index df02a67c2c9..4ce37db4280 100644
--- a/compiler/rustc_trait_selection/src/traits/select/mod.rs
+++ b/compiler/rustc_trait_selection/src/traits/select/mod.rs
@@ -28,10 +28,9 @@ use rustc_middle::ty::error::TypeErrorToStringExt;
 use rustc_middle::ty::print::{PrintTraitRefExt as _, with_no_trimmed_paths};
 use rustc_middle::ty::{
     self, GenericArgsRef, PolyProjectionPredicate, Ty, TyCtxt, TypeFoldable, TypeVisitableExt,
-    TypingMode, Upcast,
+    TypingMode, Upcast, elaborate,
 };
 use rustc_span::{Symbol, sym};
-use rustc_type_ir::elaborate;
 use tracing::{debug, instrument, trace};
 
 use self::EvaluationResult::*;
diff --git a/compiler/rustc_trait_selection/src/traits/specialize/mod.rs b/compiler/rustc_trait_selection/src/traits/specialize/mod.rs
index 448ac558cad..b30fadd3e5b 100644
--- a/compiler/rustc_trait_selection/src/traits/specialize/mod.rs
+++ b/compiler/rustc_trait_selection/src/traits/specialize/mod.rs
@@ -18,11 +18,11 @@ use rustc_hir::def_id::{DefId, LocalDefId};
 use rustc_infer::traits::Obligation;
 use rustc_middle::bug;
 use rustc_middle::query::LocalCrate;
+use rustc_middle::traits::query::NoSolution;
 use rustc_middle::ty::print::PrintTraitRefExt as _;
 use rustc_middle::ty::{self, GenericArgsRef, Ty, TyCtxt, TypeVisitableExt, TypingMode};
 use rustc_session::lint::builtin::COHERENCE_LEAK_CHECK;
 use rustc_span::{DUMMY_SP, ErrorGuaranteed, Span, sym};
-use rustc_type_ir::solve::NoSolution;
 use specialization_graph::GraphExt;
 use tracing::{debug, instrument};
 
diff --git a/compiler/rustc_trait_selection/src/traits/wf.rs b/compiler/rustc_trait_selection/src/traits/wf.rs
index 00a4a58a6d8..2e32cda7602 100644
--- a/compiler/rustc_trait_selection/src/traits/wf.rs
+++ b/compiler/rustc_trait_selection/src/traits/wf.rs
@@ -658,6 +658,50 @@ impl<'a, 'tcx> WfPredicates<'a, 'tcx> {
             // ```
         }
     }
+
+    fn add_wf_preds_for_pat_ty(&mut self, base_ty: Ty<'tcx>, pat: ty::Pattern<'tcx>) {
+        let tcx = self.tcx();
+        match *pat {
+            ty::PatternKind::Range { start, end } => {
+                let mut check = |c| {
+                    let cause = self.cause(ObligationCauseCode::Misc);
+                    self.out.push(traits::Obligation::with_depth(
+                        tcx,
+                        cause.clone(),
+                        self.recursion_depth,
+                        self.param_env,
+                        ty::Binder::dummy(ty::PredicateKind::Clause(
+                            ty::ClauseKind::ConstArgHasType(c, base_ty),
+                        )),
+                    ));
+                    if !tcx.features().generic_pattern_types() {
+                        if c.has_param() {
+                            if self.span.is_dummy() {
+                                self.tcx()
+                                    .dcx()
+                                    .delayed_bug("feature error should be reported elsewhere, too");
+                            } else {
+                                feature_err(
+                                    &self.tcx().sess,
+                                    sym::generic_pattern_types,
+                                    self.span,
+                                    "wraparound pattern type ranges cause monomorphization time errors",
+                                )
+                                .emit();
+                            }
+                        }
+                    }
+                };
+                check(start);
+                check(end);
+            }
+            ty::PatternKind::Or(patterns) => {
+                for pat in patterns {
+                    self.add_wf_preds_for_pat_ty(base_ty, pat)
+                }
+            }
+        }
+    }
 }
 
 impl<'a, 'tcx> TypeVisitor<TyCtxt<'tcx>> for WfPredicates<'a, 'tcx> {
@@ -710,43 +754,9 @@ impl<'a, 'tcx> TypeVisitor<TyCtxt<'tcx>> for WfPredicates<'a, 'tcx> {
                 ));
             }
 
-            ty::Pat(subty, pat) => {
-                self.require_sized(subty, ObligationCauseCode::Misc);
-                match *pat {
-                    ty::PatternKind::Range { start, end } => {
-                        let mut check = |c| {
-                            let cause = self.cause(ObligationCauseCode::Misc);
-                            self.out.push(traits::Obligation::with_depth(
-                                tcx,
-                                cause.clone(),
-                                self.recursion_depth,
-                                self.param_env,
-                                ty::Binder::dummy(ty::PredicateKind::Clause(
-                                    ty::ClauseKind::ConstArgHasType(c, subty),
-                                )),
-                            ));
-                            if !tcx.features().generic_pattern_types() {
-                                if c.has_param() {
-                                    if self.span.is_dummy() {
-                                        self.tcx().dcx().delayed_bug(
-                                            "feature error should be reported elsewhere, too",
-                                        );
-                                    } else {
-                                        feature_err(
-                                            &self.tcx().sess,
-                                            sym::generic_pattern_types,
-                                            self.span,
-                                            "wraparound pattern type ranges cause monomorphization time errors",
-                                        )
-                                        .emit();
-                                    }
-                                }
-                            }
-                        };
-                        check(start);
-                        check(end);
-                    }
-                }
+            ty::Pat(base_ty, pat) => {
+                self.require_sized(base_ty, ObligationCauseCode::Misc);
+                self.add_wf_preds_for_pat_ty(base_ty, pat);
             }
 
             ty::Tuple(tys) => {
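
The wf change above only factors the existing range-pattern logic into `add_wf_preds_for_pat_ty` so that it can recurse through `ty::PatternKind::Or`. A rough standalone sketch of that recursion shape, using hypothetical stand-in types rather than the compiler's `ty::Pattern`:

// Simplified stand-ins for the pattern representation (hypothetical).
enum Pattern {
    Range { start: u128, end: u128 },
    Or(Vec<Pattern>),
}

fn add_preds_for_pat(pat: &Pattern, out: &mut Vec<String>) {
    match pat {
        Pattern::Range { start, end } => {
            // In the real code each bound becomes a `ConstArgHasType`
            // obligation; here we just record which bounds were visited.
            for bound in [start, end] {
                out.push(format!("check bound {bound}"));
            }
        }
        // Or-patterns simply recurse into every alternative.
        Pattern::Or(alts) => {
            for alt in alts {
                add_preds_for_pat(alt, out);
            }
        }
    }
}

fn main() {
    let pat = Pattern::Or(vec![
        Pattern::Range { start: 0, end: 10 },
        Pattern::Range { start: 20, end: 30 },
    ]);
    let mut out = Vec::new();
    add_preds_for_pat(&pat, &mut out);
    assert_eq!(out.len(), 4);
}
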
diff --git a/compiler/rustc_transmute/Cargo.toml b/compiler/rustc_transmute/Cargo.toml
index 0250cc0ea07..246b66d3d03 100644
--- a/compiler/rustc_transmute/Cargo.toml
+++ b/compiler/rustc_transmute/Cargo.toml
@@ -5,7 +5,6 @@ edition = "2024"
 
 [dependencies]
 # tidy-alphabetical-start
-itertools = "0.12"
 rustc_abi = { path = "../rustc_abi", optional = true }
 rustc_data_structures = { path = "../rustc_data_structures" }
 rustc_hir = { path = "../rustc_hir", optional = true }
@@ -15,6 +14,11 @@ smallvec = "1.8.1"
 tracing = "0.1"
 # tidy-alphabetical-end
 
+[dev-dependencies]
+# tidy-alphabetical-start
+itertools = "0.12"
+# tidy-alphabetical-end
+
 [features]
 rustc = [
     "dep:rustc_abi",
diff --git a/compiler/rustc_transmute/src/layout/dfa.rs b/compiler/rustc_transmute/src/layout/dfa.rs
index d1f58157b69..05afa28db31 100644
--- a/compiler/rustc_transmute/src/layout/dfa.rs
+++ b/compiler/rustc_transmute/src/layout/dfa.rs
@@ -1,5 +1,5 @@
 use std::fmt;
-use std::ops::RangeInclusive;
+use std::iter::Peekable;
 use std::sync::atomic::{AtomicU32, Ordering};
 
 use super::{Byte, Ref, Tree, Uninhabited};
@@ -211,15 +211,15 @@ where
             let b_transitions =
                 b_src.and_then(|b_src| b.transitions.get(&b_src)).unwrap_or(&empty_transitions);
 
-            let byte_transitions =
-                a_transitions.byte_transitions.union(&b_transitions.byte_transitions);
-
-            let byte_transitions = byte_transitions.map_states(|(a_dst, b_dst)| {
-                assert!(a_dst.is_some() || b_dst.is_some());
+            let byte_transitions = a_transitions.byte_transitions.union(
+                &b_transitions.byte_transitions,
+                |a_dst, b_dst| {
+                    assert!(a_dst.is_some() || b_dst.is_some());
 
-                queue.enqueue(a_dst, b_dst);
-                mapped((a_dst, b_dst))
-            });
+                    queue.enqueue(a_dst, b_dst);
+                    mapped((a_dst, b_dst))
+                },
+            );
 
             let ref_transitions =
                 a_transitions.ref_transitions.keys().chain(b_transitions.ref_transitions.keys());
@@ -245,18 +245,6 @@ where
         Self { transitions, start, accept }
     }
 
-    pub(crate) fn states_from(
-        &self,
-        state: State,
-        src_validity: RangeInclusive<u8>,
-    ) -> impl Iterator<Item = (Byte, State)> {
-        self.transitions
-            .get(&state)
-            .map(move |t| t.byte_transitions.states_from(src_validity))
-            .into_iter()
-            .flatten()
-    }
-
     pub(crate) fn get_uninit_edge_dst(&self, state: State) -> Option<State> {
         let transitions = self.transitions.get(&state)?;
         transitions.byte_transitions.get_uninit_edge_dst()
@@ -334,95 +322,31 @@ where
 
 use edge_set::EdgeSet;
 mod edge_set {
-    use std::cmp;
-
-    use run::*;
-    use smallvec::{SmallVec, smallvec};
+    use smallvec::SmallVec;
 
     use super::*;
-    mod run {
-        use std::ops::{Range, RangeInclusive};
-
-        use super::*;
-        use crate::layout::Byte;
-
-        /// A logical set of edges.
-        ///
-        /// A `Run` encodes one edge for every byte value in `start..=end`
-        /// pointing to `dst`.
-        #[derive(Eq, PartialEq, Copy, Clone, Debug)]
-        pub(super) struct Run<S> {
-            // `start` and `end` are both inclusive (ie, closed) bounds, as this
-            // is required in order to be able to store 0..=255. We provide
-            // setters and getters which operate on closed/open ranges, which
-            // are more intuitive and easier for performing offset math.
-            start: u8,
-            end: u8,
-            pub(super) dst: S,
-        }
-
-        impl<S> Run<S> {
-            pub(super) fn new(range: RangeInclusive<u8>, dst: S) -> Self {
-                Self { start: *range.start(), end: *range.end(), dst }
-            }
-
-            pub(super) fn from_inclusive_exclusive(range: Range<u16>, dst: S) -> Self {
-                Self {
-                    start: range.start.try_into().unwrap(),
-                    end: (range.end - 1).try_into().unwrap(),
-                    dst,
-                }
-            }
-
-            pub(super) fn contains(&self, idx: u16) -> bool {
-                idx >= u16::from(self.start) && idx <= u16::from(self.end)
-            }
-
-            pub(super) fn as_inclusive_exclusive(&self) -> (u16, u16) {
-                (u16::from(self.start), u16::from(self.end) + 1)
-            }
-
-            pub(super) fn as_byte(&self) -> Byte {
-                Byte::new(self.start..=self.end)
-            }
 
-            pub(super) fn map_state<SS>(self, f: impl FnOnce(S) -> SS) -> Run<SS> {
-                let Run { start, end, dst } = self;
-                Run { start, end, dst: f(dst) }
-            }
-
-            /// Produces a new `Run` whose lower bound is the greater of
-            /// `self`'s existing lower bound and `lower_bound`.
-            pub(super) fn clamp_lower(self, lower_bound: u8) -> Self {
-                let Run { start, end, dst } = self;
-                Run { start: cmp::max(start, lower_bound), end, dst }
-            }
-        }
-    }
-
-    /// The set of outbound byte edges associated with a DFA node (not including
-    /// reference edges).
+    /// The set of outbound byte edges associated with a DFA node.
     #[derive(Eq, PartialEq, Clone, Debug)]
     pub(super) struct EdgeSet<S = State> {
-        // A sequence of runs stored in ascending order. Since the graph is a
-        // DFA, these must be non-overlapping with one another.
-        runs: SmallVec<[Run<S>; 1]>,
-        // The edge labeled with the uninit byte, if any.
+        // A sequence of byte edges with contiguous byte values and a common
+        // destination is stored as a single run.
         //
-        // FIXME(@joshlf): Make `State` a `NonZero` so that this is NPO'd.
-        uninit: Option<S>,
+        // Runs are non-empty, non-overlapping, and stored in ascending order.
+        runs: SmallVec<[(Byte, S); 1]>,
     }
 
     impl<S> EdgeSet<S> {
-        pub(crate) fn new(byte: Byte, dst: S) -> Self {
-            match byte.range() {
-                Some(range) => Self { runs: smallvec![Run::new(range, dst)], uninit: None },
-                None => Self { runs: SmallVec::new(), uninit: Some(dst) },
+        pub(crate) fn new(range: Byte, dst: S) -> Self {
+            let mut this = Self { runs: SmallVec::new() };
+            if !range.is_empty() {
+                this.runs.push((range, dst));
             }
+            this
         }
 
         pub(crate) fn empty() -> Self {
-            Self { runs: SmallVec::new(), uninit: None }
+            Self { runs: SmallVec::new() }
         }
 
         #[cfg(test)]
@@ -431,43 +355,23 @@ mod edge_set {
             S: Ord,
         {
             edges.sort();
-            Self {
-                runs: edges
-                    .into_iter()
-                    .map(|(byte, state)| Run::new(byte.range().unwrap(), state))
-                    .collect(),
-                uninit: None,
-            }
+            Self { runs: edges.into() }
         }
 
         pub(crate) fn iter(&self) -> impl Iterator<Item = (Byte, S)>
         where
             S: Copy,
         {
-            self.uninit
-                .map(|dst| (Byte::uninit(), dst))
-                .into_iter()
-                .chain(self.runs.iter().map(|run| (run.as_byte(), run.dst)))
-        }
-
-        pub(crate) fn states_from(
-            &self,
-            byte: RangeInclusive<u8>,
-        ) -> impl Iterator<Item = (Byte, S)>
-        where
-            S: Copy,
-        {
-            // FIXME(@joshlf): Optimize this. A manual scan over `self.runs` may
-            // permit us to more efficiently discard runs which will not be
-            // produced by this iterator.
-            self.iter().filter(move |(o, _)| Byte::new(byte.clone()).transmutable_into(&o))
+            self.runs.iter().copied()
         }
 
         pub(crate) fn get_uninit_edge_dst(&self) -> Option<S>
         where
             S: Copy,
         {
-            self.uninit
+            // Uninit is ordered last.
+            let &(range, dst) = self.runs.last()?;
+            if range.contains_uninit() { Some(dst) } else { None }
         }
 
         pub(crate) fn map_states<SS>(self, mut f: impl FnMut(S) -> SS) -> EdgeSet<SS> {
@@ -478,95 +382,106 @@ mod edge_set {
                 // allocates the correct number of elements once up-front [1].
                 //
                 // [1] https://doc.rust-lang.org/1.85.0/src/alloc/vec/spec_from_iter_nested.rs.html#47
-                runs: self.runs.into_iter().map(|run| run.map_state(&mut f)).collect(),
-                uninit: self.uninit.map(f),
+                runs: self.runs.into_iter().map(|(b, s)| (b, f(s))).collect(),
             }
         }
 
         /// Unions two edge sets together.
         ///
         /// If `u = a.union(b)`, then for each byte value, `u` will have an edge
-        /// with that byte value and with the destination `(Some(_), None)`,
-        /// `(None, Some(_))`, or `(Some(_), Some(_))` depending on whether `a`,
+        /// with that byte value and with the destination `join(Some(_), None)`,
+        /// `join(None, Some(_))`, or `join(Some(_), Some(_))` depending on whether `a`,
         /// `b`, or both have an edge with that byte value.
         ///
         /// If neither `a` nor `b` have an edge with a particular byte value,
         /// then no edge with that value will be present in `u`.
-        pub(crate) fn union(&self, other: &Self) -> EdgeSet<(Option<S>, Option<S>)>
+        pub(crate) fn union(
+            &self,
+            other: &Self,
+            mut join: impl FnMut(Option<S>, Option<S>) -> S,
+        ) -> EdgeSet<S>
         where
             S: Copy,
         {
-            let uninit = match (self.uninit, other.uninit) {
-                (None, None) => None,
-                (s, o) => Some((s, o)),
-            };
-
-            let mut runs = SmallVec::new();
-
-            // Iterate over `self.runs` and `other.runs` simultaneously,
-            // advancing `idx` as we go. At each step, we advance `idx` as far
-            // as we can without crossing a run boundary in either `self.runs`
-            // or `other.runs`.
-
-            // INVARIANT: `idx < s[0].end && idx < o[0].end`.
-            let (mut s, mut o) = (self.runs.as_slice(), other.runs.as_slice());
-            let mut idx = 0u16;
-            while let (Some((s_run, s_rest)), Some((o_run, o_rest))) =
-                (s.split_first(), o.split_first())
-            {
-                let (s_start, s_end) = s_run.as_inclusive_exclusive();
-                let (o_start, o_end) = o_run.as_inclusive_exclusive();
-
-                // Compute `end` as the end of the current run (which starts
-                // with `idx`).
-                let (end, dst) = match (s_run.contains(idx), o_run.contains(idx)) {
-                    // `idx` is in an existing run in both `s` and `o`, so `end`
-                    // is equal to the smallest of the two ends of those runs.
-                    (true, true) => (cmp::min(s_end, o_end), (Some(s_run.dst), Some(o_run.dst))),
-                    // `idx` is in an existing run in `s`, but not in any run in
-                    // `o`. `end` is either the end of the `s` run or the
-                    // beginning of the next `o` run, whichever comes first.
-                    (true, false) => (cmp::min(s_end, o_start), (Some(s_run.dst), None)),
-                    // The inverse of the previous case.
-                    (false, true) => (cmp::min(s_start, o_end), (None, Some(o_run.dst))),
-                    // `idx` is not in a run in either `s` or `o`, so advance it
-                    // to the beginning of the next run.
-                    (false, false) => {
-                        idx = cmp::min(s_start, o_start);
-                        continue;
-                    }
-                };
+            let xs = self.runs.iter().copied();
+            let ys = other.runs.iter().copied();
+            // FIXME(@joshlf): Merge contiguous runs with common destination.
+            EdgeSet { runs: union(xs, ys).map(|(range, (x, y))| (range, join(x, y))).collect() }
+        }
+    }
+}
+
+/// Merges two sorted sequences into one sorted sequence.
+pub(crate) fn union<S: Copy, X: Iterator<Item = (Byte, S)>, Y: Iterator<Item = (Byte, S)>>(
+    xs: X,
+    ys: Y,
+) -> UnionIter<X, Y> {
+    UnionIter { xs: xs.peekable(), ys: ys.peekable() }
+}
+
+pub(crate) struct UnionIter<X: Iterator, Y: Iterator> {
+    xs: Peekable<X>,
+    ys: Peekable<Y>,
+}
+
+// FIXME(jswrenn) we'd likely benefit from specializing try_fold here.
+impl<S: Copy, X: Iterator<Item = (Byte, S)>, Y: Iterator<Item = (Byte, S)>> Iterator
+    for UnionIter<X, Y>
+{
+    type Item = (Byte, (Option<S>, Option<S>));
 
-                // FIXME(@joshlf): If this is contiguous with the previous run
-                // and has the same `dst`, just merge it into that run rather
-                // than adding a new one.
-                runs.push(Run::from_inclusive_exclusive(idx..end, dst));
-                idx = end;
+    fn next(&mut self) -> Option<Self::Item> {
+        use std::cmp::{self, Ordering};
 
-                if idx >= s_end {
-                    s = s_rest;
+        let ret;
+        match (self.xs.peek_mut(), self.ys.peek_mut()) {
+            (None, None) => {
+                ret = None;
+            }
+            (Some(x), None) => {
+                ret = Some((x.0, (Some(x.1), None)));
+                self.xs.next();
+            }
+            (None, Some(y)) => {
+                ret = Some((y.0, (None, Some(y.1))));
+                self.ys.next();
+            }
+            (Some(x), Some(y)) => {
+                let start;
+                let end;
+                let dst;
+                match x.0.start.cmp(&y.0.start) {
+                    Ordering::Less => {
+                        start = x.0.start;
+                        end = cmp::min(x.0.end, y.0.start);
+                        dst = (Some(x.1), None);
+                    }
+                    Ordering::Greater => {
+                        start = y.0.start;
+                        end = cmp::min(x.0.start, y.0.end);
+                        dst = (None, Some(y.1));
+                    }
+                    Ordering::Equal => {
+                        start = x.0.start;
+                        end = cmp::min(x.0.end, y.0.end);
+                        dst = (Some(x.1), Some(y.1));
+                    }
                 }
-                if idx >= o_end {
-                    o = o_rest;
+                ret = Some((Byte { start, end }, dst));
+                if start == x.0.start {
+                    x.0.start = end;
+                }
+                if start == y.0.start {
+                    y.0.start = end;
+                }
+                if x.0.is_empty() {
+                    self.xs.next();
+                }
+                if y.0.is_empty() {
+                    self.ys.next();
                 }
             }
-
-            // At this point, either `s` or `o` have been exhausted, so the
-            // remaining elements in the other slice are guaranteed to be
-            // non-overlapping. We can add all remaining runs to `runs` with no
-            // further processing.
-            if let Ok(idx) = u8::try_from(idx) {
-                let (slc, map) = if !s.is_empty() {
-                    let map: fn(_) -> _ = |st| (Some(st), None);
-                    (s, map)
-                } else {
-                    let map: fn(_) -> _ = |st| (None, Some(st));
-                    (o, map)
-                };
-                runs.extend(slc.iter().map(|run| run.clamp_lower(idx).map_state(map)));
-            }
-
-            EdgeSet { runs, uninit }
         }
+        ret
     }
 }
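
For reference, the rewritten `union` above walks two peekable iterators of runs and splits them at each boundary, pairing up the destinations from either side. A self-contained sketch of the same idea under simplifying assumptions: half-open `u16` ranges instead of the crate's `Byte`, and a boundary-sweep rather than the peekable merge, so this is an illustration of the semantics, not the actual implementation:

use std::ops::Range;

type Run<S> = (Range<u16>, S);

// Merge two lists of ascending, non-overlapping runs. For every maximal
// interval on which membership in `xs`/`ys` does not change, emit that
// interval together with the destination from each side (if any).
fn union<S: Copy + PartialEq>(
    xs: &[Run<S>],
    ys: &[Run<S>],
) -> Vec<(Range<u16>, (Option<S>, Option<S>))> {
    // Every run start/end is a potential split point.
    let mut cuts: Vec<u16> =
        xs.iter().chain(ys).flat_map(|(r, _)| [r.start, r.end]).collect();
    cuts.sort_unstable();
    cuts.dedup();

    let lookup = |runs: &[Run<S>], point: u16| {
        runs.iter().find(|(r, _)| r.contains(&point)).map(|&(_, dst)| dst)
    };

    let mut out = Vec::new();
    for w in cuts.windows(2) {
        let (start, end) = (w[0], w[1]);
        let pair = (lookup(xs, start), lookup(ys, start));
        if pair != (None, None) {
            out.push((start..end, pair));
        }
    }
    out
}

fn main() {
    // 0..10 -> 'a' on one side, 5..15 -> 'b' on the other.
    let merged = union(&[(0..10, 'a')], &[(5..15, 'b')]);
    assert_eq!(
        merged,
        vec![
            (0..5, (Some('a'), None)),
            (5..10, (Some('a'), Some('b'))),
            (10..15, (None, Some('b'))),
        ]
    );
}
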
diff --git a/compiler/rustc_transmute/src/layout/mod.rs b/compiler/rustc_transmute/src/layout/mod.rs
index 4d5f630ae22..c08bf440734 100644
--- a/compiler/rustc_transmute/src/layout/mod.rs
+++ b/compiler/rustc_transmute/src/layout/mod.rs
@@ -6,61 +6,61 @@ pub(crate) mod tree;
 pub(crate) use tree::Tree;
 
 pub(crate) mod dfa;
-pub(crate) use dfa::Dfa;
+pub(crate) use dfa::{Dfa, union};
 
 #[derive(Debug)]
 pub(crate) struct Uninhabited;
 
-/// A range of byte values, or the uninit byte.
+/// A range of byte values (possibly including the uninit byte value).
 #[derive(Hash, Eq, PartialEq, Ord, PartialOrd, Clone, Copy)]
 pub(crate) struct Byte {
-    // An inclusive-inclusive range. We use this instead of `RangeInclusive`
-    // because `RangeInclusive: !Copy`.
+    // An inclusive-exclusive range. We use this instead of `Range` because `Range: !Copy`.
     //
-    // `None` means uninit.
-    //
-    // FIXME(@joshlf): Optimize this representation. Some pairs of values (where
-    // `lo > hi`) are illegal, and we could use these to represent `None`.
-    range: Option<(u8, u8)>,
+    // Uninit byte value is represented by 256.
+    pub(crate) start: u16,
+    pub(crate) end: u16,
 }
 
 impl Byte {
+    const UNINIT: u16 = 256;
+
+    #[inline]
     fn new(range: RangeInclusive<u8>) -> Self {
-        Self { range: Some((*range.start(), *range.end())) }
+        let start: u16 = (*range.start()).into();
+        let end: u16 = (*range.end()).into();
+        Byte { start, end: end + 1 }
     }
 
+    #[inline]
     fn from_val(val: u8) -> Self {
-        Self { range: Some((val, val)) }
+        let val: u16 = val.into();
+        Byte { start: val, end: val + 1 }
     }
 
-    pub(crate) fn uninit() -> Byte {
-        Byte { range: None }
+    #[inline]
+    fn uninit() -> Byte {
+        Byte { start: 0, end: Self::UNINIT + 1 }
     }
 
-    /// Returns `None` if `self` is the uninit byte.
-    pub(crate) fn range(&self) -> Option<RangeInclusive<u8>> {
-        self.range.map(|(lo, hi)| lo..=hi)
+    #[inline]
+    fn is_empty(&self) -> bool {
+        self.start == self.end
     }
 
-    /// Are any of the values in `self` transmutable into `other`?
-    ///
-    /// Note two special cases: An uninit byte is only transmutable into another
-    /// uninit byte. Any byte is transmutable into an uninit byte.
-    pub(crate) fn transmutable_into(&self, other: &Byte) -> bool {
-        match (self.range, other.range) {
-            (None, None) => true,
-            (None, Some(_)) => false,
-            (Some(_), None) => true,
-            (Some((slo, shi)), Some((olo, ohi))) => slo <= ohi && olo <= shi,
-        }
+    #[inline]
+    fn contains_uninit(&self) -> bool {
+        self.start <= Self::UNINIT && Self::UNINIT < self.end
     }
 }
 
 impl fmt::Debug for Byte {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        match self.range {
-            None => write!(f, "uninit"),
-            Some((lo, hi)) => write!(f, "{lo}..={hi}"),
+        if self.start == Self::UNINIT && self.end == Self::UNINIT + 1 {
+            write!(f, "uninit")
+        } else if self.start <= Self::UNINIT && self.end == Self::UNINIT + 1 {
+            write!(f, "{}..{}|uninit", self.start, self.end - 1)
+        } else {
+            write!(f, "{}..{}", self.start, self.end)
         }
     }
 }
@@ -72,6 +72,7 @@ impl From<RangeInclusive<u8>> for Byte {
 }
 
 impl From<u8> for Byte {
+    #[inline]
     fn from(src: u8) -> Self {
         Self::from_val(src)
     }
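
The key change above is that `Byte` is now a half-open range over `u16`, with 256 standing in for the uninit byte, so "uninit" participates in ordinary range arithmetic instead of being a separate `Option` case. A standalone sketch of the same encoding; the names here are illustrative, not the crate's:

// Half-open range of "byte-or-uninit" values: 0..=255 are ordinary byte
// values, 256 is the uninit marker.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
struct ByteRange {
    start: u16, // inclusive
    end: u16,   // exclusive
}

const UNINIT: u16 = 256;

impl ByteRange {
    fn from_val(val: u8) -> Self {
        let v = u16::from(val);
        ByteRange { start: v, end: v + 1 }
    }

    fn uninit() -> Self {
        // All 256 byte values plus the uninit marker.
        ByteRange { start: 0, end: UNINIT + 1 }
    }

    fn is_empty(self) -> bool {
        self.start == self.end
    }

    fn contains_uninit(self) -> bool {
        self.start <= UNINIT && UNINIT < self.end
    }
}

fn main() {
    assert!(!ByteRange::from_val(0xFF).contains_uninit());
    assert!(ByteRange::uninit().contains_uninit());
    assert!(ByteRange { start: 7, end: 7 }.is_empty());
}
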
diff --git a/compiler/rustc_transmute/src/maybe_transmutable/mod.rs b/compiler/rustc_transmute/src/maybe_transmutable/mod.rs
index 0a19cccc2ed..f76abe50ed3 100644
--- a/compiler/rustc_transmute/src/maybe_transmutable/mod.rs
+++ b/compiler/rustc_transmute/src/maybe_transmutable/mod.rs
@@ -1,14 +1,11 @@
-use std::rc::Rc;
-use std::{cmp, iter};
-
-use itertools::Either;
+use rustc_data_structures::stack::ensure_sufficient_stack;
 use tracing::{debug, instrument, trace};
 
 pub(crate) mod query_context;
 #[cfg(test)]
 mod tests;
 
-use crate::layout::{self, Byte, Def, Dfa, Ref, Tree, dfa};
+use crate::layout::{self, Def, Dfa, Ref, Tree, dfa, union};
 use crate::maybe_transmutable::query_context::QueryContext;
 use crate::{Answer, Condition, Map, Reason};
 
@@ -153,230 +150,135 @@ where
         if let Some(answer) = cache.get(&(src_state, dst_state)) {
             answer.clone()
         } else {
-            debug!(?src_state, ?dst_state);
-            debug!(src = ?self.src);
-            debug!(dst = ?self.dst);
-            debug!(
-                src_transitions_len = self.src.transitions.len(),
-                dst_transitions_len = self.dst.transitions.len()
-            );
-            let answer = if dst_state == self.dst.accept {
-                // truncation: `size_of(Src) >= size_of(Dst)`
-                //
-                // Why is truncation OK to do? Because even though the Src is bigger, all we care about
-                // is whether we have enough data for the Dst to be valid in accordance with what its
-                // type dictates.
-                // For example, in a u8 to `()` transmutation, we have enough data available from the u8
-                // to transmute it to a `()` (though in this case does `()` really need any data to
-                // begin with? It doesn't). Same thing with u8 to fieldless struct.
-                // Now then, why is something like u8 to bool not allowed? That is not because the bool
-                // is smaller in size, but rather because those 2 bits that we are re-interpreting from
-                // the u8 could introduce invalid states for the bool type.
-                //
-                // So, if it's possible to transmute to a smaller Dst by truncating, and we can guarantee
-                // that none of the actually-used data can introduce an invalid state for Dst's type, we
-                // are able to safely transmute, even with truncation.
-                Answer::Yes
-            } else if src_state == self.src.accept {
-                // extension: `size_of(Src) <= size_of(Dst)`
-                if let Some(dst_state_prime) = self.dst.get_uninit_edge_dst(dst_state) {
-                    self.answer_memo(cache, src_state, dst_state_prime)
-                } else {
-                    Answer::No(Reason::DstIsTooBig)
-                }
+            let answer = ensure_sufficient_stack(|| self.answer_impl(cache, src_state, dst_state));
+            if let Some(..) = cache.insert((src_state, dst_state), answer.clone()) {
+                panic!("failed to correctly cache transmutability")
+            }
+            answer
+        }
+    }
+
+    fn answer_impl(
+        &self,
+        cache: &mut Map<(dfa::State, dfa::State), Answer<<C as QueryContext>::Ref>>,
+        src_state: dfa::State,
+        dst_state: dfa::State,
+    ) -> Answer<<C as QueryContext>::Ref> {
+        debug!(?src_state, ?dst_state);
+        debug!(src = ?self.src);
+        debug!(dst = ?self.dst);
+        debug!(
+            src_transitions_len = self.src.transitions.len(),
+            dst_transitions_len = self.dst.transitions.len()
+        );
+        if dst_state == self.dst.accept {
+            // truncation: `size_of(Src) >= size_of(Dst)`
+            //
+            // Why is truncation OK to do? Because even though the Src is bigger, all we care about
+            // is whether we have enough data for the Dst to be valid in accordance with what its
+            // type dictates.
+            // For example, in a u8 to `()` transmutation, we have enough data available from the u8
+            // to transmute it to a `()` (though in this case does `()` really need any data to
+            // begin with? It doesn't). Same thing with u8 to fieldless struct.
+            // Now then, why is something like u8 to bool not allowed? That is not because the bool
+            // is smaller in size, but rather because those 2 bits that we are re-interpreting from
+            // the u8 could introduce invalid states for the bool type.
+            //
+            // So, if it's possible to transmute to a smaller Dst by truncating, and we can guarantee
+            // that none of the actually-used data can introduce an invalid state for Dst's type, we
+            // are able to safely transmute, even with truncation.
+            Answer::Yes
+        } else if src_state == self.src.accept {
+            // extension: `size_of(Src) <= size_of(Dst)`
+            if let Some(dst_state_prime) = self.dst.get_uninit_edge_dst(dst_state) {
+                self.answer_memo(cache, src_state, dst_state_prime)
+            } else {
+                Answer::No(Reason::DstIsTooBig)
+            }
+        } else {
+            let src_quantifier = if self.assume.validity {
+                // if the compiler may assume that the programmer is doing additional validity checks,
+                // (e.g.: that `src != 3u8` when the destination type is `bool`)
+                // then there must exist at least one transition out of `src_state` such that the transmute is viable...
+                Quantifier::ThereExists
             } else {
-                let src_quantifier = if self.assume.validity {
-                    // if the compiler may assume that the programmer is doing additional validity checks,
-                    // (e.g.: that `src != 3u8` when the destination type is `bool`)
-                    // then there must exist at least one transition out of `src_state` such that the transmute is viable...
-                    Quantifier::ThereExists
-                } else {
-                    // if the compiler cannot assume that the programmer is doing additional validity checks,
-                    // then for all transitions out of `src_state`, such that the transmute is viable...
-                    // then there must exist at least one transition out of `dst_state` such that the transmute is viable...
-                    Quantifier::ForAll
-                };
-
-                let c = &core::cell::RefCell::new(&mut *cache);
-                let bytes_answer = src_quantifier.apply(
-                    // for each of the byte set transitions out of the `src_state`...
-                    self.src.bytes_from(src_state).flat_map(
-                        move |(src_validity, src_state_prime)| {
-                            // ...find all matching transitions out of `dst_state`.
-
-                            let Some(src_validity) = src_validity.range() else {
-                                // NOTE: We construct an iterator here rather
-                                // than just computing the value directly (via
-                                // `self.answer_memo`) so that, if the iterator
-                                // we produce from this branch is
-                                // short-circuited, we don't waste time
-                                // computing `self.answer_memo` unnecessarily.
-                                // That will specifically happen if
-                                // `src_quantifier == Quantifier::ThereExists`,
-                                // since we emit `Answer::Yes` first (before
-                                // chaining `answer_iter`).
-                                let answer_iter = if let Some(dst_state_prime) =
-                                    self.dst.get_uninit_edge_dst(dst_state)
-                                {
-                                    Either::Left(iter::once_with(move || {
-                                        let mut c = c.borrow_mut();
-                                        self.answer_memo(&mut *c, src_state_prime, dst_state_prime)
-                                    }))
-                                } else {
-                                    Either::Right(iter::once(Answer::No(
-                                        Reason::DstIsBitIncompatible,
-                                    )))
-                                };
-
-                                // When `answer == Answer::No(...)`, there are
-                                // two cases to consider:
-                                // - If `assume.validity`, then we should
-                                //   succeed because the user is responsible for
-                                //   ensuring that the *specific* byte value
-                                //   appearing at runtime is valid for the
-                                //   destination type. When `assume.validity`,
-                                //   `src_quantifier ==
-                                //   Quantifier::ThereExists`, so adding an
-                                //   `Answer::Yes` has the effect of ensuring
-                                //   that the "there exists" is always
-                                //   satisfied.
-                                // - If `!assume.validity`, then we should fail.
-                                //   In this case, `src_quantifier ==
-                                //   Quantifier::ForAll`, so adding an
-                                //   `Answer::Yes` has no effect.
-                                return Either::Left(iter::once(Answer::Yes).chain(answer_iter));
-                            };
-
-                            #[derive(Copy, Clone, Debug)]
-                            struct Accum {
-                                // The number of matching byte edges that we
-                                // have found in the destination so far.
-                                sum: usize,
-                                found_uninit: bool,
+                // if the compiler cannot assume that the programmer is doing additional validity checks,
+                // then for all transitions out of `src_state`, such that the transmute is viable...
+                // then there must exist at least one transition out of `dst_state` such that the transmute is viable...
+                Quantifier::ForAll
+            };
+
+            let bytes_answer = src_quantifier.apply(
+                union(self.src.bytes_from(src_state), self.dst.bytes_from(dst_state)).filter_map(
+                    |(_range, (src_state_prime, dst_state_prime))| {
+                        match (src_state_prime, dst_state_prime) {
+                            // No matching transitions in `src`. Skip.
+                            (None, _) => None,
+                            // No matching transitions in `dst`. Fail.
+                            (Some(_), None) => Some(Answer::No(Reason::DstIsBitIncompatible)),
+                            // Matching transitions. Continue with successor states.
+                            (Some(src_state_prime), Some(dst_state_prime)) => {
+                                Some(self.answer_memo(cache, src_state_prime, dst_state_prime))
                             }
+                        }
+                    },
+                ),
+            );
 
-                            let accum1 = Rc::new(std::cell::Cell::new(Accum {
-                                sum: 0,
-                                found_uninit: false,
-                            }));
-                            let accum2 = Rc::clone(&accum1);
-                            let sv = src_validity.clone();
-                            let update_accum = move |mut accum: Accum, dst_validity: Byte| {
-                                if let Some(dst_validity) = dst_validity.range() {
-                                    // Only add the part of `dst_validity` that
-                                    // overlaps with `src_validity`.
-                                    let start = cmp::max(*sv.start(), *dst_validity.start());
-                                    let end = cmp::min(*sv.end(), *dst_validity.end());
-
-                                    // We add 1 here to account for the fact
-                                    // that `end` is an inclusive bound.
-                                    accum.sum += 1 + usize::from(end.saturating_sub(start));
-                                } else {
-                                    accum.found_uninit = true;
-                                }
-                                accum
-                            };
-
-                            let answers = self
-                                .dst
-                                .states_from(dst_state, src_validity.clone())
-                                .map(move |(dst_validity, dst_state_prime)| {
-                                    let mut c = c.borrow_mut();
-                                    accum1.set(update_accum(accum1.get(), dst_validity));
-                                    let answer =
-                                        self.answer_memo(&mut *c, src_state_prime, dst_state_prime);
-                                    answer
+            // The below early returns reflect how this code would behave:
+            //   if self.assume.validity {
+            //       or(bytes_answer, refs_answer)
+            //   } else {
+            //       and(bytes_answer, refs_answer)
+            //   }
+            // ...if `refs_answer` was computed lazily. The below early
+            // returns can be deleted without impacting the correctness of
+            // the algorithm; only its performance.
+            debug!(?bytes_answer);
+            match bytes_answer {
+                Answer::No(_) if !self.assume.validity => return bytes_answer,
+                Answer::Yes if self.assume.validity => return bytes_answer,
+                _ => {}
+            };
+
+            let refs_answer = src_quantifier.apply(
+                // for each reference transition out of `src_state`...
+                self.src.refs_from(src_state).map(|(src_ref, src_state_prime)| {
+                    // ...there exists a reference transition out of `dst_state`...
+                    Quantifier::ThereExists.apply(self.dst.refs_from(dst_state).map(
+                        |(dst_ref, dst_state_prime)| {
+                            if !src_ref.is_mutable() && dst_ref.is_mutable() {
+                                Answer::No(Reason::DstIsMoreUnique)
+                            } else if !self.assume.alignment
+                                && src_ref.min_align() < dst_ref.min_align()
+                            {
+                                Answer::No(Reason::DstHasStricterAlignment {
+                                    src_min_align: src_ref.min_align(),
+                                    dst_min_align: dst_ref.min_align(),
                                 })
-                                .chain(
-                                    iter::once_with(move || {
-                                        let src_validity_len = usize::from(*src_validity.end())
-                                            - usize::from(*src_validity.start())
-                                            + 1;
-                                        let accum = accum2.get();
-
-                                        // If this condition is false, then
-                                        // there are some byte values in the
-                                        // source which have no corresponding
-                                        // transition in the destination DFA. In
-                                        // that case, we add a `No` to our list
-                                        // of answers. When
-                                        // `!self.assume.validity`, this will
-                                        // cause the query to fail.
-                                        if accum.found_uninit || accum.sum == src_validity_len {
-                                            None
-                                        } else {
-                                            Some(Answer::No(Reason::DstIsBitIncompatible))
-                                        }
-                                    })
-                                    .flatten(),
-                                );
-                            Either::Right(answers)
-                        },
-                    ),
-                );
-
-                // The below early returns reflect how this code would behave:
-                //   if self.assume.validity {
-                //       or(bytes_answer, refs_answer)
-                //   } else {
-                //       and(bytes_answer, refs_answer)
-                //   }
-                // ...if `refs_answer` was computed lazily. The below early
-                // returns can be deleted without impacting the correctness of
-                // the algorithm; only its performance.
-                debug!(?bytes_answer);
-                match bytes_answer {
-                    Answer::No(_) if !self.assume.validity => return bytes_answer,
-                    Answer::Yes if self.assume.validity => return bytes_answer,
-                    _ => {}
-                };
-
-                let refs_answer = src_quantifier.apply(
-                    // for each reference transition out of `src_state`...
-                    self.src.refs_from(src_state).map(|(src_ref, src_state_prime)| {
-                        // ...there exists a reference transition out of `dst_state`...
-                        Quantifier::ThereExists.apply(self.dst.refs_from(dst_state).map(
-                            |(dst_ref, dst_state_prime)| {
-                                if !src_ref.is_mutable() && dst_ref.is_mutable() {
-                                    Answer::No(Reason::DstIsMoreUnique)
-                                } else if !self.assume.alignment
-                                    && src_ref.min_align() < dst_ref.min_align()
-                                {
-                                    Answer::No(Reason::DstHasStricterAlignment {
-                                        src_min_align: src_ref.min_align(),
-                                        dst_min_align: dst_ref.min_align(),
-                                    })
-                                } else if dst_ref.size() > src_ref.size() {
-                                    Answer::No(Reason::DstRefIsTooBig {
+                            } else if dst_ref.size() > src_ref.size() {
+                                Answer::No(Reason::DstRefIsTooBig { src: src_ref, dst: dst_ref })
+                            } else {
+                                // ...such that `src` is transmutable into `dst`, if
+                                // `src_ref` is transmutable into `dst_ref`.
+                                and(
+                                    Answer::If(Condition::IfTransmutable {
                                         src: src_ref,
                                         dst: dst_ref,
-                                    })
-                                } else {
-                                    // ...such that `src` is transmutable into `dst`, if
-                                    // `src_ref` is transmutability into `dst_ref`.
-                                    and(
-                                        Answer::If(Condition::IfTransmutable {
-                                            src: src_ref,
-                                            dst: dst_ref,
-                                        }),
-                                        self.answer_memo(cache, src_state_prime, dst_state_prime),
-                                    )
-                                }
-                            },
-                        ))
-                    }),
-                );
-
-                if self.assume.validity {
-                    or(bytes_answer, refs_answer)
-                } else {
-                    and(bytes_answer, refs_answer)
-                }
-            };
-            if let Some(..) = cache.insert((src_state, dst_state), answer.clone()) {
-                panic!("failed to correctly cache transmutability")
+                                    }),
+                                    self.answer_memo(cache, src_state_prime, dst_state_prime),
+                                )
+                            }
+                        },
+                    ))
+                }),
+            );
+
+            if self.assume.validity {
+                or(bytes_answer, refs_answer)
+            } else {
+                and(bytes_answer, refs_answer)
             }
-            answer
         }
     }
 }
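
The restructured `answer_impl` above pairs source and destination byte transitions via `union` and then folds the per-transition answers with a quantifier: `ThereExists` when `assume.validity` holds, `ForAll` otherwise. A toy sketch of that quantifier fold with a simplified two-valued `Answer` (the real one also carries conditional answers and "no" reasons), purely for illustration:

#[derive(Clone, Copy)]
enum Quantifier {
    ThereExists,
    ForAll,
}

#[derive(Clone, Copy, Debug, PartialEq)]
enum Answer {
    Yes,
    No,
}

impl Quantifier {
    fn apply(self, answers: impl IntoIterator<Item = Answer>) -> Answer {
        match self {
            // One viable transition is enough: stop at the first `Yes`.
            Quantifier::ThereExists => answers
                .into_iter()
                .find(|a| *a == Answer::Yes)
                .unwrap_or(Answer::No),
            // Every transition must be viable: stop at the first `No`.
            Quantifier::ForAll => answers
                .into_iter()
                .find(|a| *a == Answer::No)
                .unwrap_or(Answer::Yes),
        }
    }
}

fn main() {
    let assume_validity = true;
    let quantifier =
        if assume_validity { Quantifier::ThereExists } else { Quantifier::ForAll };
    let per_transition = [Answer::No, Answer::Yes, Answer::No];
    assert_eq!(quantifier.apply(per_transition), Answer::Yes);
}
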
diff --git a/compiler/rustc_transmute/src/maybe_transmutable/tests.rs b/compiler/rustc_transmute/src/maybe_transmutable/tests.rs
index 992fcb7cc4c..fbb4639dbd6 100644
--- a/compiler/rustc_transmute/src/maybe_transmutable/tests.rs
+++ b/compiler/rustc_transmute/src/maybe_transmutable/tests.rs
@@ -400,16 +400,23 @@ mod r#ref {
     fn should_permit_identity_transmutation() {
         type Tree = crate::layout::Tree<Def, [(); 1]>;
 
-        let layout = Tree::Seq(vec![Tree::byte(0x00), Tree::Ref([()])]);
+        for validity in [false, true] {
+            let layout = Tree::Seq(vec![Tree::byte(0x00), Tree::Ref([()])]);
 
-        let answer = crate::maybe_transmutable::MaybeTransmutableQuery::new(
-            layout.clone(),
-            layout,
-            Assume::default(),
-            UltraMinimal::default(),
-        )
-        .answer();
-        assert_eq!(answer, Answer::If(crate::Condition::IfTransmutable { src: [()], dst: [()] }));
+            let assume = Assume { validity, ..Assume::default() };
+
+            let answer = crate::maybe_transmutable::MaybeTransmutableQuery::new(
+                layout.clone(),
+                layout,
+                assume,
+                UltraMinimal::default(),
+            )
+            .answer();
+            assert_eq!(
+                answer,
+                Answer::If(crate::Condition::IfTransmutable { src: [()], dst: [()] })
+            );
+        }
     }
 }
 
diff --git a/compiler/rustc_ty_utils/src/layout.rs b/compiler/rustc_ty_utils/src/layout.rs
index 16336ed530a..908fcb14cb2 100644
--- a/compiler/rustc_ty_utils/src/layout.rs
+++ b/compiler/rustc_ty_utils/src/layout.rs
@@ -259,13 +259,95 @@ fn layout_of_uncached<'tcx>(
                         };
 
                         layout.largest_niche = Some(niche);
-
-                        tcx.mk_layout(layout)
                     } else {
                         bug!("pattern type with range but not scalar layout: {ty:?}, {layout:?}")
                     }
                 }
+                ty::PatternKind::Or(variants) => match *variants[0] {
+                    ty::PatternKind::Range { .. } => {
+                        if let BackendRepr::Scalar(scalar) = &mut layout.backend_repr {
+                            let variants: Result<Vec<_>, _> = variants
+                                .iter()
+                                .map(|pat| match *pat {
+                                    ty::PatternKind::Range { start, end } => Ok((
+                                        extract_const_value(cx, ty, start)
+                                            .unwrap()
+                                            .try_to_bits(tcx, cx.typing_env)
+                                            .ok_or_else(|| error(cx, LayoutError::Unknown(ty)))?,
+                                        extract_const_value(cx, ty, end)
+                                            .unwrap()
+                                            .try_to_bits(tcx, cx.typing_env)
+                                            .ok_or_else(|| error(cx, LayoutError::Unknown(ty)))?,
+                                    )),
+                                    ty::PatternKind::Or(_) => {
+                                        unreachable!("mixed or patterns are not allowed")
+                                    }
+                                })
+                                .collect();
+                            let mut variants = variants?;
+                            if !scalar.is_signed() {
+                                let guar = tcx.dcx().err(format!(
+                                    "only signed integer base types are allowed for or-pattern pattern types at present"
+                                ));
+
+                                return Err(error(cx, LayoutError::ReferencesError(guar)));
+                            }
+                            variants.sort();
+                            if variants.len() != 2 {
+                                let guar = tcx
+                                .dcx()
+                                .err(format!("the only or-pattern pattern types allowed at present are two range patterns connected at the base type's overflow boundary"));
+
+                                return Err(error(cx, LayoutError::ReferencesError(guar)));
+                            }
+
+                            // `first` is the range that starts at the signed integer minimum
+                            let mut first = variants[0];
+                            let mut second = variants[1];
+                            if second.0
+                                == layout.size.truncate(layout.size.signed_int_min() as u128)
+                            {
+                                (second, first) = (first, second);
+                            }
+
+                            if layout.size.sign_extend(first.1) >= layout.size.sign_extend(second.0)
+                            {
+                                let guar = tcx.dcx().err(format!(
+                                    "only non-overlapping pattern type ranges are allowed at present"
+                                ));
+
+                                return Err(error(cx, LayoutError::ReferencesError(guar)));
+                            }
+                            if layout.size.signed_int_max() as u128 != second.1 {
+                                let guar = tcx.dcx().err(format!(
+                                    "one pattern needs to end at `{ty}::MAX`, but it ends at {} instead",
+                                    second.1
+                                ));
+
+                                return Err(error(cx, LayoutError::ReferencesError(guar)));
+                            }
+
+                            // Now generate a wrapping range (which isn't allowed in surface syntax).
+                            scalar.valid_range_mut().start = second.0;
+                            scalar.valid_range_mut().end = first.1;
+
+                            let niche = Niche {
+                                offset: Size::ZERO,
+                                value: scalar.primitive(),
+                                valid_range: scalar.valid_range(cx),
+                            };
+
+                            layout.largest_niche = Some(niche);
+                        } else {
+                            bug!(
+                                "pattern type with range but not scalar layout: {ty:?}, {layout:?}"
+                            )
+                        }
+                    }
+                    ty::PatternKind::Or(..) => bug!("patterns cannot have nested or patterns"),
+                },
             }
+            tcx.mk_layout(layout)
         }
 
         // Basic scalars.
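
A note on the or-pattern layout logic added above: the two accepted ranges must be non-overlapping, with the upper one ending at the base type's MAX, so together they wrap around the overflow point and collapse into a single wrapping valid range (for the `i8` example below this is exactly the `NonZero`-style niche 1..=255). The sketch mirrors that arithmetic with `i8` standing in for the base type and plain `(start, end)` tuples in place of the raw-bit handling via `Size::sign_extend`/`Size::truncate`; the informal notation `i8 is i8::MIN..=-1 | 1..` in the comment only suggests the shape of such a pattern type, not stable surface syntax.

    // Sketch only: how two signed ranges that together wrap around the
    // overflow point collapse into one wrapping valid range, mirroring
    // the layout.rs hunk above.
    fn wrapping_valid_range(mut a: (i8, i8), mut b: (i8, i8)) -> Result<(u8, u8), &'static str> {
        // Normalize so that `a` is the range starting at the signed minimum.
        if b.0 == i8::MIN {
            std::mem::swap(&mut a, &mut b);
        }
        if a.1 >= b.0 {
            return Err("ranges overlap");
        }
        if b.1 != i8::MAX {
            return Err("second range must end at the base type's MAX");
        }
        // The valid range wraps: everything from `b.0` up through `a.1`,
        // interpreted as unsigned bits.
        Ok((b.0 as u8, a.1 as u8))
    }

    fn main() {
        // Roughly `i8 is i8::MIN..=-1 | 1..`, i.e. every value except 0:
        // as wrapping unsigned bits this is the NonZero-style niche 1..=255.
        assert_eq!(wrapping_valid_range((i8::MIN, -1), (1, i8::MAX)), Ok((1, 255)));
    }
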
diff --git a/compiler/rustc_type_ir/src/canonical.rs b/compiler/rustc_type_ir/src/canonical.rs
index 03d3194f106..67b67df4b28 100644
--- a/compiler/rustc_type_ir/src/canonical.rs
+++ b/compiler/rustc_type_ir/src/canonical.rs
@@ -34,7 +34,6 @@ pub struct CanonicalQueryInput<I: Interner, V> {
 #[derive_where(Eq; I: Interner, V: Eq)]
 #[derive_where(Debug; I: Interner, V: fmt::Debug)]
 #[derive_where(Copy; I: Interner, V: Copy)]
-#[derive(TypeVisitable_Generic, TypeFoldable_Generic)]
 #[cfg_attr(
     feature = "nightly",
     derive(Encodable_NoContext, Decodable_NoContext, HashStable_NoContext)
@@ -147,7 +146,6 @@ impl<I: Interner> CanonicalVarInfo<I> {
 /// in the type-theory sense of the term -- i.e., a "meta" type system
 /// that analyzes type-like values.
 #[derive_where(Clone, Copy, Hash, PartialEq, Eq, Debug; I: Interner)]
-#[derive(TypeVisitable_Generic, TypeFoldable_Generic)]
 #[cfg_attr(
     feature = "nightly",
     derive(Decodable_NoContext, Encodable_NoContext, HashStable_NoContext)
diff --git a/compiler/rustc_type_ir/src/flags.rs b/compiler/rustc_type_ir/src/flags.rs
index b37347354fa..7ed0f92b639 100644
--- a/compiler/rustc_type_ir/src/flags.rs
+++ b/compiler/rustc_type_ir/src/flags.rs
@@ -304,7 +304,7 @@ impl<I: Interner> FlagComputation<I> {
 
             ty::Pat(ty, pat) => {
                 self.add_ty(ty);
-                self.add_flags(pat.flags());
+                self.add_ty_pat(pat);
             }
 
             ty::Slice(tt) => self.add_ty(tt),
@@ -338,6 +338,10 @@ impl<I: Interner> FlagComputation<I> {
         }
     }
 
+    fn add_ty_pat(&mut self, pat: <I as Interner>::Pat) {
+        self.add_flags(pat.flags());
+    }
+
     fn add_predicate(&mut self, binder: ty::Binder<I, ty::PredicateKind<I>>) {
         self.bound_computation(binder, |computation, atom| computation.add_predicate_atom(atom));
     }
diff --git a/compiler/rustc_type_ir/src/interner.rs b/compiler/rustc_type_ir/src/interner.rs
index 9758cecaf6a..6410da1f740 100644
--- a/compiler/rustc_type_ir/src/interner.rs
+++ b/compiler/rustc_type_ir/src/interner.rs
@@ -113,6 +113,13 @@ pub trait Interner:
         + Relate<Self>
         + Flags
         + IntoKind<Kind = ty::PatternKind<Self>>;
+    type PatList: Copy
+        + Debug
+        + Hash
+        + Default
+        + Eq
+        + TypeVisitable<Self>
+        + SliceLike<Item = Self::Pat>;
     type Safety: Safety<Self>;
     type Abi: Abi<Self>;
 
diff --git a/compiler/rustc_type_ir/src/pattern.rs b/compiler/rustc_type_ir/src/pattern.rs
index d74a82da1f9..7e56565917c 100644
--- a/compiler/rustc_type_ir/src/pattern.rs
+++ b/compiler/rustc_type_ir/src/pattern.rs
@@ -13,4 +13,5 @@ use crate::Interner;
 )]
 pub enum PatternKind<I: Interner> {
     Range { start: I::Const, end: I::Const },
+    Or(I::PatList),
 }
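
With the new `Or` variant above, every exhaustive match on `PatternKind` needs an extra arm, usually recursing the way `push_ty_pat` does in the walk.rs hunk below. The following self-contained sketch shows that shape; the non-generic mirror enum, `Vec`, and `i128` are stand-ins for the interner-parameterized type, `I::PatList`, and `I::Const`.

    // Sketch only: a non-generic mirror of `PatternKind` showing the
    // recursive match shape consumers need once `Or` exists.
    #[derive(Debug)]
    enum PatternKind {
        Range { start: i128, end: i128 },
        Or(Vec<PatternKind>),
    }

    // Collects every leaf `Range` reachable from `pat`, recursing through `Or`.
    fn leaf_ranges(pat: &PatternKind, out: &mut Vec<(i128, i128)>) {
        match pat {
            PatternKind::Range { start, end } => out.push((*start, *end)),
            PatternKind::Or(pats) => {
                for p in pats {
                    leaf_ranges(p, out);
                }
            }
        }
    }

    fn main() {
        let pat = PatternKind::Or(vec![
            PatternKind::Range { start: i8::MIN as i128, end: -1 },
            PatternKind::Range { start: 1, end: i8::MAX as i128 },
        ]);
        let mut out = Vec::new();
        leaf_ranges(&pat, &mut out);
        let expected: Vec<(i128, i128)> = vec![(-128, -1), (1, 127)];
        assert_eq!(out, expected);
    }
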
diff --git a/compiler/rustc_type_ir/src/walk.rs b/compiler/rustc_type_ir/src/walk.rs
index 5683e1f1712..737550eb73e 100644
--- a/compiler/rustc_type_ir/src/walk.rs
+++ b/compiler/rustc_type_ir/src/walk.rs
@@ -89,12 +89,7 @@ fn push_inner<I: Interner>(stack: &mut TypeWalkerStack<I>, parent: I::GenericArg
             | ty::Foreign(..) => {}
 
             ty::Pat(ty, pat) => {
-                match pat.kind() {
-                    ty::PatternKind::Range { start, end } => {
-                        stack.push(end.into());
-                        stack.push(start.into());
-                    }
-                }
+                push_ty_pat::<I>(stack, pat);
                 stack.push(ty.into());
             }
             ty::Array(ty, len) => {
@@ -171,3 +166,17 @@ fn push_inner<I: Interner>(stack: &mut TypeWalkerStack<I>, parent: I::GenericArg
         },
     }
 }
+
+fn push_ty_pat<I: Interner>(stack: &mut TypeWalkerStack<I>, pat: I::Pat) {
+    match pat.kind() {
+        ty::PatternKind::Range { start, end } => {
+            stack.push(end.into());
+            stack.push(start.into());
+        }
+        ty::PatternKind::Or(pats) => {
+            for pat in pats.iter() {
+                push_ty_pat::<I>(stack, pat)
+            }
+        }
+    }
+}