Diffstat (limited to 'compiler/rustc_mir_transform/src')
 compiler/rustc_mir_transform/src/abort_unwinding_calls.rs | 4
 compiler/rustc_mir_transform/src/add_subtyping_projections.rs | 70
 compiler/rustc_mir_transform/src/check_const_item_mutation.rs | 16
 compiler/rustc_mir_transform/src/check_packed_ref.rs | 9
 compiler/rustc_mir_transform/src/check_unsafety.rs | 9
 compiler/rustc_mir_transform/src/const_debuginfo.rs | 4
 compiler/rustc_mir_transform/src/const_prop.rs | 40
 compiler/rustc_mir_transform/src/const_prop_lint.rs | 68
 compiler/rustc_mir_transform/src/copy_prop.rs | 5
 compiler/rustc_mir_transform/src/{generator.rs => coroutine.rs} | 350
 compiler/rustc_mir_transform/src/cost_checker.rs | 98
 compiler/rustc_mir_transform/src/coverage/counters.rs | 277
 compiler/rustc_mir_transform/src/coverage/graph.rs | 283
 compiler/rustc_mir_transform/src/coverage/mod.rs | 283
 compiler/rustc_mir_transform/src/coverage/query.rs | 107
 compiler/rustc_mir_transform/src/coverage/spans.rs | 585
 compiler/rustc_mir_transform/src/coverage/spans/from_mir.rs | 193
 compiler/rustc_mir_transform/src/coverage/tests.rs | 40
 compiler/rustc_mir_transform/src/cross_crate_inline.rs | 119
 compiler/rustc_mir_transform/src/dataflow_const_prop.rs | 17
 compiler/rustc_mir_transform/src/dead_store_elimination.rs | 22
 compiler/rustc_mir_transform/src/deref_separator.rs | 2
 compiler/rustc_mir_transform/src/dest_prop.rs | 6
 compiler/rustc_mir_transform/src/early_otherwise_branch.rs | 1
 compiler/rustc_mir_transform/src/elaborate_box_derefs.rs | 2
 compiler/rustc_mir_transform/src/elaborate_drops.rs | 186
 compiler/rustc_mir_transform/src/ffi_unwind_calls.rs | 2
 compiler/rustc_mir_transform/src/gvn.rs | 43
 compiler/rustc_mir_transform/src/inline.rs | 148
 compiler/rustc_mir_transform/src/instsimplify.rs | 18
 compiler/rustc_mir_transform/src/jump_threading.rs | 759
 compiler/rustc_mir_transform/src/large_enums.rs | 3
 compiler/rustc_mir_transform/src/lib.rs | 99
 compiler/rustc_mir_transform/src/lower_intrinsics.rs | 57
 compiler/rustc_mir_transform/src/lower_slice_len.rs | 78
 compiler/rustc_mir_transform/src/multiple_return_terminators.rs | 2
 compiler/rustc_mir_transform/src/normalize_array_len.rs | 4
 compiler/rustc_mir_transform/src/nrvo.rs | 2
 compiler/rustc_mir_transform/src/pass_manager.rs | 33
 compiler/rustc_mir_transform/src/ref_prop.rs | 9
 compiler/rustc_mir_transform/src/remove_noop_landing_pads.rs | 2
 compiler/rustc_mir_transform/src/remove_uninit_drops.rs | 7
 compiler/rustc_mir_transform/src/remove_zsts.rs | 9
 compiler/rustc_mir_transform/src/reveal_all.rs | 4
 compiler/rustc_mir_transform/src/separate_const_switch.rs | 4
 compiler/rustc_mir_transform/src/shim.rs | 18
 compiler/rustc_mir_transform/src/simplify.rs | 103
 compiler/rustc_mir_transform/src/sroa.rs | 4
 compiler/rustc_mir_transform/src/ssa.rs | 163
 compiler/rustc_mir_transform/src/uninhabited_enum_branching.rs | 25
 compiler/rustc_mir_transform/src/unreachable_prop.rs | 2
51 files changed, 2487 insertions, 1907 deletions
diff --git a/compiler/rustc_mir_transform/src/abort_unwinding_calls.rs b/compiler/rustc_mir_transform/src/abort_unwinding_calls.rs
index 4500bb7ff0f..2b3d423ea61 100644
--- a/compiler/rustc_mir_transform/src/abort_unwinding_calls.rs
+++ b/compiler/rustc_mir_transform/src/abort_unwinding_calls.rs
@@ -40,7 +40,7 @@ impl<'tcx> MirPass<'tcx> for AbortUnwindingCalls {
         let body_abi = match body_ty.kind() {
             ty::FnDef(..) => body_ty.fn_sig(tcx).abi(),
             ty::Closure(..) => Abi::RustCall,
-            ty::Generator(..) => Abi::Rust,
+            ty::Coroutine(..) => Abi::Rust,
             _ => span_bug!(body.span, "unexpected body ty: {:?}", body_ty),
         };
         let body_can_unwind = layout::fn_can_unwind(tcx, Some(def_id), body_abi);
@@ -113,6 +113,6 @@ impl<'tcx> MirPass<'tcx> for AbortUnwindingCalls {
         }
 
         // We may have invalidated some `cleanup` blocks so clean those up now.
-        super::simplify::remove_dead_blocks(tcx, body);
+        super::simplify::remove_dead_blocks(body);
     }
 }
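
For orientation, a rough stand-alone sketch of the behaviour AbortUnwindingCalls implements (the function names below are invented and this is not compiler code): a body whose ABI cannot unwind, such as extern "C", has the unwind edges of its calls rewritten so that a panic aborts instead of propagating, matching the fn_can_unwind check in the hunk above.

// Sketch assuming the default `-C panic=unwind` strategy; `ffi_entry` and
// `may_panic` are made-up names.
extern "C" fn ffi_entry(x: i32) -> i32 {
    // `extern "C"` cannot unwind (unlike `Abi::Rust` / `Abi::RustCall` above),
    // so a panic escaping this call aborts the process rather than unwinding
    // across the C ABI.
    may_panic(x)
}

fn may_panic(x: i32) -> i32 {
    assert!(x >= 0, "negative input");
    x + 1
}

fn main() {
    println!("{}", ffi_entry(1)); // prints 2; ffi_entry(-1) would abort
}
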
diff --git a/compiler/rustc_mir_transform/src/add_subtyping_projections.rs b/compiler/rustc_mir_transform/src/add_subtyping_projections.rs
new file mode 100644
index 00000000000..e5be7c0ca76
--- /dev/null
+++ b/compiler/rustc_mir_transform/src/add_subtyping_projections.rs
@@ -0,0 +1,70 @@
+use crate::MirPass;
+use rustc_index::IndexVec;
+use rustc_middle::mir::patch::MirPatch;
+use rustc_middle::mir::visit::MutVisitor;
+use rustc_middle::mir::*;
+use rustc_middle::ty::TyCtxt;
+
+pub struct Subtyper;
+
+pub struct SubTypeChecker<'a, 'tcx> {
+    tcx: TyCtxt<'tcx>,
+    patcher: MirPatch<'tcx>,
+    local_decls: &'a IndexVec<Local, LocalDecl<'tcx>>,
+}
+
+impl<'a, 'tcx> MutVisitor<'tcx> for SubTypeChecker<'a, 'tcx> {
+    fn tcx(&self) -> TyCtxt<'tcx> {
+        self.tcx
+    }
+
+    fn visit_assign(
+        &mut self,
+        place: &mut Place<'tcx>,
+        rvalue: &mut Rvalue<'tcx>,
+        location: Location,
+    ) {
+        // We don't need to do anything for deref temps as they are
+        // not part of the source code, but used for desugaring purposes.
+        if self.local_decls[place.local].is_deref_temp() {
+            return;
+        }
+        let mut place_ty = place.ty(self.local_decls, self.tcx).ty;
+        let mut rval_ty = rvalue.ty(self.local_decls, self.tcx);
+        // Not erasing this causes `Free Regions` errors in the validator
+        // when rval is `ReStatic`.
+        rval_ty = self.tcx.erase_regions_ty(rval_ty);
+        place_ty = self.tcx.erase_regions(place_ty);
+        if place_ty != rval_ty {
+            let temp = self
+                .patcher
+                .new_temp(rval_ty, self.local_decls[place.as_ref().local].source_info.span);
+            let new_place = Place::from(temp);
+            self.patcher.add_assign(location, new_place, rvalue.clone());
+            let subtyped = new_place.project_deeper(&[ProjectionElem::Subtype(place_ty)], self.tcx);
+            *rvalue = Rvalue::Use(Operand::Move(subtyped));
+        }
+    }
+}
+
+// The aim here is to do this kind of transformation:
+//
+// let place: place_ty = rval;
+// // gets transformed to
+// let temp: rval_ty = rval;
+// let place: place_ty = temp as place_ty;
+pub fn subtype_finder<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+    let patch = MirPatch::new(body);
+    let mut checker = SubTypeChecker { tcx, patcher: patch, local_decls: &body.local_decls };
+
+    for (bb, data) in body.basic_blocks.as_mut_preserves_cfg().iter_enumerated_mut() {
+        checker.visit_basic_block_data(bb, data);
+    }
+    checker.patcher.apply(body);
+}
+
+impl<'tcx> MirPass<'tcx> for Subtyper {
+    fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+        subtype_finder(tcx, body);
+    }
+}
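
A rough surface-level illustration of when place_ty and rval_ty can legitimately differ in the sense handled above (purely for exposition; the pass itself operates on MIR, and the names below are invented):

// `generalize` coerces to the higher-ranked type `for<'a> fn(&'a str) -> &'a str`,
// which is a subtype of the less general `fn(&'static str) -> &'static str` that
// `f` is declared with. In MIR this shows up as an assignment whose rvalue type
// differs from the place type, which the Subtyper pass splits through a fresh
// temporary plus a `ProjectionElem::Subtype` projection, as in the code above.
fn generalize(x: &str) -> &str {
    x
}

fn main() {
    let f: fn(&'static str) -> &'static str = generalize;
    println!("{}", f("hello"));
}
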
diff --git a/compiler/rustc_mir_transform/src/check_const_item_mutation.rs b/compiler/rustc_mir_transform/src/check_const_item_mutation.rs
index b79150737d6..61bf530f11c 100644
--- a/compiler/rustc_mir_transform/src/check_const_item_mutation.rs
+++ b/compiler/rustc_mir_transform/src/check_const_item_mutation.rs
@@ -97,13 +97,15 @@ impl<'tcx> Visitor<'tcx> for ConstMutationChecker<'_, 'tcx> {
             // so emitting a lint would be redundant.
             if !lhs.projection.is_empty() {
                 if let Some(def_id) = self.is_const_item_without_destructor(lhs.local)
-                    && let Some((lint_root, span, item)) = self.should_lint_const_item_usage(&lhs, def_id, loc) {
-                        self.tcx.emit_spanned_lint(
-                            CONST_ITEM_MUTATION,
-                            lint_root,
-                            span,
-                            errors::ConstMutate::Modify { konst: item }
-                        );
+                    && let Some((lint_root, span, item)) =
+                        self.should_lint_const_item_usage(&lhs, def_id, loc)
+                {
+                    self.tcx.emit_spanned_lint(
+                        CONST_ITEM_MUTATION,
+                        lint_root,
+                        span,
+                        errors::ConstMutate::Modify { konst: item },
+                    );
                 }
             }
             // We are looking for MIR of the form:
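
For context, a small invented example of the pattern behind the CONST_ITEM_MUTATION lint emitted above: writing through a projection of a `const` item only mutates a temporary copy.

// Each use of a `const` item materializes a fresh temporary, so this index
// assignment modifies that temporary, not any shared value; rustc reports it
// under the `const_item_mutation` lint.
const FOO: [i32; 1] = [0];

fn main() {
    FOO[0] = 5; // warning: attempting to modify a `const` item
    println!("{}", FOO[0]); // still prints 0
}
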
diff --git a/compiler/rustc_mir_transform/src/check_packed_ref.rs b/compiler/rustc_mir_transform/src/check_packed_ref.rs
index 2e6cf603d59..9ee0a704071 100644
--- a/compiler/rustc_mir_transform/src/check_packed_ref.rs
+++ b/compiler/rustc_mir_transform/src/check_packed_ref.rs
@@ -46,9 +46,14 @@ impl<'tcx> Visitor<'tcx> for PackedRefChecker<'_, 'tcx> {
                     // If we ever reach here it means that the generated derive
                     // code is somehow doing an unaligned reference, which it
                     // shouldn't do.
-                    span_bug!(self.source_info.span, "builtin derive created an unaligned reference");
+                    span_bug!(
+                        self.source_info.span,
+                        "builtin derive created an unaligned reference"
+                    );
                 } else {
-                    self.tcx.sess.emit_err(errors::UnalignedPackedRef { span: self.source_info.span });
+                    self.tcx
+                        .sess
+                        .emit_err(errors::UnalignedPackedRef { span: self.source_info.span });
                 }
             }
         }
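
A brief hand-written example (not from this diff) of the situation this checker guards against: references into #[repr(packed)] structs may be under-aligned, so user code taking such a reference is rejected, while derive-generated code reaching the same point is treated as a compiler bug, per the span_bug above.

// Fields of a packed struct can be under-aligned, so taking a reference to one
// is an error; copying the field out by value is the supported alternative.
#[repr(packed)]
struct Packed {
    a: u8,
    b: u32,
}

fn main() {
    let p = Packed { a: 1, b: 2 };
    // let r = &p.b; // ERROR: reference to packed field is unaligned
    let (a, b) = (p.a, p.b); // copying the values out is fine
    println!("{a} {b}");
}
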
diff --git a/compiler/rustc_mir_transform/src/check_unsafety.rs b/compiler/rustc_mir_transform/src/check_unsafety.rs
index bacabc62ee4..8872f9a97d7 100644
--- a/compiler/rustc_mir_transform/src/check_unsafety.rs
+++ b/compiler/rustc_mir_transform/src/check_unsafety.rs
@@ -56,7 +56,7 @@ impl<'tcx> Visitor<'tcx> for UnsafetyChecker<'_, 'tcx> {
             | TerminatorKind::Drop { .. }
             | TerminatorKind::Yield { .. }
             | TerminatorKind::Assert { .. }
-            | TerminatorKind::GeneratorDrop
+            | TerminatorKind::CoroutineDrop
             | TerminatorKind::UnwindResume
             | TerminatorKind::UnwindTerminate(_)
             | TerminatorKind::Return
@@ -128,7 +128,7 @@ impl<'tcx> Visitor<'tcx> for UnsafetyChecker<'_, 'tcx> {
                         ),
                     }
                 }
-                &AggregateKind::Closure(def_id, _) | &AggregateKind::Generator(def_id, _, _) => {
+                &AggregateKind::Closure(def_id, _) | &AggregateKind::Coroutine(def_id, _, _) => {
                     let def_id = def_id.expect_local();
                     let UnsafetyCheckResult { violations, used_unsafe_blocks, .. } =
                         self.tcx.unsafety_check_result(def_id);
@@ -179,7 +179,7 @@ impl<'tcx> Visitor<'tcx> for UnsafetyChecker<'_, 'tcx> {
         // Check the base local: it might be an unsafe-to-access static. We only check derefs of the
         // temporary holding the static pointer to avoid duplicate errors
         // <https://github.com/rust-lang/rust/pull/78068#issuecomment-731753506>.
-        if decl.internal && place.projection.first() == Some(&ProjectionElem::Deref) {
+        if place.projection.first() == Some(&ProjectionElem::Deref) {
             // If the projection root is an artificial local that we introduced when
             // desugaring `static`, give a more specific error message
             // (avoid the general "raw pointer" clause below, that would only be confusing).
@@ -540,8 +540,7 @@ pub fn check_unsafety(tcx: TyCtxt<'_>, def_id: LocalDefId) {
                         && let BlockCheckMode::UnsafeBlock(_) = block.rules
                     {
                         true
-                    }
-                    else if let Some(sig) = tcx.hir().fn_sig_by_hir_id(*id)
+                    } else if let Some(sig) = tcx.hir().fn_sig_by_hir_id(*id)
                         && sig.header.is_unsafe()
                     {
                         true
diff --git a/compiler/rustc_mir_transform/src/const_debuginfo.rs b/compiler/rustc_mir_transform/src/const_debuginfo.rs
index 40cd2825408..e4e4270c499 100644
--- a/compiler/rustc_mir_transform/src/const_debuginfo.rs
+++ b/compiler/rustc_mir_transform/src/const_debuginfo.rs
@@ -55,7 +55,9 @@ fn find_optimization_opportunities<'tcx>(body: &Body<'tcx>) -> Vec<(Local, Const
 
     let mut locals_to_debuginfo = BitSet::new_empty(body.local_decls.len());
     for debuginfo in &body.var_debug_info {
-        if let VarDebugInfoContents::Place(p) = debuginfo.value && let Some(l) = p.as_local() {
+        if let VarDebugInfoContents::Place(p) = debuginfo.value
+            && let Some(l) = p.as_local()
+        {
             locals_to_debuginfo.insert(l);
         }
     }
diff --git a/compiler/rustc_mir_transform/src/const_prop.rs b/compiler/rustc_mir_transform/src/const_prop.rs
index 4fc78b28580..53c0d0dea29 100644
--- a/compiler/rustc_mir_transform/src/const_prop.rs
+++ b/compiler/rustc_mir_transform/src/const_prop.rs
@@ -2,8 +2,6 @@
 //! assertion failures
 
 use either::Right;
-
-use rustc_const_eval::const_eval::CheckAlignment;
 use rustc_const_eval::ReportErrorExt;
 use rustc_data_structures::fx::FxHashSet;
 use rustc_hir::def::DefKind;
@@ -16,7 +14,7 @@ use rustc_middle::mir::*;
 use rustc_middle::ty::layout::{LayoutError, LayoutOf, LayoutOfHelpers, TyAndLayout};
 use rustc_middle::ty::{self, GenericArgs, Instance, ParamEnv, Ty, TyCtxt, TypeVisitableExt};
 use rustc_span::{def_id::DefId, Span};
-use rustc_target::abi::{self, Align, HasDataLayout, Size, TargetDataLayout};
+use rustc_target::abi::{self, HasDataLayout, Size, TargetDataLayout};
 use rustc_target::spec::abi::Abi as CallAbi;
 
 use crate::dataflow_const_prop::Patch;
@@ -84,11 +82,11 @@ impl<'tcx> MirPass<'tcx> for ConstProp {
             return;
         }
 
-        // FIXME(welseywiser) const prop doesn't work on generators because of query cycles
+        // FIXME(welseywiser) const prop doesn't work on coroutines because of query cycles
         // computing their layout.
-        let is_generator = def_kind == DefKind::Generator;
-        if is_generator {
-            trace!("ConstProp skipped for generator {:?}", def_id);
+        let is_coroutine = def_kind == DefKind::Coroutine;
+        if is_coroutine {
+            trace!("ConstProp skipped for coroutine {:?}", def_id);
             return;
         }
 
@@ -141,27 +139,14 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for ConstPropMachine<'mir, 'tcx>
     type MemoryKind = !;
 
     #[inline(always)]
-    fn enforce_alignment(_ecx: &InterpCx<'mir, 'tcx, Self>) -> CheckAlignment {
-        // We do not check for alignment to avoid having to carry an `Align`
-        // in `ConstValue::Indirect`.
-        CheckAlignment::No
+    fn enforce_alignment(_ecx: &InterpCx<'mir, 'tcx, Self>) -> bool {
+        false // no reason to enforce alignment
     }
 
     #[inline(always)]
     fn enforce_validity(_ecx: &InterpCx<'mir, 'tcx, Self>, _layout: TyAndLayout<'tcx>) -> bool {
         false // for now, we don't enforce validity
     }
-    fn alignment_check_failed(
-        ecx: &InterpCx<'mir, 'tcx, Self>,
-        _has: Align,
-        _required: Align,
-        _check: CheckAlignment,
-    ) -> InterpResult<'tcx, ()> {
-        span_bug!(
-            ecx.cur_span(),
-            "`alignment_check_failed` called when no alignment check requested"
-        )
-    }
 
     fn load_mir(
         _ecx: &InterpCx<'mir, 'tcx, Self>,
@@ -527,7 +512,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
 
     fn replace_with_const(&mut self, place: Place<'tcx>) -> Option<Const<'tcx>> {
         // This will return None if the above `const_prop` invocation only "wrote" a
-        // type whose creation requires no write. E.g. a generator whose initial state
+        // type whose creation requires no write. E.g. a coroutine whose initial state
         // consists solely of uninitialized memory (so it doesn't capture any locals).
         let value = self.get_const(place)?;
         if !self.tcx.consider_optimizing(|| format!("ConstantPropagation - {value:?}")) {
@@ -699,7 +684,9 @@ impl<'tcx> Visitor<'tcx> for CanConstProp {
 impl<'tcx> Visitor<'tcx> for ConstPropagator<'_, 'tcx> {
     fn visit_operand(&mut self, operand: &Operand<'tcx>, location: Location) {
         self.super_operand(operand, location);
-        if let Some(place) = operand.place() && let Some(value) = self.replace_with_const(place) {
+        if let Some(place) = operand.place()
+            && let Some(value) = self.replace_with_const(place)
+        {
             self.patch.before_effect.insert((location, place), value);
         }
     }
@@ -733,7 +720,10 @@ impl<'tcx> Visitor<'tcx> for ConstPropagator<'_, 'tcx> {
                     if let Rvalue::Use(Operand::Constant(c)) = rvalue
                         && let Const::Val(..) = c.const_
                     {
-                        trace!("skipping replace of Rvalue::Use({:?} because it is already a const", c);
+                        trace!(
+                            "skipping replace of Rvalue::Use({:?} because it is already a const",
+                            c
+                        );
                     } else if let Some(operand) = self.replace_with_const(*place) {
                         self.patch.assignments.insert(location, operand);
                     }
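
As a reminder of what the pass itself does, a small illustrative function (invented, with approximate MIR shown in the comments) demonstrating the folding ConstProp performs:

// Before ConstProp the MIR computes roughly `_1 = Add(const 2_u32, const 2_u32)`
// and `_0 = Mul(move _1, const 10_u32)`; after propagation both operands are
// replaced with evaluated constants, roughly `_1 = const 4_u32; _0 = const 40_u32`.
fn folded() -> u32 {
    let x = 2 + 2;
    x * 10
}

fn main() {
    assert_eq!(folded(), 40);
}
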
diff --git a/compiler/rustc_mir_transform/src/const_prop_lint.rs b/compiler/rustc_mir_transform/src/const_prop_lint.rs
index 64e262c6c93..a23ba9c4aa9 100644
--- a/compiler/rustc_mir_transform/src/const_prop_lint.rs
+++ b/compiler/rustc_mir_transform/src/const_prop_lint.rs
@@ -22,7 +22,6 @@ use rustc_middle::ty::{
 };
 use rustc_span::Span;
 use rustc_target::abi::{HasDataLayout, Size, TargetDataLayout};
-use rustc_trait_selection::traits;
 
 use crate::const_prop::CanConstProp;
 use crate::const_prop::ConstPropMachine;
@@ -35,9 +34,9 @@ use crate::MirLint;
 /// Severely regress performance.
 const MAX_ALLOC_LIMIT: u64 = 1024;
 
-pub struct ConstProp;
+pub struct ConstPropLint;
 
-impl<'tcx> MirLint<'tcx> for ConstProp {
+impl<'tcx> MirLint<'tcx> for ConstPropLint {
     fn run_lint(&self, tcx: TyCtxt<'tcx>, body: &Body<'tcx>) {
         if body.tainted_by_errors.is_some() {
             return;
@@ -49,61 +48,25 @@ impl<'tcx> MirLint<'tcx> for ConstProp {
         }
 
         let def_id = body.source.def_id().expect_local();
-        let is_fn_like = tcx.def_kind(def_id).is_fn_like();
-        let is_assoc_const = tcx.def_kind(def_id) == DefKind::AssocConst;
+        let def_kind = tcx.def_kind(def_id);
+        let is_fn_like = def_kind.is_fn_like();
+        let is_assoc_const = def_kind == DefKind::AssocConst;
 
         // Only run const prop on functions, methods, closures and associated constants
         if !is_fn_like && !is_assoc_const {
             // skip anon_const/statics/consts because they'll be evaluated by miri anyway
-            trace!("ConstProp skipped for {:?}", def_id);
+            trace!("ConstPropLint skipped for {:?}", def_id);
             return;
         }
 
-        let is_generator = tcx.type_of(def_id.to_def_id()).instantiate_identity().is_generator();
-        // FIXME(welseywiser) const prop doesn't work on generators because of query cycles
+        // FIXME(welseywiser) const prop doesn't work on coroutines because of query cycles
         // computing their layout.
-        if is_generator {
-            trace!("ConstProp skipped for generator {:?}", def_id);
+        if let DefKind::Coroutine = def_kind {
+            trace!("ConstPropLint skipped for coroutine {:?}", def_id);
             return;
         }
 
-        // Check if it's even possible to satisfy the 'where' clauses
-        // for this item.
-        // This branch will never be taken for any normal function.
-        // However, it's possible to `#!feature(trivial_bounds)]` to write
-        // a function with impossible to satisfy clauses, e.g.:
-        // `fn foo() where String: Copy {}`
-        //
-        // We don't usually need to worry about this kind of case,
-        // since we would get a compilation error if the user tried
-        // to call it. However, since we can do const propagation
-        // even without any calls to the function, we need to make
-        // sure that it even makes sense to try to evaluate the body.
-        // If there are unsatisfiable where clauses, then all bets are
-        // off, and we just give up.
-        //
-        // We manually filter the predicates, skipping anything that's not
-        // "global". We are in a potentially generic context
-        // (e.g. we are evaluating a function without substituting generic
-        // parameters, so this filtering serves two purposes:
-        //
-        // 1. We skip evaluating any predicates that we would
-        // never be able prove are unsatisfiable (e.g. `<T as Foo>`
-        // 2. We avoid trying to normalize predicates involving generic
-        // parameters (e.g. `<T as Foo>::MyItem`). This can confuse
-        // the normalization code (leading to cycle errors), since
-        // it's usually never invoked in this way.
-        let predicates = tcx
-            .predicates_of(def_id.to_def_id())
-            .predicates
-            .iter()
-            .filter_map(|(p, _)| if p.is_global() { Some(*p) } else { None });
-        if traits::impossible_predicates(tcx, traits::elaborate(tcx, predicates).collect()) {
-            trace!("ConstProp skipped for {:?}: found unsatisfiable predicates", def_id);
-            return;
-        }
-
-        trace!("ConstProp starting for {:?}", def_id);
+        trace!("ConstPropLint starting for {:?}", def_id);
 
         // FIXME(oli-obk, eddyb) Optimize locals (or even local paths) to hold
         // constants, instead of just checking for const-folding succeeding.
@@ -112,7 +75,7 @@ impl<'tcx> MirLint<'tcx> for ConstProp {
         let mut linter = ConstPropagator::new(body, tcx);
         linter.visit_body(body);
 
-        trace!("ConstProp done for {:?}", def_id);
+        trace!("ConstPropLint done for {:?}", def_id);
     }
 }
 
@@ -664,9 +627,10 @@ impl<'tcx> Visitor<'tcx> for ConstPropagator<'_, 'tcx> {
             }
             TerminatorKind::SwitchInt { ref discr, ref targets } => {
                 if let Some(ref value) = self.eval_operand(&discr, location)
-                  && let Some(value_const) = self.use_ecx(location, |this| this.ecx.read_scalar(value))
-                  && let Ok(constant) = value_const.try_to_int()
-                  && let Ok(constant) = constant.to_bits(constant.size())
+                    && let Some(value_const) =
+                        self.use_ecx(location, |this| this.ecx.read_scalar(value))
+                    && let Ok(constant) = value_const.try_to_int()
+                    && let Ok(constant) = constant.to_bits(constant.size())
                 {
                     // We managed to evaluate the discriminant, so we know we only need to visit
                     // one target.
@@ -684,7 +648,7 @@ impl<'tcx> Visitor<'tcx> for ConstPropagator<'_, 'tcx> {
             | TerminatorKind::Unreachable
             | TerminatorKind::Drop { .. }
             | TerminatorKind::Yield { .. }
-            | TerminatorKind::GeneratorDrop
+            | TerminatorKind::CoroutineDrop
             | TerminatorKind::FalseEdge { .. }
             | TerminatorKind::FalseUnwind { .. }
             | TerminatorKind::Call { .. }
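
For context on what ConstPropLint reports, an invented example that the compiler is expected to reject: failures the const propagator can prove while linting a body, such as a guaranteed overflow or an out-of-bounds index.

// Both operations below are evaluated during const propagation of this body, so
// the failures are reported at compile time (deny-by-default lints
// `arithmetic_overflow` and `unconditional_panic`).
fn main() {
    let x: u8 = 255;
    let _y = x + 1; // error: this arithmetic operation will overflow

    let a = [1, 2, 3];
    let _b = a[3]; // error: this operation will panic at runtime
}
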
diff --git a/compiler/rustc_mir_transform/src/copy_prop.rs b/compiler/rustc_mir_transform/src/copy_prop.rs
index 9a3798eea3b..be4af3b76f1 100644
--- a/compiler/rustc_mir_transform/src/copy_prop.rs
+++ b/compiler/rustc_mir_transform/src/copy_prop.rs
@@ -168,14 +168,15 @@ impl<'tcx> MutVisitor<'tcx> for Replacer<'_, 'tcx> {
             && self.storage_to_remove.contains(l)
         {
             stmt.make_nop();
-            return
+            return;
         }
 
         self.super_statement(stmt, loc);
 
         // Do not leave tautological assignments around.
         if let StatementKind::Assign(box (lhs, ref rhs)) = stmt.kind
-            && let Rvalue::Use(Operand::Copy(rhs) | Operand::Move(rhs)) | Rvalue::CopyForDeref(rhs) = *rhs
+            && let Rvalue::Use(Operand::Copy(rhs) | Operand::Move(rhs)) | Rvalue::CopyForDeref(rhs) =
+                *rhs
             && lhs == rhs
         {
             stmt.make_nop();
diff --git a/compiler/rustc_mir_transform/src/generator.rs b/compiler/rustc_mir_transform/src/coroutine.rs
index 8a807d786a5..fa56d59dd80 100644
--- a/compiler/rustc_mir_transform/src/generator.rs
+++ b/compiler/rustc_mir_transform/src/coroutine.rs
@@ -1,53 +1,53 @@
-//! This is the implementation of the pass which transforms generators into state machines.
+//! This is the implementation of the pass which transforms coroutines into state machines.
 //!
-//! MIR generation for generators creates a function which has a self argument which
-//! passes by value. This argument is effectively a generator type which only contains upvars and
-//! is only used for this argument inside the MIR for the generator.
+//! MIR generation for coroutines creates a function which has a self argument which
+//! passes by value. This argument is effectively a coroutine type which only contains upvars and
+//! is only used for this argument inside the MIR for the coroutine.
 //! It is passed by value to enable upvars to be moved out of it. Drop elaboration runs on that
 //! MIR before this pass and creates drop flags for MIR locals.
-//! It will also drop the generator argument (which only consists of upvars) if any of the upvars
-//! are moved out of. This pass elaborates the drops of upvars / generator argument in the case
+//! It will also drop the coroutine argument (which only consists of upvars) if any of the upvars
+//! are moved out of. This pass elaborates the drops of upvars / coroutine argument in the case
 //! that none of the upvars were moved out of. This is because we cannot have any drops of this
-//! generator in the MIR, since it is used to create the drop glue for the generator. We'd get
+//! coroutine in the MIR, since it is used to create the drop glue for the coroutine. We'd get
 //! infinite recursion otherwise.
 //!
-//! This pass creates the implementation for either the `Generator::resume` or `Future::poll`
-//! function and the drop shim for the generator based on the MIR input.
-//! It converts the generator argument from Self to &mut Self adding derefs in the MIR as needed.
-//! It computes the final layout of the generator struct which looks like this:
+//! This pass creates the implementation for either the `Coroutine::resume` or `Future::poll`
+//! function and the drop shim for the coroutine based on the MIR input.
+//! It converts the coroutine argument from Self to &mut Self adding derefs in the MIR as needed.
+//! It computes the final layout of the coroutine struct which looks like this:
 //!     First upvars are stored
-//!     It is followed by the generator state field.
+//!     It is followed by the coroutine state field.
 //!     Then finally the MIR locals which are live across a suspension point are stored.
 //!     ```ignore (illustrative)
-//!     struct Generator {
+//!     struct Coroutine {
 //!         upvars...,
 //!         state: u32,
 //!         mir_locals...,
 //!     }
 //!     ```
 //! This pass computes the meaning of the state field and the MIR locals which are live
-//! across a suspension point. There are however three hardcoded generator states:
-//!     0 - Generator have not been resumed yet
-//!     1 - Generator has returned / is completed
-//!     2 - Generator has been poisoned
+//! across a suspension point. There are however three hardcoded coroutine states:
+//!     0 - Coroutine has not been resumed yet
+//!     1 - Coroutine has returned / is completed
+//!     2 - Coroutine has been poisoned
 //!
-//! It also rewrites `return x` and `yield y` as setting a new generator state and returning
-//! `GeneratorState::Complete(x)` and `GeneratorState::Yielded(y)`,
+//! It also rewrites `return x` and `yield y` as setting a new coroutine state and returning
+//! `CoroutineState::Complete(x)` and `CoroutineState::Yielded(y)`,
 //! or `Poll::Ready(x)` and `Poll::Pending` respectively.
-//! MIR locals which are live across a suspension point are moved to the generator struct
-//! with references to them being updated with references to the generator struct.
+//! MIR locals which are live across a suspension point are moved to the coroutine struct
+//! with references to them being updated with references to the coroutine struct.
 //!
-//! The pass creates two functions which have a switch on the generator state giving
+//! The pass creates two functions which have a switch on the coroutine state giving
 //! the action to take.
 //!
-//! One of them is the implementation of `Generator::resume` / `Future::poll`.
-//! For generators with state 0 (unresumed) it starts the execution of the generator.
-//! For generators with state 1 (returned) and state 2 (poisoned) it panics.
+//! One of them is the implementation of `Coroutine::resume` / `Future::poll`.
+//! For coroutines with state 0 (unresumed) it starts the execution of the coroutine.
+//! For coroutines with state 1 (returned) and state 2 (poisoned) it panics.
 //! Otherwise it continues the execution from the last suspension point.
 //!
-//! The other function is the drop glue for the generator.
-//! For generators with state 0 (unresumed) it drops the upvars of the generator.
-//! For generators with state 1 (returned) and state 2 (poisoned) it does nothing.
+//! The other function is the drop glue for the coroutine.
+//! For coroutines with state 0 (unresumed) it drops the upvars of the coroutine.
+//! For coroutines with state 1 (returned) and state 2 (poisoned) it does nothing.
 //! Otherwise it drops all the values in scope at the last suspension point.
 
 use crate::abort_unwinding_calls;
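
To make the layout described in the module docs concrete, a rough hand-written model of the state machine built for a coroutine with one suspension point (exposition only; the real pass produces MIR, not source, and all names below are invented):

// States 0/1/2 mirror UNRESUMED / RETURNED / POISONED; the extra variant holds
// the locals that are live across the single yield, and the discriminant plays
// the role of the `state` field described above.
enum CoroutineState<Y, R> {
    Yielded(Y),
    Complete(R),
}

enum MyCoroutine {
    Unresumed { upvar: String },           // state 0: only upvars are initialized
    Returned,                              // state 1
    Poisoned,                              // state 2
    Suspend0 { upvar: String, live: u32 }, // locals live across the yield point
}

impl MyCoroutine {
    fn resume(&mut self) -> CoroutineState<u32, String> {
        // Temporarily poison so a panic mid-resume leaves state 2 behind.
        match std::mem::replace(self, MyCoroutine::Poisoned) {
            MyCoroutine::Unresumed { upvar } => {
                let live = upvar.len() as u32;
                *self = MyCoroutine::Suspend0 { upvar, live };
                CoroutineState::Yielded(live)
            }
            MyCoroutine::Suspend0 { upvar, .. } => {
                *self = MyCoroutine::Returned;
                CoroutineState::Complete(upvar)
            }
            MyCoroutine::Returned => panic!("coroutine resumed after completion"),
            MyCoroutine::Poisoned => panic!("coroutine resumed after panicking"),
        }
    }
}

fn main() {
    let mut c = MyCoroutine::Unresumed { upvar: String::from("hi") };
    assert!(matches!(c.resume(), CoroutineState::Yielded(2)));
    assert!(matches!(c.resume(), CoroutineState::Complete(s) if s == "hi"));
}
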
@@ -60,7 +60,7 @@ use rustc_data_structures::fx::{FxHashMap, FxHashSet};
 use rustc_errors::pluralize;
 use rustc_hir as hir;
 use rustc_hir::lang_items::LangItem;
-use rustc_hir::GeneratorKind;
+use rustc_hir::CoroutineKind;
 use rustc_index::bit_set::{BitMatrix, BitSet, GrowableBitSet};
 use rustc_index::{Idx, IndexVec};
 use rustc_middle::mir::dump_mir;
@@ -68,7 +68,7 @@ use rustc_middle::mir::visit::{MutVisitor, PlaceContext, Visitor};
 use rustc_middle::mir::*;
 use rustc_middle::ty::InstanceDef;
 use rustc_middle::ty::{self, AdtDef, Ty, TyCtxt};
-use rustc_middle::ty::{GeneratorArgs, GenericArgsRef};
+use rustc_middle::ty::{CoroutineArgs, GenericArgsRef};
 use rustc_mir_dataflow::impls::{
     MaybeBorrowedLocals, MaybeLiveLocals, MaybeRequiresStorage, MaybeStorageLive,
 };
@@ -196,19 +196,19 @@ fn replace_base<'tcx>(place: &mut Place<'tcx>, new_base: Place<'tcx>, tcx: TyCtx
 
 const SELF_ARG: Local = Local::from_u32(1);
 
-/// Generator has not been resumed yet.
-const UNRESUMED: usize = GeneratorArgs::UNRESUMED;
-/// Generator has returned / is completed.
-const RETURNED: usize = GeneratorArgs::RETURNED;
-/// Generator has panicked and is poisoned.
-const POISONED: usize = GeneratorArgs::POISONED;
+/// Coroutine has not been resumed yet.
+const UNRESUMED: usize = CoroutineArgs::UNRESUMED;
+/// Coroutine has returned / is completed.
+const RETURNED: usize = CoroutineArgs::RETURNED;
+/// Coroutine has panicked and is poisoned.
+const POISONED: usize = CoroutineArgs::POISONED;
 
-/// Number of variants to reserve in generator state. Corresponds to
-/// `UNRESUMED` (beginning of a generator) and `RETURNED`/`POISONED`
-/// (end of a generator) states.
+/// Number of variants to reserve in coroutine state. Corresponds to
+/// `UNRESUMED` (beginning of a coroutine) and `RETURNED`/`POISONED`
+/// (end of a coroutine) states.
 const RESERVED_VARIANTS: usize = 3;
 
-/// A `yield` point in the generator.
+/// A `yield` point in the coroutine.
 struct SuspensionPoint<'tcx> {
     /// State discriminant used when suspending or resuming at this point.
     state: usize,
@@ -216,7 +216,7 @@ struct SuspensionPoint<'tcx> {
     resume: BasicBlock,
     /// Where to move the resume argument after resumption.
     resume_arg: Place<'tcx>,
-    /// Which block to jump to if the generator is dropped in this state.
+    /// Which block to jump to if the coroutine is dropped in this state.
     drop: Option<BasicBlock>,
     /// Set of locals that have live storage while at this suspension point.
     storage_liveness: GrowableBitSet<Local>,
@@ -228,10 +228,10 @@ struct TransformVisitor<'tcx> {
     state_adt_ref: AdtDef<'tcx>,
     state_args: GenericArgsRef<'tcx>,
 
-    // The type of the discriminant in the generator struct
+    // The type of the discriminant in the coroutine struct
     discr_ty: Ty<'tcx>,
 
-    // Mapping from Local to (type of local, generator struct index)
+    // Mapping from Local to (type of local, coroutine struct index)
     // FIXME(eddyb) This should use `IndexVec<Local, Option<_>>`.
     remap: FxHashMap<Local, (Ty<'tcx>, VariantIdx, FieldIdx)>,
 
@@ -249,9 +249,9 @@ struct TransformVisitor<'tcx> {
 }
 
 impl<'tcx> TransformVisitor<'tcx> {
-    // Make a `GeneratorState` or `Poll` variant assignment.
+    // Make a `CoroutineState` or `Poll` variant assignment.
     //
-    // `core::ops::GeneratorState` only has single element tuple variants,
+    // `core::ops::CoroutineState` only has single element tuple variants,
     // so we can just write to the downcasted first field and then set the
     // discriminant to the appropriate variant.
     fn make_state(
@@ -262,8 +262,8 @@ impl<'tcx> TransformVisitor<'tcx> {
         statements: &mut Vec<Statement<'tcx>>,
     ) {
         let idx = VariantIdx::new(match (is_return, self.is_async_kind) {
-            (true, false) => 1,  // GeneratorState::Complete
-            (false, false) => 0, // GeneratorState::Yielded
+            (true, false) => 1,  // CoroutineState::Complete
+            (false, false) => 0, // CoroutineState::Yielded
             (true, true) => 0,   // Poll::Ready
             (false, true) => 1,  // Poll::Pending
         });
@@ -285,7 +285,7 @@ impl<'tcx> TransformVisitor<'tcx> {
             return;
         }
 
-        // else: `Poll::Ready(x)`, `GeneratorState::Yielded(x)` or `GeneratorState::Complete(x)`
+        // else: `Poll::Ready(x)`, `CoroutineState::Yielded(x)` or `CoroutineState::Complete(x)`
         assert_eq!(self.state_adt_ref.variant(idx).fields.len(), 1);
 
         statements.push(Statement {
@@ -297,7 +297,7 @@ impl<'tcx> TransformVisitor<'tcx> {
         });
     }
 
-    // Create a Place referencing a generator struct field
+    // Create a Place referencing a coroutine struct field
     fn make_field(&self, variant_index: VariantIdx, idx: FieldIdx, ty: Ty<'tcx>) -> Place<'tcx> {
         let self_place = Place::from(SELF_ARG);
         let base = self.tcx.mk_place_downcast_unnamed(self_place, variant_index);
@@ -321,7 +321,7 @@ impl<'tcx> TransformVisitor<'tcx> {
 
     // Create a statement which reads the discriminant into a temporary
     fn get_discr(&self, body: &mut Body<'tcx>) -> (Statement<'tcx>, Place<'tcx>) {
-        let temp_decl = LocalDecl::new(self.discr_ty, body.span).internal();
+        let temp_decl = LocalDecl::new(self.discr_ty, body.span);
         let local_decls_len = body.local_decls.push(temp_decl);
         let temp = Place::from(local_decls_len);
 
@@ -349,7 +349,7 @@ impl<'tcx> MutVisitor<'tcx> for TransformVisitor<'tcx> {
         _context: PlaceContext,
         _location: Location,
     ) {
-        // Replace an Local in the remap with a generator struct access
+        // Replace a Local in the remap with a coroutine struct access
         if let Some(&(ty, variant_index, idx)) = self.remap.get(&place.local) {
             replace_base(place, self.make_field(variant_index, idx, ty), self.tcx);
         }
@@ -413,7 +413,7 @@ impl<'tcx> MutVisitor<'tcx> for TransformVisitor<'tcx> {
     }
 }
 
-fn make_generator_state_argument_indirect<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+fn make_coroutine_state_argument_indirect<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
     let gen_ty = body.local_decls.raw[1].ty;
 
     let ref_gen_ty = Ty::new_ref(
@@ -422,14 +422,14 @@ fn make_generator_state_argument_indirect<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Bo
         ty::TypeAndMut { ty: gen_ty, mutbl: Mutability::Mut },
     );
 
-    // Replace the by value generator argument
+    // Replace the by value coroutine argument
     body.local_decls.raw[1].ty = ref_gen_ty;
 
-    // Add a deref to accesses of the generator state
+    // Add a deref to accesses of the coroutine state
     DerefArgVisitor { tcx }.visit_body(body);
 }
 
-fn make_generator_state_argument_pinned<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+fn make_coroutine_state_argument_pinned<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
     let ref_gen_ty = body.local_decls.raw[1].ty;
 
     let pin_did = tcx.require_lang_item(LangItem::Pin, Some(body.span));
@@ -437,10 +437,10 @@ fn make_generator_state_argument_pinned<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body
     let args = tcx.mk_args(&[ref_gen_ty.into()]);
     let pin_ref_gen_ty = Ty::new_adt(tcx, pin_adt_ref, args);
 
-    // Replace the by ref generator argument
+    // Replace the by ref coroutine argument
     body.local_decls.raw[1].ty = pin_ref_gen_ty;
 
-    // Add the Pin field access to accesses of the generator state
+    // Add the Pin field access to accesses of the coroutine state
     PinArgVisitor { ref_gen_ty, tcx }.visit_body(body);
 }
 
@@ -465,7 +465,7 @@ fn replace_local<'tcx>(
     new_local
 }
 
-/// Transforms the `body` of the generator applying the following transforms:
+/// Transforms the `body` of the coroutine applying the following transforms:
 ///
 /// - Eliminates all the `get_context` calls that async lowering created.
 /// - Replace all `Local` `ResumeTy` types with `&mut Context<'_>` (`context_mut_ref`).
@@ -485,7 +485,7 @@ fn replace_local<'tcx>(
 ///
 /// The async lowering step and the type / lifetime inference / checking are
 /// still using the `ResumeTy` indirection for the time being, and that indirection
-/// is removed here. After this transform, the generator body only knows about `&mut Context<'_>`.
+/// is removed here. After this transform, the coroutine body only knows about `&mut Context<'_>`.
 fn transform_async_context<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
     let context_mut_ref = Ty::new_task_context(tcx);
 
@@ -565,10 +565,10 @@ fn replace_resume_ty_local<'tcx>(
 
 struct LivenessInfo {
     /// Which locals are live across any suspension point.
-    saved_locals: GeneratorSavedLocals,
+    saved_locals: CoroutineSavedLocals,
 
     /// The set of saved locals live at each suspension point.
-    live_locals_at_suspension_points: Vec<BitSet<GeneratorSavedLocal>>,
+    live_locals_at_suspension_points: Vec<BitSet<CoroutineSavedLocal>>,
 
     /// Parallel vec to the above with SourceInfo for each yield terminator.
     source_info_at_suspension_points: Vec<SourceInfo>,
@@ -576,7 +576,7 @@ struct LivenessInfo {
     /// For every saved local, the set of other saved locals that are
     /// storage-live at the same time as this local. We cannot overlap locals in
     /// the layout which have conflicting storage.
-    storage_conflicts: BitMatrix<GeneratorSavedLocal, GeneratorSavedLocal>,
+    storage_conflicts: BitMatrix<CoroutineSavedLocal, CoroutineSavedLocal>,
 
     /// For every suspending block, the locals which are storage-live across
     /// that suspension point.
@@ -601,7 +601,7 @@ fn locals_live_across_suspend_points<'tcx>(
     // Calculate the MIR locals which have been previously
     // borrowed (even if they are still active).
     let borrowed_locals_results =
-        MaybeBorrowedLocals.into_engine(tcx, body_ref).pass_name("generator").iterate_to_fixpoint();
+        MaybeBorrowedLocals.into_engine(tcx, body_ref).pass_name("coroutine").iterate_to_fixpoint();
 
     let mut borrowed_locals_cursor = borrowed_locals_results.cloned_results_cursor(body_ref);
 
@@ -616,7 +616,7 @@ fn locals_live_across_suspend_points<'tcx>(
     // Calculate the liveness of MIR locals ignoring borrows.
     let mut liveness = MaybeLiveLocals
         .into_engine(tcx, body_ref)
-        .pass_name("generator")
+        .pass_name("coroutine")
         .iterate_to_fixpoint()
         .into_results_cursor(body_ref);
 
@@ -635,8 +635,8 @@ fn locals_live_across_suspend_points<'tcx>(
 
             if !movable {
                 // The `liveness` variable contains the liveness of MIR locals ignoring borrows.
-                // This is correct for movable generators since borrows cannot live across
-                // suspension points. However for immovable generators we need to account for
+                // This is correct for movable coroutines since borrows cannot live across
+                // suspension points. However for immovable coroutines we need to account for
                 // borrows, so we conservatively assume that all borrowed locals are live until
                 // we find a StorageDead statement referencing the locals.
                 // To do this we just union our `liveness` result with `borrowed_locals`, which
@@ -659,7 +659,7 @@ fn locals_live_across_suspend_points<'tcx>(
             requires_storage_cursor.seek_before_primary_effect(loc);
             live_locals.intersect(requires_storage_cursor.get());
 
-            // The generator argument is ignored.
+            // The coroutine argument is ignored.
             live_locals.remove(SELF_ARG);
 
             debug!("loc = {:?}, live_locals = {:?}", loc, live_locals);
@@ -674,7 +674,7 @@ fn locals_live_across_suspend_points<'tcx>(
     }
 
     debug!("live_locals_anywhere = {:?}", live_locals_at_any_suspension_point);
-    let saved_locals = GeneratorSavedLocals(live_locals_at_any_suspension_point);
+    let saved_locals = CoroutineSavedLocals(live_locals_at_any_suspension_point);
 
     // Renumber our liveness_map bitsets to include only the locals we are
     // saving.
@@ -701,21 +701,21 @@ fn locals_live_across_suspend_points<'tcx>(
 
 /// The set of `Local`s that must be saved across yield points.
 ///
-/// `GeneratorSavedLocal` is indexed in terms of the elements in this set;
-/// i.e. `GeneratorSavedLocal::new(1)` corresponds to the second local
+/// `CoroutineSavedLocal` is indexed in terms of the elements in this set;
+/// i.e. `CoroutineSavedLocal::new(1)` corresponds to the second local
 /// included in this set.
-struct GeneratorSavedLocals(BitSet<Local>);
+struct CoroutineSavedLocals(BitSet<Local>);
 
-impl GeneratorSavedLocals {
-    /// Returns an iterator over each `GeneratorSavedLocal` along with the `Local` it corresponds
+impl CoroutineSavedLocals {
+    /// Returns an iterator over each `CoroutineSavedLocal` along with the `Local` it corresponds
     /// to.
-    fn iter_enumerated(&self) -> impl '_ + Iterator<Item = (GeneratorSavedLocal, Local)> {
-        self.iter().enumerate().map(|(i, l)| (GeneratorSavedLocal::from(i), l))
+    fn iter_enumerated(&self) -> impl '_ + Iterator<Item = (CoroutineSavedLocal, Local)> {
+        self.iter().enumerate().map(|(i, l)| (CoroutineSavedLocal::from(i), l))
     }
 
     /// Transforms a `BitSet<Local>` that contains only locals saved across yield points to the
-    /// equivalent `BitSet<GeneratorSavedLocal>`.
-    fn renumber_bitset(&self, input: &BitSet<Local>) -> BitSet<GeneratorSavedLocal> {
+    /// equivalent `BitSet<CoroutineSavedLocal>`.
+    fn renumber_bitset(&self, input: &BitSet<Local>) -> BitSet<CoroutineSavedLocal> {
         assert!(self.superset(&input), "{:?} not a superset of {:?}", self.0, input);
         let mut out = BitSet::new_empty(self.count());
         for (saved_local, local) in self.iter_enumerated() {
@@ -726,17 +726,17 @@ impl GeneratorSavedLocals {
         out
     }
 
-    fn get(&self, local: Local) -> Option<GeneratorSavedLocal> {
+    fn get(&self, local: Local) -> Option<CoroutineSavedLocal> {
         if !self.contains(local) {
             return None;
         }
 
         let idx = self.iter().take_while(|&l| l < local).count();
-        Some(GeneratorSavedLocal::new(idx))
+        Some(CoroutineSavedLocal::new(idx))
     }
 }
 
-impl ops::Deref for GeneratorSavedLocals {
+impl ops::Deref for CoroutineSavedLocals {
     type Target = BitSet<Local>;
 
     fn deref(&self) -> &Self::Target {
@@ -747,13 +747,13 @@ impl ops::Deref for GeneratorSavedLocals {
 /// For every saved local, looks for which locals are StorageLive at the same
 /// time. Generates a bitset for every local of all the other locals that may be
 /// StorageLive simultaneously with that local. This is used in the layout
-/// computation; see `GeneratorLayout` for more.
+/// computation; see `CoroutineLayout` for more.
 fn compute_storage_conflicts<'mir, 'tcx>(
     body: &'mir Body<'tcx>,
-    saved_locals: &GeneratorSavedLocals,
+    saved_locals: &CoroutineSavedLocals,
     always_live_locals: BitSet<Local>,
     mut requires_storage: rustc_mir_dataflow::Results<'tcx, MaybeRequiresStorage<'_, 'mir, 'tcx>>,
-) -> BitMatrix<GeneratorSavedLocal, GeneratorSavedLocal> {
+) -> BitMatrix<CoroutineSavedLocal, CoroutineSavedLocal> {
     assert_eq!(body.local_decls.len(), saved_locals.domain_size());
 
     debug!("compute_storage_conflicts({:?})", body.span);
@@ -775,7 +775,7 @@ fn compute_storage_conflicts<'mir, 'tcx>(
 
     let local_conflicts = visitor.local_conflicts;
 
-    // Compress the matrix using only stored locals (Local -> GeneratorSavedLocal).
+    // Compress the matrix using only stored locals (Local -> CoroutineSavedLocal).
     //
     // NOTE: Today we store a full conflict bitset for every local. Technically
     // this is twice as many bits as we need, since the relation is symmetric.
@@ -801,9 +801,9 @@ fn compute_storage_conflicts<'mir, 'tcx>(
 
 struct StorageConflictVisitor<'mir, 'tcx, 's> {
     body: &'mir Body<'tcx>,
-    saved_locals: &'s GeneratorSavedLocals,
+    saved_locals: &'s CoroutineSavedLocals,
     // FIXME(tmandry): Consider using sparse bitsets here once we have good
-    // benchmarks for generators.
+    // benchmarks for coroutines.
     local_conflicts: BitMatrix<Local, Local>,
 }
 
@@ -858,7 +858,7 @@ fn compute_layout<'tcx>(
     body: &Body<'tcx>,
 ) -> (
     FxHashMap<Local, (Ty<'tcx>, VariantIdx, FieldIdx)>,
-    GeneratorLayout<'tcx>,
+    CoroutineLayout<'tcx>,
     IndexVec<BasicBlock, Option<BitSet<Local>>>,
 ) {
     let LivenessInfo {
@@ -870,10 +870,10 @@ fn compute_layout<'tcx>(
     } = liveness;
 
     // Gather live local types and their indices.
-    let mut locals = IndexVec::<GeneratorSavedLocal, _>::new();
-    let mut tys = IndexVec::<GeneratorSavedLocal, _>::new();
+    let mut locals = IndexVec::<CoroutineSavedLocal, _>::new();
+    let mut tys = IndexVec::<CoroutineSavedLocal, _>::new();
     for (saved_local, local) in saved_locals.iter_enumerated() {
-        debug!("generator saved local {:?} => {:?}", saved_local, local);
+        debug!("coroutine saved local {:?} => {:?}", saved_local, local);
 
         locals.push(local);
         let decl = &body.local_decls[local];
@@ -895,7 +895,7 @@ fn compute_layout<'tcx>(
             _ => false,
         };
         let decl =
-            GeneratorSavedTy { ty: decl.ty, source_info: decl.source_info, ignore_for_traits };
+            CoroutineSavedTy { ty: decl.ty, source_info: decl.source_info, ignore_for_traits };
         debug!(?decl);
 
         tys.push(decl);
@@ -914,9 +914,9 @@ fn compute_layout<'tcx>(
     .copied()
     .collect();
 
-    // Build the generator variant field list.
-    // Create a map from local indices to generator struct indices.
-    let mut variant_fields: IndexVec<VariantIdx, IndexVec<FieldIdx, GeneratorSavedLocal>> =
+    // Build the coroutine variant field list.
+    // Create a map from local indices to coroutine struct indices.
+    let mut variant_fields: IndexVec<VariantIdx, IndexVec<FieldIdx, CoroutineSavedLocal>> =
         iter::repeat(IndexVec::new()).take(RESERVED_VARIANTS).collect();
     let mut remap = FxHashMap::default();
     for (suspension_point_idx, live_locals) in live_locals_at_suspension_points.iter().enumerate() {
@@ -926,7 +926,7 @@ fn compute_layout<'tcx>(
             fields.push(saved_local);
             // Note that if a field is included in multiple variants, we will
             // just use the first one here. That's fine; fields do not move
-            // around inside generators, so it doesn't matter which variant
+            // around inside coroutines, so it doesn't matter which variant
             // index we access them by.
             let idx = FieldIdx::from_usize(idx);
             remap.entry(locals[saved_local]).or_insert((tys[saved_local].ty, variant_index, idx));
@@ -934,8 +934,8 @@ fn compute_layout<'tcx>(
         variant_fields.push(fields);
         variant_source_info.push(source_info_at_suspension_points[suspension_point_idx]);
     }
-    debug!("generator variant_fields = {:?}", variant_fields);
-    debug!("generator storage_conflicts = {:#?}", storage_conflicts);
+    debug!("coroutine variant_fields = {:?}", variant_fields);
+    debug!("coroutine storage_conflicts = {:#?}", storage_conflicts);
 
     let mut field_names = IndexVec::from_elem(None, &tys);
     for var in &body.var_debug_info {
@@ -947,7 +947,7 @@ fn compute_layout<'tcx>(
         field_names.get_or_insert_with(saved_local, || var.name);
     }
 
-    let layout = GeneratorLayout {
+    let layout = CoroutineLayout {
         field_tys: tys,
         field_names,
         variant_fields,
@@ -959,7 +959,7 @@ fn compute_layout<'tcx>(
     (remap, layout, storage_liveness)
 }
 
-/// Replaces the entry point of `body` with a block that switches on the generator discriminant and
+/// Replaces the entry point of `body` with a block that switches on the coroutine discriminant and
 /// dispatches to blocks according to `cases`.
 ///
 /// After this function, the former entry point of the function will be bb1.
@@ -992,14 +992,14 @@ fn insert_switch<'tcx>(
     }
 }
 
-fn elaborate_generator_drops<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+fn elaborate_coroutine_drops<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
     use crate::shim::DropShimElaborator;
     use rustc_middle::mir::patch::MirPatch;
     use rustc_mir_dataflow::elaborate_drops::{elaborate_drop, Unwind};
 
-    // Note that `elaborate_drops` only drops the upvars of a generator, and
+    // Note that `elaborate_drops` only drops the upvars of a coroutine, and
     // this is ok because `open_drop` can only be reached within that own
-    // generator's resume function.
+    // coroutine's resume function.
 
     let def_id = body.source.def_id();
     let param_env = tcx.param_env(def_id);
@@ -1047,7 +1047,7 @@ fn elaborate_generator_drops<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
     elaborator.patch.apply(body);
 }
 
-fn create_generator_drop_shim<'tcx>(
+fn create_coroutine_drop_shim<'tcx>(
     tcx: TyCtxt<'tcx>,
     transform: &TransformVisitor<'tcx>,
     gen_ty: Ty<'tcx>,
@@ -1070,7 +1070,7 @@ fn create_generator_drop_shim<'tcx>(
 
     for block in body.basic_blocks_mut() {
         let kind = &mut block.terminator_mut().kind;
-        if let TerminatorKind::GeneratorDrop = *kind {
+        if let TerminatorKind::CoroutineDrop = *kind {
             *kind = TerminatorKind::Return;
         }
     }
@@ -1078,9 +1078,9 @@ fn create_generator_drop_shim<'tcx>(
     // Replace the return variable
     body.local_decls[RETURN_PLACE] = LocalDecl::with_source_info(Ty::new_unit(tcx), source_info);
 
-    make_generator_state_argument_indirect(tcx, &mut body);
+    make_coroutine_state_argument_indirect(tcx, &mut body);
 
-    // Change the generator argument from &mut to *mut
+    // Change the coroutine argument from &mut to *mut
     body.local_decls[SELF_ARG] = LocalDecl::with_source_info(
         Ty::new_ptr(tcx, ty::TypeAndMut { ty: gen_ty, mutbl: hir::Mutability::Mut }),
         source_info,
@@ -1088,7 +1088,7 @@ fn create_generator_drop_shim<'tcx>(
 
     // Make sure we remove dead blocks to remove
     // unrelated code from the resume part of the function
-    simplify::remove_dead_blocks(tcx, &mut body);
+    simplify::remove_dead_blocks(&mut body);
 
     // Update the body's def to become the drop glue.
     // This needs to be updated before the AbortUnwindingCalls pass.
@@ -1104,10 +1104,10 @@ fn create_generator_drop_shim<'tcx>(
         None,
     );
 
-    // Temporary change MirSource to generator's instance so that dump_mir produces more sensible
+    // Temporary change MirSource to coroutine's instance so that dump_mir produces more sensible
     // filename.
     body.source.instance = gen_instance;
-    dump_mir(tcx, false, "generator_drop", &0, &body, |_, _| Ok(()));
+    dump_mir(tcx, false, "coroutine_drop", &0, &body, |_, _| Ok(()));
     body.source.instance = drop_instance;
 
     body
@@ -1182,7 +1182,7 @@ fn can_unwind<'tcx>(tcx: TyCtxt<'tcx>, body: &Body<'tcx>) -> bool {
             | TerminatorKind::UnwindTerminate(_)
             | TerminatorKind::Return
             | TerminatorKind::Unreachable
-            | TerminatorKind::GeneratorDrop
+            | TerminatorKind::CoroutineDrop
             | TerminatorKind::FalseEdge { .. }
             | TerminatorKind::FalseUnwind { .. } => {}
 
@@ -1191,7 +1191,7 @@ fn can_unwind<'tcx>(tcx: TyCtxt<'tcx>, body: &Body<'tcx>) -> bool {
             TerminatorKind::UnwindResume => {}
 
             TerminatorKind::Yield { .. } => {
-                unreachable!("`can_unwind` called before generator transform")
+                unreachable!("`can_unwind` called before coroutine transform")
             }
 
             // These may unwind.
@@ -1206,7 +1206,7 @@ fn can_unwind<'tcx>(tcx: TyCtxt<'tcx>, body: &Body<'tcx>) -> bool {
     false
 }
 
-fn create_generator_resume_function<'tcx>(
+fn create_coroutine_resume_function<'tcx>(
     tcx: TyCtxt<'tcx>,
     transform: TransformVisitor<'tcx>,
     body: &mut Body<'tcx>,
@@ -1214,7 +1214,7 @@ fn create_generator_resume_function<'tcx>(
 ) {
     let can_unwind = can_unwind(tcx, body);
 
-    // Poison the generator when it unwinds
+    // Poison the coroutine when it unwinds
     if can_unwind {
         let source_info = SourceInfo::outermost(body.span);
         let poison_block = body.basic_blocks_mut().push(BasicBlockData {
@@ -1253,34 +1253,34 @@ fn create_generator_resume_function<'tcx>(
     cases.insert(0, (UNRESUMED, START_BLOCK));
 
     // Panic when resumed on the returned or poisoned state
-    let generator_kind = body.generator_kind().unwrap();
+    let coroutine_kind = body.coroutine_kind().unwrap();
 
     if can_unwind {
         cases.insert(
             1,
-            (POISONED, insert_panic_block(tcx, body, ResumedAfterPanic(generator_kind))),
+            (POISONED, insert_panic_block(tcx, body, ResumedAfterPanic(coroutine_kind))),
         );
     }
 
     if can_return {
         cases.insert(
             1,
-            (RETURNED, insert_panic_block(tcx, body, ResumedAfterReturn(generator_kind))),
+            (RETURNED, insert_panic_block(tcx, body, ResumedAfterReturn(coroutine_kind))),
         );
     }
 
     insert_switch(body, cases, &transform, TerminatorKind::Unreachable);
 
-    make_generator_state_argument_indirect(tcx, body);
-    make_generator_state_argument_pinned(tcx, body);
+    make_coroutine_state_argument_indirect(tcx, body);
+    make_coroutine_state_argument_pinned(tcx, body);
 
     // Make sure we remove dead blocks to remove
     // unrelated code from the drop part of the function
-    simplify::remove_dead_blocks(tcx, body);
+    simplify::remove_dead_blocks(body);
 
     pm::run_passes_no_validate(tcx, body, &[&abort_unwinding_calls::AbortUnwindingCalls], None);
 
-    dump_mir(tcx, false, "generator_resume", &0, body, |_, _| Ok(()));
+    dump_mir(tcx, false, "coroutine_resume", &0, body, |_, _| Ok(()));
 }
 
 fn insert_clean_drop(body: &mut Body<'_>) -> BasicBlock {
@@ -1294,7 +1294,7 @@ fn insert_clean_drop(body: &mut Body<'_>) -> BasicBlock {
     };
     let source_info = SourceInfo::outermost(body.span);
 
-    // Create a block to destroy an unresumed generators. This can only destroy upvars.
+    // Create a block to destroy an unresumed coroutine. This can only destroy upvars.
     body.basic_blocks_mut().push(BasicBlockData {
         statements: Vec::new(),
         terminator: Some(Terminator { source_info, kind: term }),
@@ -1302,7 +1302,7 @@ fn insert_clean_drop(body: &mut Body<'_>) -> BasicBlock {
     })
 }
 
-/// An operation that can be performed on a generator.
+/// An operation that can be performed on a coroutine.
 #[derive(PartialEq, Copy, Clone)]
 enum Operation {
     Resume,
@@ -1381,64 +1381,64 @@ fn create_cases<'tcx>(
 }
 
 #[instrument(level = "debug", skip(tcx), ret)]
-pub(crate) fn mir_generator_witnesses<'tcx>(
+pub(crate) fn mir_coroutine_witnesses<'tcx>(
     tcx: TyCtxt<'tcx>,
     def_id: LocalDefId,
-) -> Option<GeneratorLayout<'tcx>> {
+) -> Option<CoroutineLayout<'tcx>> {
     let (body, _) = tcx.mir_promoted(def_id);
     let body = body.borrow();
     let body = &*body;
 
-    // The first argument is the generator type passed by value
+    // The first argument is the coroutine type passed by value
     let gen_ty = body.local_decls[ty::CAPTURE_STRUCT_LOCAL].ty;
 
     // Get the interior types and args which typeck computed
     let movable = match *gen_ty.kind() {
-        ty::Generator(_, _, movability) => movability == hir::Movability::Movable,
+        ty::Coroutine(_, _, movability) => movability == hir::Movability::Movable,
         ty::Error(_) => return None,
-        _ => span_bug!(body.span, "unexpected generator type {}", gen_ty),
+        _ => span_bug!(body.span, "unexpected coroutine type {}", gen_ty),
     };
 
-    // When first entering the generator, move the resume argument into its new local.
+    // Compute the always-storage-live locals, which the liveness analysis below builds on.
     let always_live_locals = always_storage_live_locals(&body);
 
     let liveness_info = locals_live_across_suspend_points(tcx, body, &always_live_locals, movable);
 
     // Extract locals which are live across suspension point into `layout`
-    // `remap` gives a mapping from local indices onto generator struct indices
+    // `remap` gives a mapping from local indices onto coroutine struct indices
     // `storage_liveness` tells us which locals have live storage at suspension points
-    let (_, generator_layout, _) = compute_layout(liveness_info, body);
+    let (_, coroutine_layout, _) = compute_layout(liveness_info, body);
 
-    check_suspend_tys(tcx, &generator_layout, &body);
+    check_suspend_tys(tcx, &coroutine_layout, &body);
 
-    Some(generator_layout)
+    Some(coroutine_layout)
 }
 
 impl<'tcx> MirPass<'tcx> for StateTransform {
     fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
         let Some(yield_ty) = body.yield_ty() else {
-            // This only applies to generators
+            // This only applies to coroutines
             return;
         };
 
-        assert!(body.generator_drop().is_none());
+        assert!(body.coroutine_drop().is_none());
 
-        // The first argument is the generator type passed by value
+        // The first argument is the coroutine type passed by value
         let gen_ty = body.local_decls.raw[1].ty;
 
         // Get the discriminant type and args which typeck computed
         let (discr_ty, movable) = match *gen_ty.kind() {
-            ty::Generator(_, args, movability) => {
-                let args = args.as_generator();
+            ty::Coroutine(_, args, movability) => {
+                let args = args.as_coroutine();
                 (args.discr_ty(tcx), movability == hir::Movability::Movable)
             }
             _ => {
-                tcx.sess.delay_span_bug(body.span, format!("unexpected generator type {gen_ty}"));
+                tcx.sess.delay_span_bug(body.span, format!("unexpected coroutine type {gen_ty}"));
                 return;
             }
         };
 
-        let is_async_kind = matches!(body.generator_kind(), Some(GeneratorKind::Async(_)));
+        let is_async_kind = matches!(body.coroutine_kind(), Some(CoroutineKind::Async(_)));
         let (state_adt_ref, state_args) = if is_async_kind {
             // Compute Poll<return_ty>
             let poll_did = tcx.require_lang_item(LangItem::Poll, None);
@@ -1446,8 +1446,8 @@ impl<'tcx> MirPass<'tcx> for StateTransform {
             let poll_args = tcx.mk_args(&[body.return_ty().into()]);
             (poll_adt_ref, poll_args)
         } else {
-            // Compute GeneratorState<yield_ty, return_ty>
-            let state_did = tcx.require_lang_item(LangItem::GeneratorState, None);
+            // Compute CoroutineState<yield_ty, return_ty>
+            let state_did = tcx.require_lang_item(LangItem::CoroutineState, None);
             let state_adt_ref = tcx.adt_def(state_did);
             let state_args = tcx.mk_args(&[yield_ty.into(), body.return_ty().into()]);
             (state_adt_ref, state_args)
@@ -1465,8 +1465,8 @@ impl<'tcx> MirPass<'tcx> for StateTransform {
 
         // We also replace the resume argument and insert an `Assign`.
         // This is needed because the resume argument `_2` might be live across a `yield`, in which
-        // case there is no `Assign` to it that the transform can turn into a store to the generator
-        // state. After the yield the slot in the generator state would then be uninitialized.
+        // case there is no `Assign` to it that the transform can turn into a store to the coroutine
+        // state. After the yield the slot in the coroutine state would then be uninitialized.
         let resume_local = Local::new(2);
         let resume_ty = if is_async_kind {
             Ty::new_task_context(tcx)
@@ -1475,7 +1475,7 @@ impl<'tcx> MirPass<'tcx> for StateTransform {
         };
         let new_resume_local = replace_local(resume_local, resume_ty, body, tcx);
 
-        // When first entering the generator, move the resume argument into its new local.
+        // When first entering the coroutine, move the resume argument into its new local.
         let source_info = SourceInfo::outermost(body.span);
         let stmts = &mut body.basic_blocks_mut()[START_BLOCK].statements;
         stmts.insert(
@@ -1495,7 +1495,7 @@ impl<'tcx> MirPass<'tcx> for StateTransform {
             locals_live_across_suspend_points(tcx, body, &always_live_locals, movable);
 
         if tcx.sess.opts.unstable_opts.validate_mir {
-            let mut vis = EnsureGeneratorFieldAssignmentsNeverAlias {
+            let mut vis = EnsureCoroutineFieldAssignmentsNeverAlias {
                 assigned_local: None,
                 saved_locals: &liveness_info.saved_locals,
                 storage_conflicts: &liveness_info.storage_conflicts,
@@ -1505,16 +1505,16 @@ impl<'tcx> MirPass<'tcx> for StateTransform {
         }
 
         // Extract locals which are live across suspension point into `layout`
-        // `remap` gives a mapping from local indices onto generator struct indices
+        // `remap` gives a mapping from local indices onto coroutine struct indices
         // `storage_liveness` tells us which locals have live storage at suspension points
         let (remap, layout, storage_liveness) = compute_layout(liveness_info, body);
 
         let can_return = can_return(tcx, body, tcx.param_env(body.source.def_id()));
 
-        // Run the transformation which converts Places from Local to generator struct
+        // Run the transformation which converts Places from Local to coroutine struct
         // accesses for locals in `remap`.
-        // It also rewrites `return x` and `yield y` as writing a new generator state and returning
-        // either GeneratorState::Complete(x) and GeneratorState::Yielded(y),
+        // It also rewrites `return x` and `yield y` as writing a new coroutine state and returning
+        // either CoroutineState::Complete(x) and CoroutineState::Yielded(y),
         // or Poll::Ready(x) and Poll::Pending respectively depending on `is_async_kind`.
         let mut transform = TransformVisitor {
             tcx,
@@ -1541,30 +1541,30 @@ impl<'tcx> MirPass<'tcx> for StateTransform {
             var.argument_index = None;
         }
 
-        body.generator.as_mut().unwrap().yield_ty = None;
-        body.generator.as_mut().unwrap().generator_layout = Some(layout);
+        body.coroutine.as_mut().unwrap().yield_ty = None;
+        body.coroutine.as_mut().unwrap().coroutine_layout = Some(layout);
 
-        // Insert `drop(generator_struct)` which is used to drop upvars for generators in
+        // Insert `drop(coroutine_struct)` which is used to drop upvars for coroutines in
         // the unresumed state.
-        // This is expanded to a drop ladder in `elaborate_generator_drops`.
+        // This is expanded to a drop ladder in `elaborate_coroutine_drops`.
         let drop_clean = insert_clean_drop(body);
 
-        dump_mir(tcx, false, "generator_pre-elab", &0, body, |_, _| Ok(()));
+        dump_mir(tcx, false, "coroutine_pre-elab", &0, body, |_, _| Ok(()));
 
-        // Expand `drop(generator_struct)` to a drop ladder which destroys upvars.
+        // Expand `drop(coroutine_struct)` to a drop ladder which destroys upvars.
         // If any upvars are moved out of, drop elaboration will handle upvar destruction.
         // However we need to also elaborate the code generated by `insert_clean_drop`.
-        elaborate_generator_drops(tcx, body);
+        elaborate_coroutine_drops(tcx, body);
 
-        dump_mir(tcx, false, "generator_post-transform", &0, body, |_, _| Ok(()));
+        dump_mir(tcx, false, "coroutine_post-transform", &0, body, |_, _| Ok(()));
 
-        // Create a copy of our MIR and use it to create the drop shim for the generator
-        let drop_shim = create_generator_drop_shim(tcx, &transform, gen_ty, body, drop_clean);
+        // Create a copy of our MIR and use it to create the drop shim for the coroutine
+        let drop_shim = create_coroutine_drop_shim(tcx, &transform, gen_ty, body, drop_clean);
 
-        body.generator.as_mut().unwrap().generator_drop = Some(drop_shim);
+        body.coroutine.as_mut().unwrap().coroutine_drop = Some(drop_shim);
 
-        // Create the Generator::resume / Future::poll function
-        create_generator_resume_function(tcx, transform, body, can_return);
+        // Create the Coroutine::resume / Future::poll function
+        create_coroutine_resume_function(tcx, transform, body, can_return);
 
         // Run derefer to fix Derefs that are not in the first place
         deref_finder(tcx, body);
@@ -1572,25 +1572,25 @@ impl<'tcx> MirPass<'tcx> for StateTransform {
 }
 
 /// Looks for any assignments between locals (e.g., `_4 = _5`) that will both be converted to fields
-/// in the generator state machine but whose storage is not marked as conflicting
+/// in the coroutine state machine but whose storage is not marked as conflicting
 ///
 /// Validation needs to happen immediately *before* `TransformVisitor` is invoked, not after.
 ///
 /// This condition would arise when the assignment is the last use of `_5` but the initial
 /// definition of `_4` if we weren't extra careful to mark all locals used inside a statement as
-/// conflicting. Non-conflicting generator saved locals may be stored at the same location within
-/// the generator state machine, which would result in ill-formed MIR: the left-hand and right-hand
+/// conflicting. Non-conflicting coroutine saved locals may be stored at the same location within
+/// the coroutine state machine, which would result in ill-formed MIR: the left-hand and right-hand
 /// sides of an assignment may not alias. This caused a miscompilation in [#73137].
 ///
 /// [#73137]: https://github.com/rust-lang/rust/issues/73137
-struct EnsureGeneratorFieldAssignmentsNeverAlias<'a> {
-    saved_locals: &'a GeneratorSavedLocals,
-    storage_conflicts: &'a BitMatrix<GeneratorSavedLocal, GeneratorSavedLocal>,
-    assigned_local: Option<GeneratorSavedLocal>,
+struct EnsureCoroutineFieldAssignmentsNeverAlias<'a> {
+    saved_locals: &'a CoroutineSavedLocals,
+    storage_conflicts: &'a BitMatrix<CoroutineSavedLocal, CoroutineSavedLocal>,
+    assigned_local: Option<CoroutineSavedLocal>,
 }
 
-impl EnsureGeneratorFieldAssignmentsNeverAlias<'_> {
-    fn saved_local_for_direct_place(&self, place: Place<'_>) -> Option<GeneratorSavedLocal> {
+impl EnsureCoroutineFieldAssignmentsNeverAlias<'_> {
+    fn saved_local_for_direct_place(&self, place: Place<'_>) -> Option<CoroutineSavedLocal> {
         if place.is_indirect() {
             return None;
         }
@@ -1609,7 +1609,7 @@ impl EnsureGeneratorFieldAssignmentsNeverAlias<'_> {
     }
 }
 
-impl<'tcx> Visitor<'tcx> for EnsureGeneratorFieldAssignmentsNeverAlias<'_> {
+impl<'tcx> Visitor<'tcx> for EnsureCoroutineFieldAssignmentsNeverAlias<'_> {
     fn visit_place(&mut self, place: &Place<'tcx>, context: PlaceContext, location: Location) {
         let Some(lhs) = self.assigned_local else {
             // This visitor only invokes `visit_place` for the right-hand side of an assignment
@@ -1624,7 +1624,7 @@ impl<'tcx> Visitor<'tcx> for EnsureGeneratorFieldAssignmentsNeverAlias<'_> {
 
         if !self.storage_conflicts.contains(lhs, rhs) {
             bug!(
-                "Assignment between generator saved locals whose storage is not \
+                "Assignment between coroutine saved locals whose storage is not \
                     marked as conflicting: {:?}: {:?} = {:?}",
                 location,
                 lhs,
@@ -1691,14 +1691,14 @@ impl<'tcx> Visitor<'tcx> for EnsureGeneratorFieldAssignmentsNeverAlias<'_> {
             | TerminatorKind::Unreachable
             | TerminatorKind::Drop { .. }
             | TerminatorKind::Assert { .. }
-            | TerminatorKind::GeneratorDrop
+            | TerminatorKind::CoroutineDrop
             | TerminatorKind::FalseEdge { .. }
             | TerminatorKind::FalseUnwind { .. } => {}
         }
     }
 }
 
-fn check_suspend_tys<'tcx>(tcx: TyCtxt<'tcx>, layout: &GeneratorLayout<'tcx>, body: &Body<'tcx>) {
+fn check_suspend_tys<'tcx>(tcx: TyCtxt<'tcx>, layout: &CoroutineLayout<'tcx>, body: &Body<'tcx>) {
     let mut linted_tys = FxHashSet::default();
 
     // We want a user-facing param-env.
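For orientation, a minimal self-contained sketch (not the actual MIR emitted by this pass; all names are illustrative) of the state-machine shape that the coroutine transform lowers to: locals live across `yield` become part of the coroutine state, `resume` switches on the discriminant, and resuming a returned or poisoned coroutine panics.

enum SketchState<Y, R> {
    Yielded(Y),
    Complete(R),
}

enum SketchCoroutine {
    Unresumed,
    Suspend0,
    Suspend1,
    Returned,
    Poisoned,
}

impl SketchCoroutine {
    fn resume(&mut self) -> SketchState<i32, &'static str> {
        // Mark the coroutine as poisoned while resuming, so a panic below leaves it
        // in the poisoned state (mirroring the unwind poisoning added by the pass).
        match std::mem::replace(self, SketchCoroutine::Poisoned) {
            SketchCoroutine::Unresumed => {
                *self = SketchCoroutine::Suspend0;
                SketchState::Yielded(1)
            }
            SketchCoroutine::Suspend0 => {
                *self = SketchCoroutine::Suspend1;
                SketchState::Yielded(2)
            }
            SketchCoroutine::Suspend1 => {
                *self = SketchCoroutine::Returned;
                SketchState::Complete("done")
            }
            SketchCoroutine::Returned => panic!("coroutine resumed after completion"),
            SketchCoroutine::Poisoned => panic!("coroutine resumed after panicking"),
        }
    }
}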
diff --git a/compiler/rustc_mir_transform/src/cost_checker.rs b/compiler/rustc_mir_transform/src/cost_checker.rs
new file mode 100644
index 00000000000..9bb26693cb2
--- /dev/null
+++ b/compiler/rustc_mir_transform/src/cost_checker.rs
@@ -0,0 +1,98 @@
+use rustc_middle::mir::visit::*;
+use rustc_middle::mir::*;
+use rustc_middle::ty::{self, ParamEnv, Ty, TyCtxt};
+
+const INSTR_COST: usize = 5;
+const CALL_PENALTY: usize = 25;
+const LANDINGPAD_PENALTY: usize = 50;
+const RESUME_PENALTY: usize = 45;
+
+/// Estimates the cost of a MIR body by summing per-statement and per-terminator penalties,
+/// e.g. to decide whether a callee is cheap enough to inline.
+#[derive(Clone)]
+pub(crate) struct CostChecker<'b, 'tcx> {
+    tcx: TyCtxt<'tcx>,
+    param_env: ParamEnv<'tcx>,
+    cost: usize,
+    callee_body: &'b Body<'tcx>,
+    instance: Option<ty::Instance<'tcx>>,
+}
+
+impl<'b, 'tcx> CostChecker<'b, 'tcx> {
+    pub fn new(
+        tcx: TyCtxt<'tcx>,
+        param_env: ParamEnv<'tcx>,
+        instance: Option<ty::Instance<'tcx>>,
+        callee_body: &'b Body<'tcx>,
+    ) -> CostChecker<'b, 'tcx> {
+        CostChecker { tcx, param_env, callee_body, instance, cost: 0 }
+    }
+
+    pub fn cost(&self) -> usize {
+        self.cost
+    }
+
+    fn instantiate_ty(&self, v: Ty<'tcx>) -> Ty<'tcx> {
+        if let Some(instance) = self.instance {
+            instance.instantiate_mir(self.tcx, ty::EarlyBinder::bind(&v))
+        } else {
+            v
+        }
+    }
+}
+
+impl<'tcx> Visitor<'tcx> for CostChecker<'_, 'tcx> {
+    fn visit_statement(&mut self, statement: &Statement<'tcx>, _: Location) {
+        // Don't count StorageLive/StorageDead in the inlining cost.
+        match statement.kind {
+            StatementKind::StorageLive(_)
+            | StatementKind::StorageDead(_)
+            | StatementKind::Deinit(_)
+            | StatementKind::Nop => {}
+            _ => self.cost += INSTR_COST,
+        }
+    }
+
+    fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, _: Location) {
+        let tcx = self.tcx;
+        match terminator.kind {
+            TerminatorKind::Drop { ref place, unwind, .. } => {
+                // If the place doesn't actually need dropping, treat it like a regular goto.
+                let ty = self.instantiate_ty(place.ty(self.callee_body, tcx).ty);
+                if ty.needs_drop(tcx, self.param_env) {
+                    self.cost += CALL_PENALTY;
+                    if let UnwindAction::Cleanup(_) = unwind {
+                        self.cost += LANDINGPAD_PENALTY;
+                    }
+                } else {
+                    self.cost += INSTR_COST;
+                }
+            }
+            TerminatorKind::Call { func: Operand::Constant(ref f), unwind, .. } => {
+                let fn_ty = self.instantiate_ty(f.const_.ty());
+                self.cost += if let ty::FnDef(def_id, _) = *fn_ty.kind() && tcx.is_intrinsic(def_id) {
+                    // Don't give intrinsics the extra penalty for calls
+                    INSTR_COST
+                } else {
+                    CALL_PENALTY
+                };
+                if let UnwindAction::Cleanup(_) = unwind {
+                    self.cost += LANDINGPAD_PENALTY;
+                }
+            }
+            TerminatorKind::Assert { unwind, .. } => {
+                self.cost += CALL_PENALTY;
+                if let UnwindAction::Cleanup(_) = unwind {
+                    self.cost += LANDINGPAD_PENALTY;
+                }
+            }
+            TerminatorKind::UnwindResume => self.cost += RESUME_PENALTY,
+            TerminatorKind::InlineAsm { unwind, .. } => {
+                self.cost += INSTR_COST;
+                if let UnwindAction::Cleanup(_) = unwind {
+                    self.cost += LANDINGPAD_PENALTY;
+                }
+            }
+            _ => self.cost += INSTR_COST,
+        }
+    }
+}
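As a rough illustration of how a pass such as the inliner might consume this visitor (the helper name and threshold are hypothetical, not compiler code; `visit_body` is the standard `Visitor` method):

fn callee_is_cheap_enough<'tcx>(
    tcx: TyCtxt<'tcx>,
    param_env: ParamEnv<'tcx>,
    instance: ty::Instance<'tcx>,
    callee_body: &Body<'tcx>,
    threshold: usize,
) -> bool {
    // Walk every statement and terminator of the callee, accumulating the cost.
    let mut checker = CostChecker::new(tcx, param_env, Some(instance), callee_body);
    checker.visit_body(callee_body);
    checker.cost() <= threshold
}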
diff --git a/compiler/rustc_mir_transform/src/coverage/counters.rs b/compiler/rustc_mir_transform/src/coverage/counters.rs
index d56d4ad4f1e..d07f59bc72a 100644
--- a/compiler/rustc_mir_transform/src/coverage/counters.rs
+++ b/compiler/rustc_mir_transform/src/coverage/counters.rs
@@ -1,10 +1,8 @@
 use super::Error;
 
 use super::graph;
-use super::spans;
 
 use graph::{BasicCoverageBlock, BcbBranch, CoverageGraph, TraverseCoverageGraphWithLoops};
-use spans::CoverageSpan;
 
 use rustc_data_structures::fx::FxHashMap;
 use rustc_data_structures::graph::WithNumNodes;
@@ -21,7 +19,7 @@ const NESTED_INDENT: &str = "    ";
 #[derive(Clone)]
 pub(super) enum BcbCounter {
     Counter { id: CounterId },
-    Expression { id: ExpressionId, lhs: Operand, op: Op, rhs: Operand },
+    Expression { id: ExpressionId },
 }
 
 impl BcbCounter {
@@ -29,10 +27,10 @@ impl BcbCounter {
         matches!(self, Self::Expression { .. })
     }
 
-    pub(super) fn as_operand(&self) -> Operand {
+    pub(super) fn as_term(&self) -> CovTerm {
         match *self {
-            BcbCounter::Counter { id, .. } => Operand::Counter(id),
-            BcbCounter::Expression { id, .. } => Operand::Expression(id),
+            BcbCounter::Counter { id, .. } => CovTerm::Counter(id),
+            BcbCounter::Expression { id, .. } => CovTerm::Expression(id),
         }
     }
 }
@@ -41,17 +39,7 @@ impl Debug for BcbCounter {
     fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
         match self {
             Self::Counter { id, .. } => write!(fmt, "Counter({:?})", id.index()),
-            Self::Expression { id, lhs, op, rhs } => write!(
-                fmt,
-                "Expression({:?}) = {:?} {} {:?}",
-                id.index(),
-                lhs,
-                match op {
-                    Op::Add => "+",
-                    Op::Subtract => "-",
-                },
-                rhs,
-            ),
+            Self::Expression { id } => write!(fmt, "Expression({:?})", id.index()),
         }
     }
 }
@@ -60,7 +48,6 @@ impl Debug for BcbCounter {
 /// associated with nodes/edges in the BCB graph.
 pub(super) struct CoverageCounters {
     next_counter_id: CounterId,
-    next_expression_id: ExpressionId,
 
     /// Coverage counters/expressions that are associated with individual BCBs.
     bcb_counters: IndexVec<BasicCoverageBlock, Option<BcbCounter>>,
@@ -71,10 +58,9 @@ pub(super) struct CoverageCounters {
     /// Only used by debug assertions, to verify that BCBs with incoming edge
     /// counters do not have their own physical counters (expressions are allowed).
     bcb_has_incoming_edge_counters: BitSet<BasicCoverageBlock>,
-    /// Expression nodes that are not directly associated with any particular
-    /// BCB/edge, but are needed as operands to more complex expressions.
-    /// These are always [`BcbCounter::Expression`].
-    pub(super) intermediate_expressions: Vec<BcbCounter>,
+    /// Table of expression data, associating each expression ID with its
+    /// corresponding operator (+ or -) and its LHS/RHS operands.
+    expressions: IndexVec<ExpressionId, Expression>,
 }
 
 impl CoverageCounters {
@@ -83,24 +69,22 @@ impl CoverageCounters {
 
         Self {
             next_counter_id: CounterId::START,
-            next_expression_id: ExpressionId::START,
-
             bcb_counters: IndexVec::from_elem_n(None, num_bcbs),
             bcb_edge_counters: FxHashMap::default(),
             bcb_has_incoming_edge_counters: BitSet::new_empty(num_bcbs),
-            intermediate_expressions: Vec::new(),
+            expressions: IndexVec::new(),
         }
     }
 
     /// Makes [`BcbCounter`] `Counter`s and `Expressions` for the `BasicCoverageBlock`s directly or
-    /// indirectly associated with `CoverageSpans`, and accumulates additional `Expression`s
+    /// indirectly associated with coverage spans, and accumulates additional `Expression`s
     /// representing intermediate values.
     pub fn make_bcb_counters(
         &mut self,
         basic_coverage_blocks: &CoverageGraph,
-        coverage_spans: &[CoverageSpan],
+        bcb_has_coverage_spans: impl Fn(BasicCoverageBlock) -> bool,
     ) -> Result<(), Error> {
-        MakeBcbCounters::new(self, basic_coverage_blocks).make_bcb_counters(coverage_spans)
+        MakeBcbCounters::new(self, basic_coverage_blocks).make_bcb_counters(bcb_has_coverage_spans)
     }
 
     fn make_counter(&mut self) -> BcbCounter {
@@ -108,35 +92,32 @@ impl CoverageCounters {
         BcbCounter::Counter { id }
     }
 
-    fn make_expression(&mut self, lhs: Operand, op: Op, rhs: Operand) -> BcbCounter {
-        let id = self.next_expression();
-        BcbCounter::Expression { id, lhs, op, rhs }
-    }
-
-    pub fn make_identity_counter(&mut self, counter_operand: Operand) -> BcbCounter {
-        self.make_expression(counter_operand, Op::Add, Operand::Zero)
+    fn make_expression(&mut self, lhs: CovTerm, op: Op, rhs: CovTerm) -> BcbCounter {
+        let id = self.expressions.push(Expression { lhs, op, rhs });
+        BcbCounter::Expression { id }
     }
 
     /// Counter IDs start from one and go up.
     fn next_counter(&mut self) -> CounterId {
         let next = self.next_counter_id;
-        self.next_counter_id = next.next_id();
+        self.next_counter_id = self.next_counter_id + 1;
         next
     }
 
-    /// Expression IDs start from 0 and go up.
-    /// (Counter IDs and Expression IDs are distinguished by the `Operand` enum.)
-    fn next_expression(&mut self) -> ExpressionId {
-        let next = self.next_expression_id;
-        self.next_expression_id = next.next_id();
-        next
+    pub(super) fn num_counters(&self) -> usize {
+        self.next_counter_id.as_usize()
+    }
+
+    #[cfg(test)]
+    pub(super) fn num_expressions(&self) -> usize {
+        self.expressions.len()
     }
 
     fn set_bcb_counter(
         &mut self,
         bcb: BasicCoverageBlock,
         counter_kind: BcbCounter,
-    ) -> Result<Operand, Error> {
+    ) -> Result<CovTerm, Error> {
         debug_assert!(
             // If the BCB has an edge counter (to be injected into a new `BasicBlock`), it can also
             // have an expression (to be injected into an existing `BasicBlock` represented by this
@@ -144,14 +125,14 @@ impl CoverageCounters {
             counter_kind.is_expression() || !self.bcb_has_incoming_edge_counters.contains(bcb),
             "attempt to add a `Counter` to a BCB target with existing incoming edge counters"
         );
-        let operand = counter_kind.as_operand();
+        let term = counter_kind.as_term();
         if let Some(replaced) = self.bcb_counters[bcb].replace(counter_kind) {
             Error::from_string(format!(
                 "attempt to set a BasicCoverageBlock coverage counter more than once; \
                 {bcb:?} already had counter {replaced:?}",
             ))
         } else {
-            Ok(operand)
+            Ok(term)
         }
     }
 
@@ -160,7 +141,7 @@ impl CoverageCounters {
         from_bcb: BasicCoverageBlock,
         to_bcb: BasicCoverageBlock,
         counter_kind: BcbCounter,
-    ) -> Result<Operand, Error> {
+    ) -> Result<CovTerm, Error> {
         if level_enabled!(tracing::Level::DEBUG) {
             // If the BCB has an edge counter (to be injected into a new `BasicBlock`), it can also
             // have an expression (to be injected into an existing `BasicBlock` represented by this
@@ -173,14 +154,14 @@ impl CoverageCounters {
             }
         }
         self.bcb_has_incoming_edge_counters.insert(to_bcb);
-        let operand = counter_kind.as_operand();
+        let term = counter_kind.as_term();
         if let Some(replaced) = self.bcb_edge_counters.insert((from_bcb, to_bcb), counter_kind) {
             Error::from_string(format!(
                 "attempt to set an edge counter more than once; from_bcb: \
                 {from_bcb:?} already had counter {replaced:?}",
             ))
         } else {
-            Ok(operand)
+            Ok(term)
         }
     }
 
@@ -188,27 +169,31 @@ impl CoverageCounters {
         self.bcb_counters[bcb].as_ref()
     }
 
-    pub(super) fn take_bcb_counter(&mut self, bcb: BasicCoverageBlock) -> Option<BcbCounter> {
-        self.bcb_counters[bcb].take()
+    pub(super) fn bcb_node_counters(
+        &self,
+    ) -> impl Iterator<Item = (BasicCoverageBlock, &BcbCounter)> {
+        self.bcb_counters
+            .iter_enumerated()
+            .filter_map(|(bcb, counter_kind)| Some((bcb, counter_kind.as_ref()?)))
     }
 
-    pub(super) fn drain_bcb_counters(
-        &mut self,
-    ) -> impl Iterator<Item = (BasicCoverageBlock, BcbCounter)> + '_ {
-        self.bcb_counters
-            .iter_enumerated_mut()
-            .filter_map(|(bcb, counter)| Some((bcb, counter.take()?)))
+    /// For each edge in the BCB graph that has an associated counter, yields
+    /// that edge's *from* and *to* nodes, and its counter.
+    pub(super) fn bcb_edge_counters(
+        &self,
+    ) -> impl Iterator<Item = (BasicCoverageBlock, BasicCoverageBlock, &BcbCounter)> {
+        self.bcb_edge_counters
+            .iter()
+            .map(|(&(from_bcb, to_bcb), counter_kind)| (from_bcb, to_bcb, counter_kind))
     }
 
-    pub(super) fn drain_bcb_edge_counters(
-        &mut self,
-    ) -> impl Iterator<Item = ((BasicCoverageBlock, BasicCoverageBlock), BcbCounter)> + '_ {
-        self.bcb_edge_counters.drain()
+    pub(super) fn take_expressions(&mut self) -> IndexVec<ExpressionId, Expression> {
+        std::mem::take(&mut self.expressions)
     }
 }
 
 /// Traverse the `CoverageGraph` and add either a `Counter` or `Expression` to every BCB, to be
-/// injected with `CoverageSpan`s. `Expressions` have no runtime overhead, so if a viable expression
+/// injected with coverage spans. `Expressions` have no runtime overhead, so if a viable expression
 /// (adding or subtracting two other counters or expressions) can compute the same result as an
 /// embedded counter, an `Expression` should be used.
 struct MakeBcbCounters<'a> {
@@ -234,17 +219,14 @@ impl<'a> MakeBcbCounters<'a> {
     /// Returns any non-code-span expressions created to represent intermediate values (such as to
     /// add two counters so the result can be subtracted from another counter), or an Error with
     /// message for subsequent debugging.
-    fn make_bcb_counters(&mut self, coverage_spans: &[CoverageSpan]) -> Result<(), Error> {
+    fn make_bcb_counters(
+        &mut self,
+        bcb_has_coverage_spans: impl Fn(BasicCoverageBlock) -> bool,
+    ) -> Result<(), Error> {
         debug!("make_bcb_counters(): adding a counter or expression to each BasicCoverageBlock");
-        let num_bcbs = self.basic_coverage_blocks.num_nodes();
-
-        let mut bcbs_with_coverage = BitSet::new_empty(num_bcbs);
-        for covspan in coverage_spans {
-            bcbs_with_coverage.insert(covspan.bcb);
-        }
 
         // Walk the `CoverageGraph`. For each `BasicCoverageBlock` node with an associated
-        // `CoverageSpan`, add a counter. If the `BasicCoverageBlock` branches, add a counter or
+        // coverage span, add a counter. If the `BasicCoverageBlock` branches, add a counter or
         // expression to each branch `BasicCoverageBlock` (if the branch BCB has only one incoming
         // edge) or edge from the branching BCB to the branch BCB (if the branch BCB has multiple
         // incoming edges).
@@ -254,17 +236,17 @@ impl<'a> MakeBcbCounters<'a> {
         // the loop. The `traversal` state includes a `context_stack`, providing a way to know if
         // the current BCB is in one or more nested loops or not.
         let mut traversal = TraverseCoverageGraphWithLoops::new(&self.basic_coverage_blocks);
-        while let Some(bcb) = traversal.next(self.basic_coverage_blocks) {
-            if bcbs_with_coverage.contains(bcb) {
-                debug!("{:?} has at least one `CoverageSpan`. Get or make its counter", bcb);
+        while let Some(bcb) = traversal.next() {
+            if bcb_has_coverage_spans(bcb) {
+                debug!("{:?} has at least one coverage span. Get or make its counter", bcb);
                 let branching_counter_operand = self.get_or_make_counter_operand(bcb)?;
 
                 if self.bcb_needs_branch_counters(bcb) {
-                    self.make_branch_counters(&mut traversal, bcb, branching_counter_operand)?;
+                    self.make_branch_counters(&traversal, bcb, branching_counter_operand)?;
                 }
             } else {
                 debug!(
-                    "{:?} does not have any `CoverageSpan`s. A counter will only be added if \
+                    "{:?} does not have any coverage spans. A counter will only be added if \
                     and when a covered BCB has an expression dependency.",
                     bcb,
                 );
@@ -283,9 +265,9 @@ impl<'a> MakeBcbCounters<'a> {
 
     fn make_branch_counters(
         &mut self,
-        traversal: &mut TraverseCoverageGraphWithLoops,
+        traversal: &TraverseCoverageGraphWithLoops<'_>,
         branching_bcb: BasicCoverageBlock,
-        branching_counter_operand: Operand,
+        branching_counter_operand: CovTerm,
     ) -> Result<(), Error> {
         let branches = self.bcb_branches(branching_bcb);
         debug!(
@@ -333,8 +315,7 @@ impl<'a> MakeBcbCounters<'a> {
                         sumup_counter_operand,
                     );
                     debug!("  [new intermediate expression: {:?}]", intermediate_expression);
-                    let intermediate_expression_operand = intermediate_expression.as_operand();
-                    self.coverage_counters.intermediate_expressions.push(intermediate_expression);
+                    let intermediate_expression_operand = intermediate_expression.as_term();
                     some_sumup_counter_operand.replace(intermediate_expression_operand);
                 }
             }
@@ -365,7 +346,7 @@ impl<'a> MakeBcbCounters<'a> {
         Ok(())
     }
 
-    fn get_or_make_counter_operand(&mut self, bcb: BasicCoverageBlock) -> Result<Operand, Error> {
+    fn get_or_make_counter_operand(&mut self, bcb: BasicCoverageBlock) -> Result<CovTerm, Error> {
         self.recursive_get_or_make_counter_operand(bcb, 1)
     }
 
@@ -373,7 +354,7 @@ impl<'a> MakeBcbCounters<'a> {
         &mut self,
         bcb: BasicCoverageBlock,
         debug_indent_level: usize,
-    ) -> Result<Operand, Error> {
+    ) -> Result<CovTerm, Error> {
         // If the BCB already has a counter, return it.
         if let Some(counter_kind) = &self.coverage_counters.bcb_counters[bcb] {
             debug!(
@@ -382,7 +363,7 @@ impl<'a> MakeBcbCounters<'a> {
                 bcb,
                 counter_kind,
             );
-            return Ok(counter_kind.as_operand());
+            return Ok(counter_kind.as_term());
         }
 
         // A BCB with only one incoming edge gets a simple `Counter` (via `make_counter()`).
@@ -446,8 +427,7 @@ impl<'a> MakeBcbCounters<'a> {
                     NESTED_INDENT.repeat(debug_indent_level),
                     intermediate_expression
                 );
-                let intermediate_expression_operand = intermediate_expression.as_operand();
-                self.coverage_counters.intermediate_expressions.push(intermediate_expression);
+                let intermediate_expression_operand = intermediate_expression.as_term();
                 some_sumup_edge_counter_operand.replace(intermediate_expression_operand);
             }
         }
@@ -469,7 +449,7 @@ impl<'a> MakeBcbCounters<'a> {
         &mut self,
         from_bcb: BasicCoverageBlock,
         to_bcb: BasicCoverageBlock,
-    ) -> Result<Operand, Error> {
+    ) -> Result<CovTerm, Error> {
         self.recursive_get_or_make_edge_counter_operand(from_bcb, to_bcb, 1)
     }
 
@@ -478,7 +458,7 @@ impl<'a> MakeBcbCounters<'a> {
         from_bcb: BasicCoverageBlock,
         to_bcb: BasicCoverageBlock,
         debug_indent_level: usize,
-    ) -> Result<Operand, Error> {
+    ) -> Result<CovTerm, Error> {
         // If the source BCB has only one successor (assumed to be the given target), an edge
         // counter is unnecessary. Just get or make a counter for the source BCB.
         let successors = self.bcb_successors(from_bcb).iter();
@@ -497,7 +477,7 @@ impl<'a> MakeBcbCounters<'a> {
                 to_bcb,
                 counter_kind
             );
-            return Ok(counter_kind.as_operand());
+            return Ok(counter_kind.as_term());
         }
 
         // Make a new counter to count this edge.
@@ -516,21 +496,14 @@ impl<'a> MakeBcbCounters<'a> {
     /// found, select any branch.
     fn choose_preferred_expression_branch(
         &self,
-        traversal: &TraverseCoverageGraphWithLoops,
+        traversal: &TraverseCoverageGraphWithLoops<'_>,
         branches: &[BcbBranch],
     ) -> BcbBranch {
-        let branch_needs_a_counter = |branch: &BcbBranch| self.branch_has_no_counter(branch);
-
-        let some_reloop_branch = self.find_some_reloop_branch(traversal, &branches);
-        if let Some(reloop_branch_without_counter) =
-            some_reloop_branch.filter(branch_needs_a_counter)
-        {
-            debug!(
-                "Selecting reloop_branch={:?} that still needs a counter, to get the \
-                `Expression`",
-                reloop_branch_without_counter
-            );
-            reloop_branch_without_counter
+        let good_reloop_branch = self.find_good_reloop_branch(traversal, &branches);
+        if let Some(reloop_branch) = good_reloop_branch {
+            assert!(self.branch_has_no_counter(&reloop_branch));
+            debug!("Selecting reloop branch {reloop_branch:?} to get an expression");
+            reloop_branch
         } else {
             let &branch_without_counter =
                 branches.iter().find(|&branch| self.branch_has_no_counter(branch)).expect(
@@ -547,75 +520,52 @@ impl<'a> MakeBcbCounters<'a> {
         }
     }
 
-    /// At most, one of the branches (or its edge, from the branching_bcb, if the branch has
-    /// multiple incoming edges) can have a counter computed by expression.
-    ///
-    /// If at least one of the branches leads outside of a loop (`found_loop_exit` is
-    /// true), and at least one other branch does not exit the loop (the first of which
-    /// is captured in `some_reloop_branch`), it's likely any reloop branch will be
-    /// executed far more often than loop exit branch, making the reloop branch a better
-    /// candidate for an expression.
-    fn find_some_reloop_branch(
+    /// Tries to find a branch that leads back to the top of a loop, and that
+    /// doesn't already have a counter. Such branches are good candidates to
+    /// be given an expression (instead of a physical counter), because they
+    /// will tend to be executed more times than a loop-exit branch.
+    fn find_good_reloop_branch(
         &self,
-        traversal: &TraverseCoverageGraphWithLoops,
+        traversal: &TraverseCoverageGraphWithLoops<'_>,
         branches: &[BcbBranch],
     ) -> Option<BcbBranch> {
-        let branch_needs_a_counter = |branch: &BcbBranch| self.branch_has_no_counter(branch);
-
-        let mut some_reloop_branch: Option<BcbBranch> = None;
-        for context in traversal.context_stack.iter().rev() {
-            if let Some((backedge_from_bcbs, _)) = &context.loop_backedges {
-                let mut found_loop_exit = false;
-                for &branch in branches.iter() {
-                    if backedge_from_bcbs.iter().any(|&backedge_from_bcb| {
-                        self.bcb_dominates(branch.target_bcb, backedge_from_bcb)
-                    }) {
-                        if let Some(reloop_branch) = some_reloop_branch {
-                            if self.branch_has_no_counter(&reloop_branch) {
-                                // we already found a candidate reloop_branch that still
-                                // needs a counter
-                                continue;
-                            }
-                        }
-                        // The path from branch leads back to the top of the loop. Set this
-                        // branch as the `reloop_branch`. If this branch already has a
-                        // counter, and we find another reloop branch that doesn't have a
-                        // counter yet, that branch will be selected as the `reloop_branch`
-                        // instead.
-                        some_reloop_branch = Some(branch);
-                    } else {
-                        // The path from branch leads outside this loop
-                        found_loop_exit = true;
+        // Consider each loop on the current traversal context stack, top-down.
+        for reloop_bcbs in traversal.reloop_bcbs_per_loop() {
+            let mut all_branches_exit_this_loop = true;
+
+            // Try to find a branch that doesn't exit this loop and doesn't
+            // already have a counter.
+            for &branch in branches {
+                // A branch is a reloop branch if it dominates any BCB that has
+                // an edge back to the loop header. (Other branches are exits.)
+                let is_reloop_branch = reloop_bcbs.iter().any(|&reloop_bcb| {
+                    self.basic_coverage_blocks.dominates(branch.target_bcb, reloop_bcb)
+                });
+
+                if is_reloop_branch {
+                    all_branches_exit_this_loop = false;
+                    if self.branch_has_no_counter(&branch) {
+                        // We found a good branch to be given an expression.
+                        return Some(branch);
                     }
-                    if found_loop_exit
-                        && some_reloop_branch.filter(branch_needs_a_counter).is_some()
-                    {
-                        // Found both a branch that exits the loop and a branch that returns
-                        // to the top of the loop (`reloop_branch`), and the `reloop_branch`
-                        // doesn't already have a counter.
-                        break;
-                    }
-                }
-                if !found_loop_exit {
-                    debug!(
-                        "No branches exit the loop, so any branch without an existing \
-                        counter can have the `Expression`."
-                    );
-                    break;
-                }
-                if some_reloop_branch.is_some() {
-                    debug!(
-                        "Found a branch that exits the loop and a branch the loops back to \
-                        the top of the loop (`reloop_branch`). The `reloop_branch` will \
-                        get the `Expression`, as long as it still needs a counter."
-                    );
-                    break;
+                    // Keep looking for another reloop branch without a counter.
+                } else {
+                    // This branch exits the loop.
                 }
-                // else all branches exited this loop context, so run the same checks with
-                // the outer loop(s)
             }
+
+            if !all_branches_exit_this_loop {
+                // We found one or more reloop branches, but all of them already
+                // have counters. Let the caller choose one of the exit branches.
+                debug!("All reloop branches had counters; skip checking the other loops");
+                return None;
+            }
+
+            // All of the branches exit this loop, so keep looking for a good
+            // reloop branch for one of the outer loops.
         }
-        some_reloop_branch
+
+        None
     }
 
     #[inline]
@@ -661,9 +611,4 @@ impl<'a> MakeBcbCounters<'a> {
     fn bcb_has_one_path_to_target(&self, bcb: BasicCoverageBlock) -> bool {
         self.bcb_predecessors(bcb).len() <= 1
     }
-
-    #[inline]
-    fn bcb_dominates(&self, dom: BasicCoverageBlock, node: BasicCoverageBlock) -> bool {
-        self.basic_coverage_blocks.dominates(dom, node)
-    }
 }
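A standalone sketch (illustrative names, not the compiler's types) of the table-based expression representation introduced above, where both counters and expressions are referred to by lightweight ID terms, mirroring `make_expression` and `as_term`:

#[derive(Clone, Copy, Debug)]
enum Term {
    Counter(u32),
    Expression(u32),
}

#[derive(Clone, Copy, Debug)]
enum Op {
    Add,
    Subtract,
}

#[derive(Debug)]
struct Expr {
    lhs: Term,
    op: Op,
    rhs: Term,
}

#[derive(Default, Debug)]
struct ExpressionTable {
    exprs: Vec<Expr>,
}

impl ExpressionTable {
    // Pushing a new expression returns the term that other expressions can use as
    // an operand, so intermediate sums no longer need a separate side list.
    fn push(&mut self, lhs: Term, op: Op, rhs: Term) -> Term {
        let id = self.exprs.len() as u32;
        self.exprs.push(Expr { lhs, op, rhs });
        Term::Expression(id)
    }
}

fn main() {
    let mut table = ExpressionTable::default();
    // sum = c0 + c1; branch = sum - c2
    let sum = table.push(Term::Counter(0), Op::Add, Term::Counter(1));
    let _branch = table.push(sum, Op::Subtract, Term::Counter(2));
    println!("{table:?}");
}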
diff --git a/compiler/rustc_mir_transform/src/coverage/graph.rs b/compiler/rustc_mir_transform/src/coverage/graph.rs
index ff2254d6941..6bab62aa854 100644
--- a/compiler/rustc_mir_transform/src/coverage/graph.rs
+++ b/compiler/rustc_mir_transform/src/coverage/graph.rs
@@ -1,10 +1,12 @@
+use rustc_data_structures::captures::Captures;
 use rustc_data_structures::graph::dominators::{self, Dominators};
 use rustc_data_structures::graph::{self, GraphSuccessors, WithNumNodes, WithStartNode};
 use rustc_index::bit_set::BitSet;
 use rustc_index::{IndexSlice, IndexVec};
-use rustc_middle::mir::{self, BasicBlock, BasicBlockData, Terminator, TerminatorKind};
+use rustc_middle::mir::{self, BasicBlock, TerminatorKind};
 
 use std::cmp::Ordering;
+use std::collections::VecDeque;
 use std::ops::{Index, IndexMut};
 
 /// A coverage-specific simplification of the MIR control flow graph (CFG). The `CoverageGraph`s
@@ -36,9 +38,8 @@ impl CoverageGraph {
                 }
                 let bcb_data = &bcbs[bcb];
                 let mut bcb_successors = Vec::new();
-                for successor in
-                    bcb_filtered_successors(&mir_body, &bcb_data.terminator(mir_body).kind)
-                        .filter_map(|successor_bb| bb_to_bcb[successor_bb])
+                for successor in bcb_filtered_successors(&mir_body, bcb_data.last_bb())
+                    .filter_map(|successor_bb| bb_to_bcb[successor_bb])
                 {
                     if !seen[successor] {
                         seen[successor] = true;
@@ -80,10 +81,9 @@ impl CoverageGraph {
         // intentionally omits unwind paths.
         // FIXME(#78544): MIR InstrumentCoverage: Improve coverage of `#[should_panic]` tests and
         // `catch_unwind()` handlers.
-        let mir_cfg_without_unwind = ShortCircuitPreorder::new(&mir_body, bcb_filtered_successors);
 
         let mut basic_blocks = Vec::new();
-        for (bb, data) in mir_cfg_without_unwind {
+        for bb in short_circuit_preorder(mir_body, bcb_filtered_successors) {
             if let Some(last) = basic_blocks.last() {
                 let predecessors = &mir_body.basic_blocks.predecessors()[bb];
                 if predecessors.len() > 1 || !predecessors.contains(last) {
@@ -109,7 +109,7 @@ impl CoverageGraph {
             }
             basic_blocks.push(bb);
 
-            let term = data.terminator();
+            let term = mir_body[bb].terminator();
 
             match term.kind {
                 TerminatorKind::Return { .. }
@@ -147,7 +147,7 @@ impl CoverageGraph {
                 | TerminatorKind::Unreachable
                 | TerminatorKind::Drop { .. }
                 | TerminatorKind::Call { .. }
-                | TerminatorKind::GeneratorDrop
+                | TerminatorKind::CoroutineDrop
                 | TerminatorKind::Assert { .. }
                 | TerminatorKind::FalseEdge { .. }
                 | TerminatorKind::FalseUnwind { .. }
@@ -288,9 +288,9 @@ rustc_index::newtype_index! {
 ///     not relevant to coverage analysis. `FalseUnwind`, for example, can be treated the same as
 ///     a `Goto`, and merged with its successor into the same BCB.
 ///
-/// Each BCB with at least one computed `CoverageSpan` will have no more than one `Counter`.
+/// Each BCB with at least one computed coverage span will have no more than one `Counter`.
 /// In some cases, a BCB's execution count can be computed by `Expression`. Additional
-/// disjoint `CoverageSpan`s in a BCB can also be counted by `Expression` (by adding `ZERO`
+/// disjoint coverage spans in a BCB can also be counted by `Expression` (by adding `ZERO`
 /// to the BCB's primary counter or expression).
 ///
 /// The BCB CFG is critical to simplifying the coverage analysis by ensuring graph path-based
@@ -316,11 +316,6 @@ impl BasicCoverageBlockData {
     pub fn last_bb(&self) -> BasicBlock {
         *self.basic_blocks.last().unwrap()
     }
-
-    #[inline(always)]
-    pub fn terminator<'a, 'tcx>(&self, mir_body: &'a mir::Body<'tcx>) -> &'a Terminator<'tcx> {
-        &mir_body[self.last_bb()].terminator()
-    }
 }
 
 /// Represents a successor from a branching BasicCoverageBlock (such as the arms of a `SwitchInt`)
@@ -362,26 +357,28 @@ impl std::fmt::Debug for BcbBranch {
     }
 }
 
-// Returns the `Terminator`s non-unwind successors.
+// Returns the subset of a block's successors that are relevant to the coverage
+// graph, i.e. those that do not represent unwinds or unreachable branches.
 // FIXME(#78544): MIR InstrumentCoverage: Improve coverage of `#[should_panic]` tests and
 // `catch_unwind()` handlers.
 fn bcb_filtered_successors<'a, 'tcx>(
     body: &'a mir::Body<'tcx>,
-    term_kind: &'a TerminatorKind<'tcx>,
-) -> Box<dyn Iterator<Item = BasicBlock> + 'a> {
-    Box::new(
-        match &term_kind {
-            // SwitchInt successors are never unwind, and all of them should be traversed.
-            TerminatorKind::SwitchInt { ref targets, .. } => {
-                None.into_iter().chain(targets.all_targets().into_iter().copied())
-            }
-            // For all other kinds, return only the first successor, if any, and ignore unwinds.
-            // NOTE: `chain(&[])` is required to coerce the `option::iter` (from
-            // `next().into_iter()`) into the `mir::Successors` aliased type.
-            _ => term_kind.successors().next().into_iter().chain((&[]).into_iter().copied()),
-        }
-        .filter(move |&successor| body[successor].terminator().kind != TerminatorKind::Unreachable),
-    )
+    bb: BasicBlock,
+) -> impl Iterator<Item = BasicBlock> + Captures<'a> + Captures<'tcx> {
+    let terminator = body[bb].terminator();
+
+    let take_n_successors = match terminator.kind {
+        // SwitchInt successors are never unwinds, so all of them should be traversed.
+        TerminatorKind::SwitchInt { .. } => usize::MAX,
+        // For all other kinds, return only the first successor (if any), ignoring any
+        // unwind successors.
+        _ => 1,
+    };
+
+    terminator
+        .successors()
+        .take(take_n_successors)
+        .filter(move |&successor| body[successor].terminator().kind != TerminatorKind::Unreachable)
 }
 
 /// Maintains separate worklists for each loop in the BasicCoverageBlock CFG, plus one for the
@@ -389,57 +386,72 @@ fn bcb_filtered_successors<'a, 'tcx>(
 /// ensures a loop is completely traversed before processing Blocks after the end of the loop.
 #[derive(Debug)]
 pub(super) struct TraversalContext {
-    /// From one or more backedges returning to a loop header.
-    pub loop_backedges: Option<(Vec<BasicCoverageBlock>, BasicCoverageBlock)>,
-
-    /// worklist, to be traversed, of CoverageGraph in the loop with the given loop
-    /// backedges, such that the loop is the inner inner-most loop containing these
-    /// CoverageGraph
-    pub worklist: Vec<BasicCoverageBlock>,
+    /// BCB with one or more incoming loop backedges, indicating which loop
+    /// this context is for.
+    ///
+    /// If `None`, this is the non-loop context for the function as a whole.
+    loop_header: Option<BasicCoverageBlock>,
+
+    /// Worklist of BCBs to be processed in this context.
+    worklist: VecDeque<BasicCoverageBlock>,
 }
 
-pub(super) struct TraverseCoverageGraphWithLoops {
-    pub backedges: IndexVec<BasicCoverageBlock, Vec<BasicCoverageBlock>>,
-    pub context_stack: Vec<TraversalContext>,
+pub(super) struct TraverseCoverageGraphWithLoops<'a> {
+    basic_coverage_blocks: &'a CoverageGraph,
+
+    backedges: IndexVec<BasicCoverageBlock, Vec<BasicCoverageBlock>>,
+    context_stack: Vec<TraversalContext>,
     visited: BitSet<BasicCoverageBlock>,
 }
 
-impl TraverseCoverageGraphWithLoops {
-    pub fn new(basic_coverage_blocks: &CoverageGraph) -> Self {
-        let start_bcb = basic_coverage_blocks.start_node();
+impl<'a> TraverseCoverageGraphWithLoops<'a> {
+    pub(super) fn new(basic_coverage_blocks: &'a CoverageGraph) -> Self {
         let backedges = find_loop_backedges(basic_coverage_blocks);
-        let context_stack =
-            vec![TraversalContext { loop_backedges: None, worklist: vec![start_bcb] }];
+
+        let worklist = VecDeque::from([basic_coverage_blocks.start_node()]);
+        let context_stack = vec![TraversalContext { loop_header: None, worklist }];
+
         // `context_stack` starts with a `TraversalContext` for the main function context (beginning
         // with the `start` BasicCoverageBlock of the function). New worklists are pushed to the top
         // of the stack as loops are entered, and popped off of the stack when a loop's worklist is
         // exhausted.
         let visited = BitSet::new_empty(basic_coverage_blocks.num_nodes());
-        Self { backedges, context_stack, visited }
+        Self { basic_coverage_blocks, backedges, context_stack, visited }
     }
 
-    pub fn next(&mut self, basic_coverage_blocks: &CoverageGraph) -> Option<BasicCoverageBlock> {
+    /// For each loop on the loop context stack (top-down), yields a list of BCBs
+    /// within that loop that have an outgoing edge back to the loop header.
+    pub(super) fn reloop_bcbs_per_loop(&self) -> impl Iterator<Item = &[BasicCoverageBlock]> {
+        self.context_stack
+            .iter()
+            .rev()
+            .filter_map(|context| context.loop_header)
+            .map(|header_bcb| self.backedges[header_bcb].as_slice())
+    }
+
+    pub(super) fn next(&mut self) -> Option<BasicCoverageBlock> {
         debug!(
             "TraverseCoverageGraphWithLoops::next - context_stack: {:?}",
             self.context_stack.iter().rev().collect::<Vec<_>>()
         );
 
         while let Some(context) = self.context_stack.last_mut() {
-            if let Some(next_bcb) = context.worklist.pop() {
-                if !self.visited.insert(next_bcb) {
-                    debug!("Already visited: {:?}", next_bcb);
+            if let Some(bcb) = context.worklist.pop_front() {
+                if !self.visited.insert(bcb) {
+                    debug!("Already visited: {bcb:?}");
                     continue;
                 }
-                debug!("Visiting {:?}", next_bcb);
-                if self.backedges[next_bcb].len() > 0 {
-                    debug!("{:?} is a loop header! Start a new TraversalContext...", next_bcb);
+                debug!("Visiting {bcb:?}");
+
+                if self.backedges[bcb].len() > 0 {
+                    debug!("{bcb:?} is a loop header! Start a new TraversalContext...");
                     self.context_stack.push(TraversalContext {
-                        loop_backedges: Some((self.backedges[next_bcb].clone(), next_bcb)),
-                        worklist: Vec::new(),
+                        loop_header: Some(bcb),
+                        worklist: VecDeque::new(),
                     });
                 }
-                self.extend_worklist(basic_coverage_blocks, next_bcb);
-                return Some(next_bcb);
+                self.add_successors_to_worklists(bcb);
+                return Some(bcb);
             } else {
                 // Strip contexts with empty worklists from the top of the stack
                 self.context_stack.pop();
@@ -449,13 +461,10 @@ impl TraverseCoverageGraphWithLoops {
         None
     }
 
-    pub fn extend_worklist(
-        &mut self,
-        basic_coverage_blocks: &CoverageGraph,
-        bcb: BasicCoverageBlock,
-    ) {
-        let successors = &basic_coverage_blocks.successors[bcb];
+    pub fn add_successors_to_worklists(&mut self, bcb: BasicCoverageBlock) {
+        let successors = &self.basic_coverage_blocks.successors[bcb];
         debug!("{:?} has {} successors:", bcb, successors.len());
+
         for &successor in successors {
             if successor == bcb {
                 debug!(
@@ -464,56 +473,44 @@ impl TraverseCoverageGraphWithLoops {
                     bcb
                 );
                 // Don't re-add this successor to the worklist. We are already processing it.
+                // FIXME: This claims to skip just the self-successor, but it actually skips
+                // all other successors as well. Does that matter?
                 break;
             }
-            for context in self.context_stack.iter_mut().rev() {
-                // Add successors of the current BCB to the appropriate context. Successors that
-                // stay within a loop are added to the BCBs context worklist. Successors that
-                // exit the loop (they are not dominated by the loop header) must be reachable
-                // from other BCBs outside the loop, and they will be added to a different
-                // worklist.
-                //
-                // Branching blocks (with more than one successor) must be processed before
-                // blocks with only one successor, to prevent unnecessarily complicating
-                // `Expression`s by creating a Counter in a `BasicCoverageBlock` that the
-                // branching block would have given an `Expression` (or vice versa).
-                let (some_successor_to_add, some_loop_header) =
-                    if let Some((_, loop_header)) = context.loop_backedges {
-                        if basic_coverage_blocks.dominates(loop_header, successor) {
-                            (Some(successor), Some(loop_header))
-                        } else {
-                            (None, None)
-                        }
-                    } else {
-                        (Some(successor), None)
-                    };
-                if let Some(successor_to_add) = some_successor_to_add {
-                    if basic_coverage_blocks.successors[successor_to_add].len() > 1 {
-                        debug!(
-                            "{:?} successor is branching. Prioritize it at the beginning of \
-                            the {}",
-                            successor_to_add,
-                            if let Some(loop_header) = some_loop_header {
-                                format!("worklist for the loop headed by {loop_header:?}")
-                            } else {
-                                String::from("non-loop worklist")
-                            },
-                        );
-                        context.worklist.insert(0, successor_to_add);
-                    } else {
-                        debug!(
-                            "{:?} successor is non-branching. Defer it to the end of the {}",
-                            successor_to_add,
-                            if let Some(loop_header) = some_loop_header {
-                                format!("worklist for the loop headed by {loop_header:?}")
-                            } else {
-                                String::from("non-loop worklist")
-                            },
-                        );
-                        context.worklist.push(successor_to_add);
+
+            // Add successors of the current BCB to the appropriate context. Successors that
+            // stay within a loop are added to the BCB's context worklist. Successors that
+            // exit the loop (they are not dominated by the loop header) must be reachable
+            // from other BCBs outside the loop, and they will be added to a different
+            // worklist.
+            //
+            // Branching blocks (with more than one successor) must be processed before
+            // blocks with only one successor, to prevent unnecessarily complicating
+            // `Expression`s by creating a Counter in a `BasicCoverageBlock` that the
+            // branching block would have given an `Expression` (or vice versa).
+
+            let context = self
+                .context_stack
+                .iter_mut()
+                .rev()
+                .find(|context| match context.loop_header {
+                    Some(loop_header) => {
+                        self.basic_coverage_blocks.dominates(loop_header, successor)
                     }
-                    break;
-                }
+                    None => true,
+                })
+                .unwrap_or_else(|| bug!("should always fall back to the root non-loop context"));
+            debug!("adding to worklist for {:?}", context.loop_header);
+
+            // FIXME: The code below had debug messages claiming to add items to a
+            // particular end of the worklist, but was confused about which end was
+            // which. The existing behaviour has been preserved for now, but it's
+            // unclear what the intended behaviour was.
+
+            if self.basic_coverage_blocks.successors[successor].len() > 1 {
+                context.worklist.push_back(successor);
+            } else {
+                context.worklist.push_front(successor);
             }
         }
     }
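
As a standalone sketch of the context-selection step above: walk the context stack from the innermost loop outwards, hand the successor to the first loop whose header dominates it, and fall back to the root non-loop context otherwise. `DemoContext`, `add_to_innermost_context`, and the `dominates` closure are hypothetical stand-ins, not compiler APIs; the push_back/push_front split mirrors the preserved behaviour noted in the FIXME.

    use std::collections::VecDeque;

    struct DemoContext {
        loop_header: Option<usize>,
        worklist: VecDeque<usize>,
    }

    fn add_to_innermost_context(
        context_stack: &mut [DemoContext],
        successor: usize,
        is_branching: bool,
        dominates: impl Fn(usize, usize) -> bool,
    ) {
        // Walk the stack from innermost loop to outermost; the first context whose
        // loop header dominates the successor owns it. The root context (no loop
        // header) accepts anything that escapes every loop.
        let context = context_stack
            .iter_mut()
            .rev()
            .find(|cx| match cx.loop_header {
                Some(header) => dominates(header, successor),
                None => true,
            })
            .expect("stack always contains a root non-loop context");

        // Mirrors the behaviour preserved above: branching successors go to the
        // back of the deque, non-branching ones to the front.
        if is_branching {
            context.worklist.push_back(successor);
        } else {
            context.worklist.push_front(successor);
        }
    }

    fn main() {
        // A root context plus one loop headed by node 1; node 5 is inside the loop.
        let mut stack = vec![
            DemoContext { loop_header: None, worklist: VecDeque::new() },
            DemoContext { loop_header: Some(1), worklist: VecDeque::new() },
        ];
        let dominates = |a: usize, b: usize| a == 1 && b == 5;
        add_to_innermost_context(&mut stack, 5, false, &dominates);
        add_to_innermost_context(&mut stack, 9, true, &dominates);
        assert_eq!(stack[1].worklist.front(), Some(&5)); // loop context got node 5
        assert_eq!(stack[0].worklist.front(), Some(&9)); // root context got node 9
    }
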
@@ -553,66 +550,28 @@ pub(super) fn find_loop_backedges(
     backedges
 }
 
-pub struct ShortCircuitPreorder<
-    'a,
-    'tcx,
-    F: Fn(&'a mir::Body<'tcx>, &'a TerminatorKind<'tcx>) -> Box<dyn Iterator<Item = BasicBlock> + 'a>,
-> {
+fn short_circuit_preorder<'a, 'tcx, F, Iter>(
     body: &'a mir::Body<'tcx>,
-    visited: BitSet<BasicBlock>,
-    worklist: Vec<BasicBlock>,
     filtered_successors: F,
-}
-
-impl<
-    'a,
-    'tcx,
-    F: Fn(&'a mir::Body<'tcx>, &'a TerminatorKind<'tcx>) -> Box<dyn Iterator<Item = BasicBlock> + 'a>,
-> ShortCircuitPreorder<'a, 'tcx, F>
-{
-    pub fn new(
-        body: &'a mir::Body<'tcx>,
-        filtered_successors: F,
-    ) -> ShortCircuitPreorder<'a, 'tcx, F> {
-        let worklist = vec![mir::START_BLOCK];
-
-        ShortCircuitPreorder {
-            body,
-            visited: BitSet::new_empty(body.basic_blocks.len()),
-            worklist,
-            filtered_successors,
-        }
-    }
-}
-
-impl<
-    'a,
-    'tcx,
-    F: Fn(&'a mir::Body<'tcx>, &'a TerminatorKind<'tcx>) -> Box<dyn Iterator<Item = BasicBlock> + 'a>,
-> Iterator for ShortCircuitPreorder<'a, 'tcx, F>
+) -> impl Iterator<Item = BasicBlock> + Captures<'a> + Captures<'tcx>
+where
+    F: Fn(&'a mir::Body<'tcx>, BasicBlock) -> Iter,
+    Iter: Iterator<Item = BasicBlock>,
 {
-    type Item = (BasicBlock, &'a BasicBlockData<'tcx>);
+    let mut visited = BitSet::new_empty(body.basic_blocks.len());
+    let mut worklist = vec![mir::START_BLOCK];
 
-    fn next(&mut self) -> Option<(BasicBlock, &'a BasicBlockData<'tcx>)> {
-        while let Some(idx) = self.worklist.pop() {
-            if !self.visited.insert(idx) {
+    std::iter::from_fn(move || {
+        while let Some(bb) = worklist.pop() {
+            if !visited.insert(bb) {
                 continue;
             }
 
-            let data = &self.body[idx];
-
-            if let Some(ref term) = data.terminator {
-                self.worklist.extend((self.filtered_successors)(&self.body, &term.kind));
-            }
+            worklist.extend(filtered_successors(body, bb));
 
-            return Some((idx, data));
+            return Some(bb);
         }
 
         None
-    }
-
-    fn size_hint(&self) -> (usize, Option<usize>) {
-        let size = self.body.basic_blocks.len() - self.visited.count();
-        (size, Some(size))
-    }
+    })
 }
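
A minimal sketch of the same `std::iter::from_fn` traversal pattern, assuming a plain adjacency closure in place of `mir::Body` and `BitSet`; `short_circuit_preorder_demo` and `succ` are illustrative names only.

    fn short_circuit_preorder_demo<F, I>(
        num_nodes: usize,
        filtered_successors: F,
    ) -> impl Iterator<Item = usize>
    where
        F: Fn(usize) -> I,
        I: Iterator<Item = usize>,
    {
        let mut visited = vec![false; num_nodes];
        let mut worklist = vec![0usize]; // start node, analogous to mir::START_BLOCK

        std::iter::from_fn(move || {
            while let Some(node) = worklist.pop() {
                if std::mem::replace(&mut visited[node], true) {
                    continue; // already visited
                }
                // Only the successors the caller exposes are followed, which is
                // what lets the traversal short-circuit whole subgraphs.
                worklist.extend(filtered_successors(node));
                return Some(node);
            }
            None
        })
    }

    fn main() {
        // 0 -> {1, 2}, 1 -> {2}; successors of 2 are filtered out entirely.
        let succ = |n: usize| -> std::vec::IntoIter<usize> {
            match n {
                0 => vec![1, 2].into_iter(),
                1 => vec![2].into_iter(),
                _ => Vec::new().into_iter(),
            }
        };
        let order: Vec<usize> = short_circuit_preorder_demo(3, succ).collect();
        println!("visit order: {order:?}"); // e.g. [0, 2, 1]
    }
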
diff --git a/compiler/rustc_mir_transform/src/coverage/mod.rs b/compiler/rustc_mir_transform/src/coverage/mod.rs
index c75d33eeb31..c9b36ba25ac 100644
--- a/compiler/rustc_mir_transform/src/coverage/mod.rs
+++ b/compiler/rustc_mir_transform/src/coverage/mod.rs
@@ -8,14 +8,12 @@ mod spans;
 mod tests;
 
 use self::counters::{BcbCounter, CoverageCounters};
-use self::graph::{BasicCoverageBlock, BasicCoverageBlockData, CoverageGraph};
-use self::spans::{CoverageSpan, CoverageSpans};
+use self::graph::CoverageGraph;
+use self::spans::CoverageSpans;
 
 use crate::MirPass;
 
-use rustc_data_structures::graph::WithNumNodes;
 use rustc_data_structures::sync::Lrc;
-use rustc_index::IndexVec;
 use rustc_middle::hir;
 use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
 use rustc_middle::mir::coverage::*;
@@ -154,7 +152,7 @@ impl<'a, 'tcx> Instrumentor<'a, 'tcx> {
         let body_span = self.body_span;
 
         ////////////////////////////////////////////////////
-        // Compute `CoverageSpan`s from the `CoverageGraph`.
+        // Compute coverage spans from the `CoverageGraph`.
         let coverage_spans = CoverageSpans::generate_coverage_spans(
             &self.mir_body,
             fn_sig_span,
@@ -164,179 +162,109 @@ impl<'a, 'tcx> Instrumentor<'a, 'tcx> {
 
         ////////////////////////////////////////////////////
         // Create an optimized mix of `Counter`s and `Expression`s for the `CoverageGraph`. Ensure
-        // every `CoverageSpan` has a `Counter` or `Expression` assigned to its `BasicCoverageBlock`
+        // every coverage span has a `Counter` or `Expression` assigned to its `BasicCoverageBlock`
         // and all `Expression` dependencies (operands) are also generated, for any other
-        // `BasicCoverageBlock`s not already associated with a `CoverageSpan`.
-        //
-        // Intermediate expressions (used to compute other `Expression` values), which have no
-        // direct association with any `BasicCoverageBlock`, are accumulated inside `coverage_counters`.
-        let result = self
-            .coverage_counters
-            .make_bcb_counters(&mut self.basic_coverage_blocks, &coverage_spans);
-
-        if let Ok(()) = result {
-            ////////////////////////////////////////////////////
-            // Remove the counter or edge counter from of each `CoverageSpan`s associated
-            // `BasicCoverageBlock`, and inject a `Coverage` statement into the MIR.
-            //
-            // `Coverage` statements injected from `CoverageSpan`s will include the code regions
-            // (source code start and end positions) to be counted by the associated counter.
-            //
-            // These `CoverageSpan`-associated counters are removed from their associated
-            // `BasicCoverageBlock`s so that the only remaining counters in the `CoverageGraph`
-            // are indirect counters (to be injected next, without associated code regions).
-            self.inject_coverage_span_counters(coverage_spans);
-
-            ////////////////////////////////////////////////////
-            // For any remaining `BasicCoverageBlock` counters (that were not associated with
-            // any `CoverageSpan`), inject `Coverage` statements (_without_ code region `Span`s)
-            // to ensure `BasicCoverageBlock` counters that other `Expression`s may depend on
-            // are in fact counted, even though they don't directly contribute to counting
-            // their own independent code region's coverage.
-            self.inject_indirect_counters();
-
-            // Intermediate expressions will be injected as the final step, after generating
-            // debug output, if any.
-            ////////////////////////////////////////////////////
-        };
-
-        if let Err(e) = result {
-            bug!("Error processing: {:?}: {:?}", self.mir_body.source.def_id(), e.message)
-        };
-
-        ////////////////////////////////////////////////////
-        // Finally, inject the intermediate expressions collected along the way.
-        for intermediate_expression in &self.coverage_counters.intermediate_expressions {
-            inject_intermediate_expression(
-                self.mir_body,
-                self.make_mir_coverage_kind(intermediate_expression),
-            );
-        }
+        // `BasicCoverageBlock`s not already associated with a coverage span.
+        let bcb_has_coverage_spans = |bcb| coverage_spans.bcb_has_coverage_spans(bcb);
+        self.coverage_counters
+            .make_bcb_counters(&mut self.basic_coverage_blocks, bcb_has_coverage_spans)
+            .unwrap_or_else(|e| {
+                bug!("Error processing: {:?}: {:?}", self.mir_body.source.def_id(), e.message)
+            });
+
+        let mappings = self.create_mappings_and_inject_coverage_statements(&coverage_spans);
+
+        self.mir_body.function_coverage_info = Some(Box::new(FunctionCoverageInfo {
+            function_source_hash: self.function_source_hash,
+            num_counters: self.coverage_counters.num_counters(),
+            expressions: self.coverage_counters.take_expressions(),
+            mappings,
+        }));
     }
 
-    /// Inject a counter for each `CoverageSpan`. There can be multiple `CoverageSpan`s for a given
-    /// BCB, but only one actual counter needs to be incremented per BCB. `bb_counters` maps each
-    /// `bcb` to its `Counter`, when injected. Subsequent `CoverageSpan`s for a BCB that already has
-    /// a `Counter` will inject an `Expression` instead, and compute its value by adding `ZERO` to
-    /// the BCB `Counter` value.
-    fn inject_coverage_span_counters(&mut self, coverage_spans: Vec<CoverageSpan>) {
-        let tcx = self.tcx;
-        let source_map = tcx.sess.source_map();
+    /// For each [`BcbCounter`] associated with a BCB node or BCB edge, create
+    /// any corresponding mappings (for BCB nodes only), and inject any necessary
+    /// coverage statements into MIR.
+    fn create_mappings_and_inject_coverage_statements(
+        &mut self,
+        coverage_spans: &CoverageSpans,
+    ) -> Vec<Mapping> {
+        let source_map = self.tcx.sess.source_map();
         let body_span = self.body_span;
-        let file_name = Symbol::intern(&self.source_file.name.prefer_remapped().to_string_lossy());
-
-        let mut bcb_counters = IndexVec::from_elem_n(None, self.basic_coverage_blocks.num_nodes());
-        for covspan in coverage_spans {
-            let bcb = covspan.bcb;
-            let span = covspan.span;
-            let counter_kind = if let Some(&counter_operand) = bcb_counters[bcb].as_ref() {
-                self.coverage_counters.make_identity_counter(counter_operand)
-            } else if let Some(counter_kind) = self.coverage_counters.take_bcb_counter(bcb) {
-                bcb_counters[bcb] = Some(counter_kind.as_operand());
-                counter_kind
-            } else {
-                bug!("Every BasicCoverageBlock should have a Counter or Expression");
-            };
 
-            let code_region = make_code_region(source_map, file_name, span, body_span);
-
-            inject_statement(
-                self.mir_body,
-                self.make_mir_coverage_kind(&counter_kind),
-                self.bcb_leader_bb(bcb),
-                Some(code_region),
-            );
-        }
-    }
-
-    /// `inject_coverage_span_counters()` looped through the `CoverageSpan`s and injected the
-    /// counter from the `CoverageSpan`s `BasicCoverageBlock`, removing it from the BCB in the
-    /// process (via `take_counter()`).
-    ///
-    /// Any other counter associated with a `BasicCoverageBlock`, or its incoming edge, but not
-    /// associated with a `CoverageSpan`, should only exist if the counter is an `Expression`
-    /// dependency (one of the expression operands). Collect them, and inject the additional
-    /// counters into the MIR, without a reportable coverage span.
-    fn inject_indirect_counters(&mut self) {
-        let mut bcb_counters_without_direct_coverage_spans = Vec::new();
-        for (target_bcb, counter_kind) in self.coverage_counters.drain_bcb_counters() {
-            bcb_counters_without_direct_coverage_spans.push((None, target_bcb, counter_kind));
-        }
-        for ((from_bcb, target_bcb), counter_kind) in
-            self.coverage_counters.drain_bcb_edge_counters()
-        {
-            bcb_counters_without_direct_coverage_spans.push((
-                Some(from_bcb),
-                target_bcb,
-                counter_kind,
-            ));
-        }
+        use rustc_session::RemapFileNameExt;
+        let file_name =
+            Symbol::intern(&self.source_file.name.for_codegen(self.tcx.sess).to_string_lossy());
+
+        let mut mappings = Vec::new();
+
+        // Process the counters and spans associated with BCB nodes.
+        for (bcb, counter_kind) in self.coverage_counters.bcb_node_counters() {
+            let spans = coverage_spans.spans_for_bcb(bcb);
+            let has_mappings = !spans.is_empty();
+
+            // If this BCB has any coverage spans, add corresponding mappings to
+            // the mappings table.
+            if has_mappings {
+                let term = counter_kind.as_term();
+                mappings.extend(spans.iter().map(|&span| {
+                    let code_region = make_code_region(source_map, file_name, span, body_span);
+                    Mapping { code_region, term }
+                }));
+            }
 
-        for (edge_from_bcb, target_bcb, counter_kind) in bcb_counters_without_direct_coverage_spans
-        {
-            match counter_kind {
-                BcbCounter::Counter { .. } => {
-                    let inject_to_bb = if let Some(from_bcb) = edge_from_bcb {
-                        // The MIR edge starts `from_bb` (the outgoing / last BasicBlock in
-                        // `from_bcb`) and ends at `to_bb` (the incoming / first BasicBlock in the
-                        // `target_bcb`; also called the `leader_bb`).
-                        let from_bb = self.bcb_last_bb(from_bcb);
-                        let to_bb = self.bcb_leader_bb(target_bcb);
-
-                        let new_bb = inject_edge_counter_basic_block(self.mir_body, from_bb, to_bb);
-                        debug!(
-                            "Edge {:?} (last {:?}) -> {:?} (leader {:?}) requires a new MIR \
-                            BasicBlock {:?}, for unclaimed edge counter {:?}",
-                            edge_from_bcb, from_bb, target_bcb, to_bb, new_bb, counter_kind,
-                        );
-                        new_bb
-                    } else {
-                        let target_bb = self.bcb_last_bb(target_bcb);
-                        debug!(
-                            "{:?} ({:?}) gets a new Coverage statement for unclaimed counter {:?}",
-                            target_bcb, target_bb, counter_kind,
-                        );
-                        target_bb
-                    };
-
-                    inject_statement(
-                        self.mir_body,
-                        self.make_mir_coverage_kind(&counter_kind),
-                        inject_to_bb,
-                        None,
-                    );
-                }
-                BcbCounter::Expression { .. } => inject_intermediate_expression(
+            let do_inject = match counter_kind {
+                // Counter-increment statements always need to be injected.
+                BcbCounter::Counter { .. } => true,
+                // The only purpose of expression-used statements is to detect
+                // when a mapping is unreachable, so we only inject them for
+                // expressions with one or more mappings.
+                BcbCounter::Expression { .. } => has_mappings,
+            };
+            if do_inject {
+                inject_statement(
                     self.mir_body,
-                    self.make_mir_coverage_kind(&counter_kind),
-                ),
+                    self.make_mir_coverage_kind(counter_kind),
+                    self.basic_coverage_blocks[bcb].leader_bb(),
+                );
             }
         }
-    }
 
-    #[inline]
-    fn bcb_leader_bb(&self, bcb: BasicCoverageBlock) -> BasicBlock {
-        self.bcb_data(bcb).leader_bb()
-    }
+        // Process the counters associated with BCB edges.
+        for (from_bcb, to_bcb, counter_kind) in self.coverage_counters.bcb_edge_counters() {
+            let do_inject = match counter_kind {
+                // Counter-increment statements always need to be injected.
+                BcbCounter::Counter { .. } => true,
+                // BCB-edge expressions never have mappings, so they never need
+                // a corresponding statement.
+                BcbCounter::Expression { .. } => false,
+            };
+            if !do_inject {
+                continue;
+            }
 
-    #[inline]
-    fn bcb_last_bb(&self, bcb: BasicCoverageBlock) -> BasicBlock {
-        self.bcb_data(bcb).last_bb()
-    }
+            // We need to inject a coverage statement into a new BB between the
+            // last BB of `from_bcb` and the first BB of `to_bcb`.
+            let from_bb = self.basic_coverage_blocks[from_bcb].last_bb();
+            let to_bb = self.basic_coverage_blocks[to_bcb].leader_bb();
+
+            let new_bb = inject_edge_counter_basic_block(self.mir_body, from_bb, to_bb);
+            debug!(
+                "Edge {from_bcb:?} (last {from_bb:?}) -> {to_bcb:?} (leader {to_bb:?}) \
+                requires a new MIR BasicBlock {new_bb:?} for edge counter {counter_kind:?}",
+            );
+
+            // Inject a counter into the newly-created BB.
+            inject_statement(self.mir_body, self.make_mir_coverage_kind(&counter_kind), new_bb);
+        }
 
-    #[inline]
-    fn bcb_data(&self, bcb: BasicCoverageBlock) -> &BasicCoverageBlockData {
-        &self.basic_coverage_blocks[bcb]
+        mappings
     }
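
The inject-or-skip rules spelled out in the comments above reduce to two small predicates. This is a hedged sketch using a hypothetical `DemoCounter` enum rather than the real `BcbCounter`:

    #[derive(Clone, Copy)]
    enum DemoCounter {
        Counter,
        Expression,
    }

    fn needs_node_statement(counter: DemoCounter, has_mappings: bool) -> bool {
        match counter {
            // Counter-increment statements are always injected.
            DemoCounter::Counter => true,
            // Expression-used statements only exist to detect unreachable
            // mappings, so they are skipped when the node has no mappings.
            DemoCounter::Expression => has_mappings,
        }
    }

    fn needs_edge_statement(counter: DemoCounter) -> bool {
        // Edge expressions never carry mappings, so only real counters need a
        // new intermediate basic block and statement.
        matches!(counter, DemoCounter::Counter)
    }

    fn main() {
        assert!(needs_node_statement(DemoCounter::Counter, false));
        assert!(!needs_node_statement(DemoCounter::Expression, false));
        assert!(needs_node_statement(DemoCounter::Expression, true));
        assert!(needs_edge_statement(DemoCounter::Counter));
        assert!(!needs_edge_statement(DemoCounter::Expression));
    }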
 
     fn make_mir_coverage_kind(&self, counter_kind: &BcbCounter) -> CoverageKind {
         match *counter_kind {
-            BcbCounter::Counter { id } => {
-                CoverageKind::Counter { function_source_hash: self.function_source_hash, id }
-            }
-            BcbCounter::Expression { id, lhs, op, rhs } => {
-                CoverageKind::Expression { id, lhs, op, rhs }
-            }
+            BcbCounter::Counter { id } => CoverageKind::CounterIncrement { id },
+            BcbCounter::Expression { id } => CoverageKind::ExpressionUsed { id },
         }
     }
 }
@@ -364,42 +292,17 @@ fn inject_edge_counter_basic_block(
     new_bb
 }
 
-fn inject_statement(
-    mir_body: &mut mir::Body<'_>,
-    counter_kind: CoverageKind,
-    bb: BasicBlock,
-    some_code_region: Option<CodeRegion>,
-) {
-    debug!(
-        "  injecting statement {:?} for {:?} at code region: {:?}",
-        counter_kind, bb, some_code_region
-    );
+fn inject_statement(mir_body: &mut mir::Body<'_>, counter_kind: CoverageKind, bb: BasicBlock) {
+    debug!("  injecting statement {counter_kind:?} for {bb:?}");
     let data = &mut mir_body[bb];
     let source_info = data.terminator().source_info;
     let statement = Statement {
         source_info,
-        kind: StatementKind::Coverage(Box::new(Coverage {
-            kind: counter_kind,
-            code_region: some_code_region,
-        })),
+        kind: StatementKind::Coverage(Box::new(Coverage { kind: counter_kind })),
     };
     data.statements.insert(0, statement);
 }
 
-// Non-code expressions are injected into the coverage map, without generating executable code.
-fn inject_intermediate_expression(mir_body: &mut mir::Body<'_>, expression: CoverageKind) {
-    debug_assert!(matches!(expression, CoverageKind::Expression { .. }));
-    debug!("  injecting non-code expression {:?}", expression);
-    let inject_in_bb = mir::START_BLOCK;
-    let data = &mut mir_body[inject_in_bb];
-    let source_info = data.terminator().source_info;
-    let statement = Statement {
-        source_info,
-        kind: StatementKind::Coverage(Box::new(Coverage { kind: expression, code_region: None })),
-    };
-    data.statements.push(statement);
-}
-
 /// Convert the Span into its file name, start line and column, and end line and column
 fn make_code_region(
     source_map: &SourceMap,
diff --git a/compiler/rustc_mir_transform/src/coverage/query.rs b/compiler/rustc_mir_transform/src/coverage/query.rs
index 56365c5d474..809407f897d 100644
--- a/compiler/rustc_mir_transform/src/coverage/query.rs
+++ b/compiler/rustc_mir_transform/src/coverage/query.rs
@@ -2,100 +2,31 @@ use super::*;
 
 use rustc_data_structures::captures::Captures;
 use rustc_middle::mir::coverage::*;
-use rustc_middle::mir::{self, Body, Coverage, CoverageInfo};
+use rustc_middle::mir::{Body, Coverage, CoverageIdsInfo};
 use rustc_middle::query::Providers;
 use rustc_middle::ty::{self, TyCtxt};
-use rustc_span::def_id::DefId;
 
 /// A `query` provider for retrieving coverage information injected into MIR.
 pub(crate) fn provide(providers: &mut Providers) {
-    providers.coverageinfo = |tcx, def_id| coverageinfo(tcx, def_id);
-    providers.covered_code_regions = |tcx, def_id| covered_code_regions(tcx, def_id);
+    providers.coverage_ids_info = |tcx, def_id| coverage_ids_info(tcx, def_id);
 }
 
-/// Coverage codegen needs to know the total number of counter IDs and expression IDs that have
-/// been used by a function's coverage mappings. These totals are used to create vectors to hold
-/// the relevant counter and expression data, and the maximum counter ID (+ 1) is also needed by
-/// the `llvm.instrprof.increment` intrinsic.
-///
-/// MIR optimization may split and duplicate some BasicBlock sequences, or optimize out some code
-/// including injected counters. (It is OK if some counters are optimized out, but those counters
-/// are still included in the total `num_counters` or `num_expressions`.) Simply counting the
-/// calls may not work; but computing the number of counters or expressions by adding `1` to the
-/// highest ID (for a given instrumented function) is valid.
-///
-/// It's possible for a coverage expression to remain in MIR while one or both of its operands
-/// have been optimized away. To avoid problems in codegen, we include those operands' IDs when
-/// determining the maximum counter/expression ID, even if the underlying counter/expression is
-/// no longer present.
-struct CoverageVisitor {
-    max_counter_id: CounterId,
-    max_expression_id: ExpressionId,
-}
-
-impl CoverageVisitor {
-    /// Updates `max_counter_id` to the maximum encountered counter ID.
-    #[inline(always)]
-    fn update_max_counter_id(&mut self, counter_id: CounterId) {
-        self.max_counter_id = self.max_counter_id.max(counter_id);
-    }
-
-    /// Updates `max_expression_id` to the maximum encountered expression ID.
-    #[inline(always)]
-    fn update_max_expression_id(&mut self, expression_id: ExpressionId) {
-        self.max_expression_id = self.max_expression_id.max(expression_id);
-    }
-
-    fn update_from_expression_operand(&mut self, operand: Operand) {
-        match operand {
-            Operand::Counter(id) => self.update_max_counter_id(id),
-            Operand::Expression(id) => self.update_max_expression_id(id),
-            Operand::Zero => {}
-        }
-    }
-
-    fn visit_body(&mut self, body: &Body<'_>) {
-        for coverage in all_coverage_in_mir_body(body) {
-            self.visit_coverage(coverage);
-        }
-    }
-
-    fn visit_coverage(&mut self, coverage: &Coverage) {
-        match coverage.kind {
-            CoverageKind::Counter { id, .. } => self.update_max_counter_id(id),
-            CoverageKind::Expression { id, lhs, rhs, .. } => {
-                self.update_max_expression_id(id);
-                self.update_from_expression_operand(lhs);
-                self.update_from_expression_operand(rhs);
-            }
-            CoverageKind::Unreachable => {}
-        }
-    }
-}
-
-fn coverageinfo<'tcx>(tcx: TyCtxt<'tcx>, instance_def: ty::InstanceDef<'tcx>) -> CoverageInfo {
+/// Query implementation for `coverage_ids_info`.
+fn coverage_ids_info<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    instance_def: ty::InstanceDef<'tcx>,
+) -> CoverageIdsInfo {
     let mir_body = tcx.instance_mir(instance_def);
 
-    let mut coverage_visitor = CoverageVisitor {
-        max_counter_id: CounterId::START,
-        max_expression_id: ExpressionId::START,
-    };
-
-    coverage_visitor.visit_body(mir_body);
-
-    // Add 1 to the highest IDs to get the total number of IDs.
-    CoverageInfo {
-        num_counters: (coverage_visitor.max_counter_id + 1).as_u32(),
-        num_expressions: (coverage_visitor.max_expression_id + 1).as_u32(),
-    }
-}
+    let max_counter_id = all_coverage_in_mir_body(mir_body)
+        .filter_map(|coverage| match coverage.kind {
+            CoverageKind::CounterIncrement { id } => Some(id),
+            _ => None,
+        })
+        .max()
+        .unwrap_or(CounterId::START);
 
-fn covered_code_regions(tcx: TyCtxt<'_>, def_id: DefId) -> Vec<&CodeRegion> {
-    let body = mir_body(tcx, def_id);
-    all_coverage_in_mir_body(body)
-        // Not all coverage statements have an attached code region.
-        .filter_map(|coverage| coverage.code_region.as_ref())
-        .collect()
+    CoverageIdsInfo { max_counter_id }
 }
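
A hedged sketch of the sizing rule this query feeds, following the reasoning in the doc comment removed above: even when some counter increments are optimized out of the MIR, the highest surviving ID still determines how many counter slots codegen must allocate. `num_counters_needed` is an illustrative helper, not a compiler function.

    fn num_counters_needed(counter_ids_in_body: impl Iterator<Item = u32>) -> u32 {
        const START: u32 = 0;
        // Even if some counter increments were optimized out of the MIR, sizing
        // the table as `max_id + 1` keeps every surviving increment in bounds.
        counter_ids_in_body.max().unwrap_or(START) + 1
    }

    fn main() {
        assert_eq!(num_counters_needed([0, 3, 1].into_iter()), 4);
        assert_eq!(num_counters_needed(std::iter::empty::<u32>()), 1);
    }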
 
 fn all_coverage_in_mir_body<'a, 'tcx>(
@@ -115,11 +46,3 @@ fn is_inlined(body: &Body<'_>, statement: &Statement<'_>) -> bool {
     let scope_data = &body.source_scopes[statement.source_info.scope];
     scope_data.inlined.is_some() || scope_data.inlined_parent_scope.is_some()
 }
-
-/// This function ensures we obtain the correct MIR for the given item irrespective of
-/// whether that means const mir or runtime mir. For `const fn` this opts for runtime
-/// mir.
-fn mir_body(tcx: TyCtxt<'_>, def_id: DefId) -> &mir::Body<'_> {
-    let def = ty::InstanceDef::Item(def_id);
-    tcx.instance_mir(def)
-}
diff --git a/compiler/rustc_mir_transform/src/coverage/spans.rs b/compiler/rustc_mir_transform/src/coverage/spans.rs
index 767f8e9f4fa..704eea413e1 100644
--- a/compiler/rustc_mir_transform/src/coverage/spans.rs
+++ b/compiler/rustc_mir_transform/src/coverage/spans.rs
@@ -1,26 +1,48 @@
-use super::graph::{BasicCoverageBlock, BasicCoverageBlockData, CoverageGraph, START_BCB};
+use std::cell::OnceCell;
 
 use rustc_data_structures::graph::WithNumNodes;
-use rustc_middle::mir::{
-    self, AggregateKind, BasicBlock, FakeReadCause, Rvalue, Statement, StatementKind, Terminator,
-    TerminatorKind,
-};
-use rustc_span::source_map::original_sp;
-use rustc_span::{BytePos, ExpnKind, MacroKind, Span, Symbol};
+use rustc_index::IndexVec;
+use rustc_middle::mir;
+use rustc_span::{BytePos, ExpnKind, MacroKind, Span, Symbol, DUMMY_SP};
 
-use std::cell::OnceCell;
+use super::graph::{BasicCoverageBlock, CoverageGraph, START_BCB};
+
+mod from_mir;
 
-#[derive(Debug, Copy, Clone)]
-pub(super) enum CoverageStatement {
-    Statement(BasicBlock, Span, usize),
-    Terminator(BasicBlock, Span),
+pub(super) struct CoverageSpans {
+    /// Map from BCBs to their list of coverage spans.
+    bcb_to_spans: IndexVec<BasicCoverageBlock, Vec<Span>>,
 }
 
-impl CoverageStatement {
-    pub fn span(&self) -> Span {
-        match self {
-            Self::Statement(_, span, _) | Self::Terminator(_, span) => *span,
+impl CoverageSpans {
+    pub(super) fn generate_coverage_spans(
+        mir_body: &mir::Body<'_>,
+        fn_sig_span: Span,
+        body_span: Span,
+        basic_coverage_blocks: &CoverageGraph,
+    ) -> Self {
+        let coverage_spans = CoverageSpansGenerator::generate_coverage_spans(
+            mir_body,
+            fn_sig_span,
+            body_span,
+            basic_coverage_blocks,
+        );
+
+        // Group the coverage spans by BCB, with the BCBs in sorted order.
+        let mut bcb_to_spans = IndexVec::from_elem_n(Vec::new(), basic_coverage_blocks.num_nodes());
+        for CoverageSpan { bcb, span, .. } in coverage_spans {
+            bcb_to_spans[bcb].push(span);
         }
+
+        Self { bcb_to_spans }
+    }
+
+    pub(super) fn bcb_has_coverage_spans(&self, bcb: BasicCoverageBlock) -> bool {
+        !self.bcb_to_spans[bcb].is_empty()
+    }
+
+    pub(super) fn spans_for_bcb(&self, bcb: BasicCoverageBlock) -> &[Span] {
+        &self.bcb_to_spans[bcb]
     }
 }
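
The grouping done by `generate_coverage_spans` above is a bucket-by-BCB pass. A self-contained sketch, with `usize` indices and `(u32, u32)` byte ranges standing in for `BasicCoverageBlock` and `Span` (all names hypothetical):

    struct CoverageSpansDemo {
        bcb_to_spans: Vec<Vec<(u32, u32)>>,
    }

    impl CoverageSpansDemo {
        fn group(num_bcbs: usize, spans: impl Iterator<Item = (usize, (u32, u32))>) -> Self {
            // Bucket every (bcb, span) pair under its BCB index.
            let mut bcb_to_spans = vec![Vec::new(); num_bcbs];
            for (bcb, span) in spans {
                bcb_to_spans[bcb].push(span);
            }
            Self { bcb_to_spans }
        }

        fn bcb_has_coverage_spans(&self, bcb: usize) -> bool {
            !self.bcb_to_spans[bcb].is_empty()
        }

        fn spans_for_bcb(&self, bcb: usize) -> &[(u32, u32)] {
            &self.bcb_to_spans[bcb]
        }
    }

    fn main() {
        let spans = vec![(0, (5, 10)), (2, (12, 20)), (0, (22, 30))];
        let grouped = CoverageSpansDemo::group(3, spans.into_iter());
        assert!(grouped.bcb_has_coverage_spans(0));
        assert!(!grouped.bcb_has_coverage_spans(1));
        assert_eq!(grouped.spans_for_bcb(2).to_vec(), vec![(12, 20)]);
    }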
 
@@ -28,87 +50,55 @@ impl CoverageStatement {
 /// references the originating BCB and one or more MIR `Statement`s and/or `Terminator`s.
 /// Initially, the `Span`s come from the `Statement`s and `Terminator`s, but subsequent
 /// transforms can combine adjacent `Span`s and `CoverageSpan`s from the same BCB, merging the
-/// `CoverageStatement` vectors, and the `Span`s to cover the extent of the combined `Span`s.
+/// `merged_spans` vectors, and the `Span`s to cover the extent of the combined `Span`s.
 ///
-/// Note: A `CoverageStatement` merged into another CoverageSpan may come from a `BasicBlock` that
+/// Note: A span merged into another CoverageSpan may come from a `BasicBlock` that
 /// is not part of the `CoverageSpan` bcb if the statement was included because its `Span` matches
 /// or is subsumed by the `Span` associated with this `CoverageSpan`, and its `BasicBlock`
 /// `dominates()` the `BasicBlock`s in this `CoverageSpan`.
 #[derive(Debug, Clone)]
-pub(super) struct CoverageSpan {
+struct CoverageSpan {
     pub span: Span,
     pub expn_span: Span,
     pub current_macro_or_none: OnceCell<Option<Symbol>>,
     pub bcb: BasicCoverageBlock,
-    pub coverage_statements: Vec<CoverageStatement>,
+    /// List of all the original spans from MIR that have been merged into this
+    /// span. Mainly used to precisely skip over gaps when truncating a span.
+    pub merged_spans: Vec<Span>,
     pub is_closure: bool,
 }
 
 impl CoverageSpan {
     pub fn for_fn_sig(fn_sig_span: Span) -> Self {
-        Self {
-            span: fn_sig_span,
-            expn_span: fn_sig_span,
-            current_macro_or_none: Default::default(),
-            bcb: START_BCB,
-            coverage_statements: vec![],
-            is_closure: false,
-        }
+        Self::new(fn_sig_span, fn_sig_span, START_BCB, false)
     }
 
-    pub fn for_statement(
-        statement: &Statement<'_>,
+    pub(super) fn new(
         span: Span,
         expn_span: Span,
         bcb: BasicCoverageBlock,
-        bb: BasicBlock,
-        stmt_index: usize,
+        is_closure: bool,
     ) -> Self {
-        let is_closure = match statement.kind {
-            StatementKind::Assign(box (_, Rvalue::Aggregate(box ref kind, _))) => {
-                matches!(kind, AggregateKind::Closure(_, _) | AggregateKind::Generator(_, _, _))
-            }
-            _ => false,
-        };
-
         Self {
             span,
             expn_span,
             current_macro_or_none: Default::default(),
             bcb,
-            coverage_statements: vec![CoverageStatement::Statement(bb, span, stmt_index)],
+            merged_spans: vec![span],
             is_closure,
         }
     }
 
-    pub fn for_terminator(
-        span: Span,
-        expn_span: Span,
-        bcb: BasicCoverageBlock,
-        bb: BasicBlock,
-    ) -> Self {
-        Self {
-            span,
-            expn_span,
-            current_macro_or_none: Default::default(),
-            bcb,
-            coverage_statements: vec![CoverageStatement::Terminator(bb, span)],
-            is_closure: false,
-        }
-    }
-
     pub fn merge_from(&mut self, mut other: CoverageSpan) {
         debug_assert!(self.is_mergeable(&other));
         self.span = self.span.to(other.span);
-        self.coverage_statements.append(&mut other.coverage_statements);
+        self.merged_spans.append(&mut other.merged_spans);
     }
 
     pub fn cutoff_statements_at(&mut self, cutoff_pos: BytePos) {
-        self.coverage_statements.retain(|covstmt| covstmt.span().hi() <= cutoff_pos);
-        if let Some(highest_covstmt) =
-            self.coverage_statements.iter().max_by_key(|covstmt| covstmt.span().hi())
-        {
-            self.span = self.span.with_hi(highest_covstmt.span().hi());
+        self.merged_spans.retain(|span| span.hi() <= cutoff_pos);
+        if let Some(max_hi) = self.merged_spans.iter().map(|span| span.hi()).max() {
+            self.span = self.span.with_hi(max_hi);
         }
     }
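
A sketch of the cutoff logic above with plain `(lo, hi)` byte ranges in place of `Span` (illustrative only): drop merged spans that end past the cutoff, then shrink the overall span to the highest remaining end point.

    fn cutoff_at(merged_spans: &mut Vec<(u32, u32)>, span: &mut (u32, u32), cutoff: u32) {
        // Keep only the merged spans that end at or before the cutoff.
        merged_spans.retain(|&(_, hi)| hi <= cutoff);
        // Shrink the combined span to the highest remaining end point, if any.
        if let Some(max_hi) = merged_spans.iter().map(|&(_, hi)| hi).max() {
            span.1 = max_hi;
        }
    }

    fn main() {
        let mut merged = vec![(0, 4), (6, 9), (11, 15)];
        let mut span = (0, 15);
        cutoff_at(&mut merged, &mut span, 10);
        assert_eq!(merged, vec![(0, 4), (6, 9)]);
        assert_eq!(span, (0, 9));
    }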
 
@@ -139,11 +129,12 @@ impl CoverageSpan {
     /// If the span is part of a macro, and the macro is visible (expands directly to the given
     /// body_span), returns the macro name symbol.
     pub fn visible_macro(&self, body_span: Span) -> Option<Symbol> {
-        if let Some(current_macro) = self.current_macro() && self
-            .expn_span
-            .parent_callsite()
-            .unwrap_or_else(|| bug!("macro must have a parent"))
-            .eq_ctxt(body_span)
+        if let Some(current_macro) = self.current_macro()
+            && self
+                .expn_span
+                .parent_callsite()
+                .unwrap_or_else(|| bug!("macro must have a parent"))
+                .eq_ctxt(body_span)
         {
             return Some(current_macro);
         }
@@ -162,13 +153,7 @@ impl CoverageSpan {
 ///  * Merge spans that represent continuous (both in source code and control flow), non-branching
 ///    execution
 ///  * Carve out (leave uncovered) any span that will be counted by another MIR (notably, closures)
-pub struct CoverageSpans<'a, 'tcx> {
-    /// The MIR, used to look up `BasicBlockData`.
-    mir_body: &'a mir::Body<'tcx>,
-
-    /// A `Span` covering the signature of function for the MIR.
-    fn_sig_span: Span,
-
+struct CoverageSpansGenerator<'a> {
     /// A `Span` covering the function body of the MIR (typically from left curly brace to right
     /// curly brace).
     body_span: Span,
@@ -178,7 +163,7 @@ pub struct CoverageSpans<'a, 'tcx> {
 
     /// The initial set of `CoverageSpan`s, sorted by `Span` (`lo` and `hi`) and by relative
     /// dominance between the `BasicCoverageBlock`s of equal `Span`s.
-    sorted_spans_iter: Option<std::vec::IntoIter<CoverageSpan>>,
+    sorted_spans_iter: std::vec::IntoIter<CoverageSpan>,
 
     /// The current `CoverageSpan` to compare to its `prev`, to possibly merge, discard, force the
     /// discard of the `prev` (and or `pending_dups`), or keep both (with `prev` moved to
@@ -200,9 +185,6 @@ pub struct CoverageSpans<'a, 'tcx> {
     /// is mutated.
     prev_original_span: Span,
 
-    /// A copy of the expn_span from the prior iteration.
-    prev_expn_span: Option<Span>,
-
     /// One or more `CoverageSpan`s with the same `Span` but different `BasicCoverageBlock`s, and
     /// no `BasicCoverageBlock` in this list dominates another `BasicCoverageBlock` in the list.
     /// If a new `curr` span also fits this criteria (compared to an existing list of
@@ -218,7 +200,7 @@ pub struct CoverageSpans<'a, 'tcx> {
     refined_spans: Vec<CoverageSpan>,
 }
 
-impl<'a, 'tcx> CoverageSpans<'a, 'tcx> {
+impl<'a> CoverageSpansGenerator<'a> {
     /// Generate a minimal set of `CoverageSpan`s, each representing a contiguous code region to be
     /// counted.
     ///
@@ -241,109 +223,79 @@ impl<'a, 'tcx> CoverageSpans<'a, 'tcx> {
     /// Note the resulting vector of `CoverageSpan`s may not be fully sorted (and does not need
     /// to be).
     pub(super) fn generate_coverage_spans(
-        mir_body: &'a mir::Body<'tcx>,
+        mir_body: &mir::Body<'_>,
         fn_sig_span: Span, // Ensured to be same SourceFile and SyntaxContext as `body_span`
         body_span: Span,
         basic_coverage_blocks: &'a CoverageGraph,
     ) -> Vec<CoverageSpan> {
-        let mut coverage_spans = CoverageSpans {
+        let sorted_spans = from_mir::mir_to_initial_sorted_coverage_spans(
             mir_body,
             fn_sig_span,
             body_span,
             basic_coverage_blocks,
-            sorted_spans_iter: None,
-            refined_spans: Vec::with_capacity(basic_coverage_blocks.num_nodes() * 2),
+        );
+
+        let coverage_spans = Self {
+            body_span,
+            basic_coverage_blocks,
+            sorted_spans_iter: sorted_spans.into_iter(),
             some_curr: None,
-            curr_original_span: Span::with_root_ctxt(BytePos(0), BytePos(0)),
+            curr_original_span: DUMMY_SP,
             some_prev: None,
-            prev_original_span: Span::with_root_ctxt(BytePos(0), BytePos(0)),
-            prev_expn_span: None,
+            prev_original_span: DUMMY_SP,
             pending_dups: Vec::new(),
+            refined_spans: Vec::with_capacity(basic_coverage_blocks.num_nodes() * 2),
         };
 
-        let sorted_spans = coverage_spans.mir_to_initial_sorted_coverage_spans();
-
-        coverage_spans.sorted_spans_iter = Some(sorted_spans.into_iter());
-
         coverage_spans.to_refined_spans()
     }
 
-    fn mir_to_initial_sorted_coverage_spans(&self) -> Vec<CoverageSpan> {
-        let mut initial_spans =
-            Vec::<CoverageSpan>::with_capacity(self.mir_body.basic_blocks.len() * 2);
-        for (bcb, bcb_data) in self.basic_coverage_blocks.iter_enumerated() {
-            initial_spans.extend(self.bcb_to_initial_coverage_spans(bcb, bcb_data));
-        }
-
-        if initial_spans.is_empty() {
-            // This can happen if, for example, the function is unreachable (contains only a
-            // `BasicBlock`(s) with an `Unreachable` terminator).
-            return initial_spans;
-        }
-
-        initial_spans.push(CoverageSpan::for_fn_sig(self.fn_sig_span));
-
-        initial_spans.sort_by(|a, b| {
-            // First sort by span start.
-            Ord::cmp(&a.span.lo(), &b.span.lo())
-                // If span starts are the same, sort by span end in reverse order.
-                // This ensures that if spans A and B are adjacent in the list,
-                // and they overlap but are not equal, then either:
-                // - Span A extends further left, or
-                // - Both have the same start and span A extends further right
-                .then_with(|| Ord::cmp(&a.span.hi(), &b.span.hi()).reverse())
-                // If both spans are equal, sort the BCBs in dominator order,
-                // so that dominating BCBs come before other BCBs they dominate.
-                .then_with(|| self.basic_coverage_blocks.cmp_in_dominator_order(a.bcb, b.bcb))
-                // If two spans are otherwise identical, put closure spans first,
-                // as this seems to be what the refinement step expects.
-                .then_with(|| Ord::cmp(&a.is_closure, &b.is_closure).reverse())
-        });
-
-        initial_spans
-    }
-
     /// Iterate through the sorted `CoverageSpan`s, and return the refined list of merged and
     /// de-duplicated `CoverageSpan`s.
     fn to_refined_spans(mut self) -> Vec<CoverageSpan> {
         while self.next_coverage_span() {
+            // For the first span we don't have `prev` set, so most of the
+            // span-processing steps don't make sense yet.
             if self.some_prev.is_none() {
                 debug!("  initial span");
-                self.check_invoked_macro_name_span();
-            } else if self.curr().is_mergeable(self.prev()) {
-                debug!("  same bcb (and neither is a closure), merge with prev={:?}", self.prev());
+                self.maybe_push_macro_name_span();
+                continue;
+            }
+
+            // The remaining cases assume that `prev` and `curr` are set.
+            let prev = self.prev();
+            let curr = self.curr();
+
+            if curr.is_mergeable(prev) {
+                debug!("  same bcb (and neither is a closure), merge with prev={prev:?}");
                 let prev = self.take_prev();
                 self.curr_mut().merge_from(prev);
-                self.check_invoked_macro_name_span();
+                self.maybe_push_macro_name_span();
             // Note that curr.span may now differ from curr_original_span
-            } else if self.prev_ends_before_curr() {
+            } else if prev.span.hi() <= curr.span.lo() {
                 debug!(
-                    "  different bcbs and disjoint spans, so keep curr for next iter, and add \
-                    prev={:?}",
-                    self.prev()
+                    "  different bcbs and disjoint spans, so keep curr for next iter, and add prev={prev:?}",
                 );
                 let prev = self.take_prev();
                 self.push_refined_span(prev);
-                self.check_invoked_macro_name_span();
-            } else if self.prev().is_closure {
+                self.maybe_push_macro_name_span();
+            } else if prev.is_closure {
                 // drop any equal or overlapping span (`curr`) and keep `prev` to test again in the
                 // next iter
                 debug!(
-                    "  curr overlaps a closure (prev). Drop curr and keep prev for next iter. \
-                    prev={:?}",
-                    self.prev()
+                    "  curr overlaps a closure (prev). Drop curr and keep prev for next iter. prev={prev:?}",
                 );
-                self.take_curr();
-            } else if self.curr().is_closure {
+                self.take_curr(); // Discards curr.
+            } else if curr.is_closure {
                 self.carve_out_span_for_closure();
-            } else if self.prev_original_span == self.curr().span {
+            } else if self.prev_original_span == curr.span {
                 // Note that this compares the new (`curr`) span to `prev_original_span`.
                 // In this branch, the actual span byte range of `prev_original_span` is not
                 // important. What is important is knowing whether the new `curr` span was
                 // **originally** the same as the original span of `prev()`. The original spans
                 // reflect their original sort order, and for equal spans, conveys a partial
                 // ordering based on CFG dominator priority.
-                if self.prev().is_macro_expansion() && self.curr().is_macro_expansion() {
+                if prev.is_macro_expansion() && curr.is_macro_expansion() {
                     // Macros that expand to include branching (such as
                     // `assert_eq!()`, `assert_ne!()`, `info!()`, `debug!()`, or
                     // `trace!()`) typically generate callee spans with identical
@@ -357,23 +309,24 @@ impl<'a, 'tcx> CoverageSpans<'a, 'tcx> {
                     debug!(
                         "  curr and prev are part of a macro expansion, and curr has the same span \
                         as prev, but is in a different bcb. Drop curr and keep prev for next iter. \
-                        prev={:?}",
-                        self.prev()
+                        prev={prev:?}",
                     );
-                    self.take_curr();
+                    self.take_curr(); // Discards curr.
                 } else {
-                    self.hold_pending_dups_unless_dominated();
+                    self.update_pending_dups();
                 }
             } else {
                 self.cutoff_prev_at_overlapping_curr();
-                self.check_invoked_macro_name_span();
+                self.maybe_push_macro_name_span();
             }
         }
 
-        debug!("    AT END, adding last prev={:?}", self.prev());
         let prev = self.take_prev();
-        let pending_dups = self.pending_dups.split_off(0);
-        for dup in pending_dups {
+        debug!("    AT END, adding last prev={prev:?}");
+
+        // Take `pending_dups` so that we can drain it while calling self methods.
+        // It is never used as a field after this point.
+        for dup in std::mem::take(&mut self.pending_dups) {
             debug!("    ...adding at least one pending dup={:?}", dup);
             self.push_refined_span(dup);
         }
@@ -403,85 +356,40 @@ impl<'a, 'tcx> CoverageSpans<'a, 'tcx> {
     }
 
     fn push_refined_span(&mut self, covspan: CoverageSpan) {
-        let len = self.refined_spans.len();
-        if len > 0 {
-            let last = &mut self.refined_spans[len - 1];
-            if last.is_mergeable(&covspan) {
-                debug!(
-                    "merging new refined span with last refined span, last={:?}, covspan={:?}",
-                    last, covspan
-                );
-                last.merge_from(covspan);
-                return;
-            }
+        if let Some(last) = self.refined_spans.last_mut()
+            && last.is_mergeable(&covspan)
+        {
+            // Instead of pushing the new span, merge it with the last refined span.
+            debug!(?last, ?covspan, "merging new refined span with last refined span");
+            last.merge_from(covspan);
+        } else {
+            self.refined_spans.push(covspan);
         }
-        self.refined_spans.push(covspan)
     }
 
-    fn check_invoked_macro_name_span(&mut self) {
-        if let Some(visible_macro) = self.curr().visible_macro(self.body_span) {
-            if !self
-                .prev_expn_span
-                .is_some_and(|prev_expn_span| self.curr().expn_span.ctxt() == prev_expn_span.ctxt())
-            {
-                let merged_prefix_len = self.curr_original_span.lo() - self.curr().span.lo();
-                let after_macro_bang =
-                    merged_prefix_len + BytePos(visible_macro.as_str().len() as u32 + 1);
-                let mut macro_name_cov = self.curr().clone();
-                self.curr_mut().span =
-                    self.curr().span.with_lo(self.curr().span.lo() + after_macro_bang);
-                macro_name_cov.span =
-                    macro_name_cov.span.with_hi(macro_name_cov.span.lo() + after_macro_bang);
-                debug!(
-                    "  and curr starts a new macro expansion, so add a new span just for \
-                            the macro `{}!`, new span={:?}",
-                    visible_macro, macro_name_cov
-                );
-                self.push_refined_span(macro_name_cov);
-            }
+    /// If `curr` is part of a new macro expansion, carve out and push a separate
+    /// span that ends just after the macro name and its subsequent `!`.
+    fn maybe_push_macro_name_span(&mut self) {
+        let curr = self.curr();
+
+        let Some(visible_macro) = curr.visible_macro(self.body_span) else { return };
+        if let Some(prev) = &self.some_prev
+            && prev.expn_span.eq_ctxt(curr.expn_span)
+        {
+            return;
         }
-    }
 
-    // Generate a set of `CoverageSpan`s from the filtered set of `Statement`s and `Terminator`s of
-    // the `BasicBlock`(s) in the given `BasicCoverageBlockData`. One `CoverageSpan` is generated
-    // for each `Statement` and `Terminator`. (Note that subsequent stages of coverage analysis will
-    // merge some `CoverageSpan`s, at which point a `CoverageSpan` may represent multiple
-    // `Statement`s and/or `Terminator`s.)
-    fn bcb_to_initial_coverage_spans(
-        &self,
-        bcb: BasicCoverageBlock,
-        bcb_data: &'a BasicCoverageBlockData,
-    ) -> Vec<CoverageSpan> {
-        bcb_data
-            .basic_blocks
-            .iter()
-            .flat_map(|&bb| {
-                let data = &self.mir_body[bb];
-                data.statements
-                    .iter()
-                    .enumerate()
-                    .filter_map(move |(index, statement)| {
-                        filtered_statement_span(statement).map(|span| {
-                            CoverageSpan::for_statement(
-                                statement,
-                                function_source_span(span, self.body_span),
-                                span,
-                                bcb,
-                                bb,
-                                index,
-                            )
-                        })
-                    })
-                    .chain(filtered_terminator_span(data.terminator()).map(|span| {
-                        CoverageSpan::for_terminator(
-                            function_source_span(span, self.body_span),
-                            span,
-                            bcb,
-                            bb,
-                        )
-                    }))
-            })
-            .collect()
+        let merged_prefix_len = self.curr_original_span.lo() - curr.span.lo();
+        let after_macro_bang = merged_prefix_len + BytePos(visible_macro.as_str().len() as u32 + 1);
+        let mut macro_name_cov = curr.clone();
+        self.curr_mut().span = curr.span.with_lo(curr.span.lo() + after_macro_bang);
+        macro_name_cov.span =
+            macro_name_cov.span.with_hi(macro_name_cov.span.lo() + after_macro_bang);
+        debug!(
+            "  and curr starts a new macro expansion, so add a new span just for \
+            the macro `{visible_macro}!`, new span={macro_name_cov:?}",
+        );
+        self.push_refined_span(macro_name_cov);
     }
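
The byte arithmetic in `maybe_push_macro_name_span` splits a macro expansion span just after the macro name and its `!`. This sketch redoes that split with plain `(u32, u32)` ranges instead of `Span`/`BytePos`; `split_macro_name` is an illustrative helper, not part of the pass.

    fn split_macro_name(
        span: (u32, u32),
        visible_macro: &str,
        merged_prefix_len: u32,
    ) -> ((u32, u32), (u32, u32)) {
        // "+ 1" accounts for the `!` that follows the macro name.
        let after_macro_bang = merged_prefix_len + visible_macro.len() as u32 + 1;
        let macro_name_span = (span.0, span.0 + after_macro_bang);
        let remainder_span = (span.0 + after_macro_bang, span.1);
        (macro_name_span, remainder_span)
    }

    fn main() {
        // A macro expansion span starting at byte 100, invoked as `assert_eq!(..)`,
        // with no merged prefix in front of the macro name.
        let (name, rest) = split_macro_name((100, 140), "assert_eq", 0);
        assert_eq!(name, (100, 110)); // covers `assert_eq!`
        assert_eq!(rest, (110, 140)); // covers the rest of the expansion
    }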
 
     fn curr(&self) -> &CoverageSpan {
@@ -496,6 +404,12 @@ impl<'a, 'tcx> CoverageSpans<'a, 'tcx> {
             .unwrap_or_else(|| bug!("invalid attempt to unwrap a None some_curr"))
     }
 
+    /// If called, then the next call to `next_coverage_span()` will *not* update `prev` with the
+    /// `curr` coverage span.
+    fn take_curr(&mut self) -> CoverageSpan {
+        self.some_curr.take().unwrap_or_else(|| bug!("invalid attempt to unwrap a None some_curr"))
+    }
+
     fn prev(&self) -> &CoverageSpan {
         self.some_prev
             .as_ref()
@@ -521,82 +435,78 @@ impl<'a, 'tcx> CoverageSpans<'a, 'tcx> {
     ///     `pending_dups` could have as few as one span)
     /// In either case, no more spans will match the span of `pending_dups`, so
     /// add the `pending_dups` if they don't overlap `curr`, and clear the list.
-    fn check_pending_dups(&mut self) {
-        if let Some(dup) = self.pending_dups.last() && dup.span != self.prev().span {
-            debug!(
-                "    SAME spans, but pending_dups are NOT THE SAME, so BCBs matched on \
-                previous iteration, or prev started a new disjoint span"
-            );
-            if dup.span.hi() <= self.curr().span.lo() {
-                let pending_dups = self.pending_dups.split_off(0);
-                for dup in pending_dups.into_iter() {
-                    debug!("    ...adding at least one pending={:?}", dup);
-                    self.push_refined_span(dup);
-                }
-            } else {
-                self.pending_dups.clear();
+    fn maybe_flush_pending_dups(&mut self) {
+        let Some(last_dup) = self.pending_dups.last() else { return };
+        if last_dup.span == self.prev().span {
+            return;
+        }
+
+        debug!(
+            "    SAME spans, but pending_dups are NOT THE SAME, so BCBs matched on \
+            previous iteration, or prev started a new disjoint span"
+        );
+        if last_dup.span.hi() <= self.curr().span.lo() {
+            // Temporarily steal `pending_dups` into a local, so that we can
+            // drain it while calling other self methods.
+            let mut pending_dups = std::mem::take(&mut self.pending_dups);
+            for dup in pending_dups.drain(..) {
+                debug!("    ...adding at least one pending={:?}", dup);
+                self.push_refined_span(dup);
             }
+            // The list of dups is now empty, but we can recycle its capacity.
+            assert!(pending_dups.is_empty() && self.pending_dups.is_empty());
+            self.pending_dups = pending_dups;
+        } else {
+            self.pending_dups.clear();
         }
     }
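
Several of the rewritten methods rely on the same borrow-checker trick: `std::mem::take` the `pending_dups` vector out of `self`, drain it while calling other `&mut self` methods, then hand the empty buffer back to recycle its capacity. A minimal sketch of that pattern, with hypothetical names:

    struct Refiner {
        pending: Vec<u32>,
        refined: Vec<u32>,
    }

    impl Refiner {
        fn push_refined(&mut self, item: u32) {
            self.refined.push(item);
        }

        fn flush_pending(&mut self) {
            // Steal `pending` so no borrow of `self.pending` is held across the
            // `&mut self` calls below.
            let mut pending = std::mem::take(&mut self.pending);
            for item in pending.drain(..) {
                self.push_refined(item);
            }
            // Hand the (now empty) buffer back so its capacity is reused.
            self.pending = pending;
        }
    }

    fn main() {
        let mut refiner = Refiner { pending: vec![1, 2, 3], refined: Vec::new() };
        refiner.flush_pending();
        assert_eq!(refiner.refined, vec![1, 2, 3]);
        assert!(refiner.pending.is_empty());
    }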
 
     /// Advance `prev` to `curr` (if any), and `curr` to the next `CoverageSpan` in sorted order.
     fn next_coverage_span(&mut self) -> bool {
         if let Some(curr) = self.some_curr.take() {
-            self.prev_expn_span = Some(curr.expn_span);
             self.some_prev = Some(curr);
             self.prev_original_span = self.curr_original_span;
         }
-        while let Some(curr) = self.sorted_spans_iter.as_mut().unwrap().next() {
+        while let Some(curr) = self.sorted_spans_iter.next() {
             debug!("FOR curr={:?}", curr);
-            if self.some_prev.is_some() && self.prev_starts_after_next(&curr) {
+            if let Some(prev) = &self.some_prev && prev.span.lo() > curr.span.lo() {
+                // Skip curr because prev has already advanced beyond the end of curr.
+                // This can only happen if a prior iteration updated `prev` to skip past
+                // a region of code, such as skipping past a closure.
                 debug!(
                     "  prev.span starts after curr.span, so curr will be dropped (skipping past \
-                    closure?); prev={:?}",
-                    self.prev()
+                    closure?); prev={prev:?}",
                 );
             } else {
                 // Save a copy of the original span for `curr` in case the `CoverageSpan` is changed
                 // by `self.curr_mut().merge_from(prev)`.
                 self.curr_original_span = curr.span;
                 self.some_curr.replace(curr);
-                self.check_pending_dups();
+                self.maybe_flush_pending_dups();
                 return true;
             }
         }
         false
     }
 
-    /// If called, then the next call to `next_coverage_span()` will *not* update `prev` with the
-    /// `curr` coverage span.
-    fn take_curr(&mut self) -> CoverageSpan {
-        self.some_curr.take().unwrap_or_else(|| bug!("invalid attempt to unwrap a None some_curr"))
-    }
-
-    /// Returns true if the curr span should be skipped because prev has already advanced beyond the
-    /// end of curr. This can only happen if a prior iteration updated `prev` to skip past a region
-    /// of code, such as skipping past a closure.
-    fn prev_starts_after_next(&self, next_curr: &CoverageSpan) -> bool {
-        self.prev().span.lo() > next_curr.span.lo()
-    }
-
-    /// Returns true if the curr span starts past the end of the prev span, which means they don't
-    /// overlap, so we now know the prev can be added to the refined coverage spans.
-    fn prev_ends_before_curr(&self) -> bool {
-        self.prev().span.hi() <= self.curr().span.lo()
-    }
-
     /// If `prev`'s span extends left of the closure (`curr`), carve out the closure's span from
     /// `prev`'s span. (The closure's coverage counters will be injected when processing the
     /// closure's own MIR.) Add the portion of the span to the left of the closure; and if the span
     /// extends to the right of the closure, update `prev` to that portion of the span. For any
     /// `pending_dups`, repeat the same process.
     fn carve_out_span_for_closure(&mut self) {
-        let curr_span = self.curr().span;
-        let left_cutoff = curr_span.lo();
-        let right_cutoff = curr_span.hi();
-        let has_pre_closure_span = self.prev().span.lo() < right_cutoff;
-        let has_post_closure_span = self.prev().span.hi() > right_cutoff;
-        let mut pending_dups = self.pending_dups.split_off(0);
+        let prev = self.prev();
+        let curr = self.curr();
+
+        let left_cutoff = curr.span.lo();
+        let right_cutoff = curr.span.hi();
+        let has_pre_closure_span = prev.span.lo() < right_cutoff;
+        let has_post_closure_span = prev.span.hi() > right_cutoff;
+
+        // Temporarily steal `pending_dups` into a local, so that we can
+        // mutate and/or drain it while calling other self methods.
+        let mut pending_dups = std::mem::take(&mut self.pending_dups);
+
         if has_pre_closure_span {
             let mut pre_closure = self.prev().clone();
             pre_closure.span = pre_closure.span.with_hi(left_cutoff);
@@ -610,6 +520,7 @@ impl<'a, 'tcx> CoverageSpans<'a, 'tcx> {
             }
             self.push_refined_span(pre_closure);
         }
+
         if has_post_closure_span {
             // Mutate `prev.span()` to start after the closure (and discard curr).
             // (**NEVER** update `prev_original_span` because it affects the assumptions
@@ -620,12 +531,15 @@ impl<'a, 'tcx> CoverageSpans<'a, 'tcx> {
                 debug!("    ...and at least one overlapping dup={:?}", dup);
                 dup.span = dup.span.with_lo(right_cutoff);
             }
-            self.pending_dups.append(&mut pending_dups);
-            let closure_covspan = self.take_curr();
+            let closure_covspan = self.take_curr(); // Prevent this curr from becoming prev.
             self.push_refined_span(closure_covspan); // since self.prev() was already updated
         } else {
             pending_dups.clear();
         }
+
+        // Restore the modified post-closure spans, or the empty vector's capacity.
+        assert!(self.pending_dups.is_empty());
+        self.pending_dups = pending_dups;
     }
 
     /// Called if `curr.span` equals `prev_original_span` (and potentially equal to all
@@ -642,26 +556,28 @@ impl<'a, 'tcx> CoverageSpans<'a, 'tcx> {
     /// neither `CoverageSpan` dominates the other, both (or possibly more than two) are held,
     /// until their disposition is determined. In this latter case, the `prev` dup is moved into
     /// `pending_dups` so the new `curr` dup can be moved to `prev` for the next iteration.
-    fn hold_pending_dups_unless_dominated(&mut self) {
+    fn update_pending_dups(&mut self) {
+        let prev_bcb = self.prev().bcb;
+        let curr_bcb = self.curr().bcb;
+
         // Equal coverage spans are ordered by dominators before dominated (if any), so it should be
         // impossible for `curr` to dominate any previous `CoverageSpan`.
-        debug_assert!(!self.span_bcb_dominates(self.curr(), self.prev()));
+        debug_assert!(!self.basic_coverage_blocks.dominates(curr_bcb, prev_bcb));
 
         let initial_pending_count = self.pending_dups.len();
         if initial_pending_count > 0 {
-            let mut pending_dups = self.pending_dups.split_off(0);
-            pending_dups.retain(|dup| !self.span_bcb_dominates(dup, self.curr()));
-            self.pending_dups.append(&mut pending_dups);
-            if self.pending_dups.len() < initial_pending_count {
+            self.pending_dups
+                .retain(|dup| !self.basic_coverage_blocks.dominates(dup.bcb, curr_bcb));
+
+            let n_discarded = initial_pending_count - self.pending_dups.len();
+            if n_discarded > 0 {
                 debug!(
-                    "  discarded {} of {} pending_dups that dominated curr",
-                    initial_pending_count - self.pending_dups.len(),
-                    initial_pending_count
+                    "  discarded {n_discarded} of {initial_pending_count} pending_dups that dominated curr",
                 );
             }
         }
 
-        if self.span_bcb_dominates(self.prev(), self.curr()) {
+        if self.basic_coverage_blocks.dominates(prev_bcb, curr_bcb) {
             debug!(
                 "  different bcbs but SAME spans, and prev dominates curr. Discard prev={:?}",
                 self.prev()
@@ -714,7 +630,7 @@ impl<'a, 'tcx> CoverageSpans<'a, 'tcx> {
         if self.pending_dups.is_empty() {
             let curr_span = self.curr().span;
             self.prev_mut().cutoff_statements_at(curr_span.lo());
-            if self.prev().coverage_statements.is_empty() {
+            if self.prev().merged_spans.is_empty() {
                 debug!("  ... no non-overlapping statements to add");
             } else {
                 debug!("  ... adding modified prev={:?}", self.prev());
@@ -726,109 +642,4 @@ impl<'a, 'tcx> CoverageSpans<'a, 'tcx> {
             self.pending_dups.clear();
         }
     }
-
-    fn span_bcb_dominates(&self, dom_covspan: &CoverageSpan, covspan: &CoverageSpan) -> bool {
-        self.basic_coverage_blocks.dominates(dom_covspan.bcb, covspan.bcb)
-    }
-}
-
-/// If the MIR `Statement` has a span contributive to computing coverage spans,
-/// return it; otherwise return `None`.
-pub(super) fn filtered_statement_span(statement: &Statement<'_>) -> Option<Span> {
-    match statement.kind {
-        // These statements have spans that are often outside the scope of the executed source code
-        // for their parent `BasicBlock`.
-        StatementKind::StorageLive(_)
-        | StatementKind::StorageDead(_)
-        // Coverage should not be encountered, but don't inject coverage coverage
-        | StatementKind::Coverage(_)
-        // Ignore `ConstEvalCounter`s
-        | StatementKind::ConstEvalCounter
-        // Ignore `Nop`s
-        | StatementKind::Nop => None,
-
-        // FIXME(#78546): MIR InstrumentCoverage - Can the source_info.span for `FakeRead`
-        // statements be more consistent?
-        //
-        // FakeReadCause::ForGuardBinding, in this example:
-        //     match somenum {
-        //         x if x < 1 => { ... }
-        //     }...
-        // The BasicBlock within the match arm code included one of these statements, but the span
-        // for it covered the `1` in this source. The actual statements have nothing to do with that
-        // source span:
-        //     FakeRead(ForGuardBinding, _4);
-        // where `_4` is:
-        //     _4 = &_1; (at the span for the first `x`)
-        // and `_1` is the `Place` for `somenum`.
-        //
-        // If and when the Issue is resolved, remove this special case match pattern:
-        StatementKind::FakeRead(box (FakeReadCause::ForGuardBinding, _)) => None,
-
-        // Retain spans from all other statements
-        StatementKind::FakeRead(box (_, _)) // Not including `ForGuardBinding`
-        | StatementKind::Intrinsic(..)
-        | StatementKind::Assign(_)
-        | StatementKind::SetDiscriminant { .. }
-        | StatementKind::Deinit(..)
-        | StatementKind::Retag(_, _)
-        | StatementKind::PlaceMention(..)
-        | StatementKind::AscribeUserType(_, _) => {
-            Some(statement.source_info.span)
-        }
-    }
-}
-
-/// If the MIR `Terminator` has a span contributive to computing coverage spans,
-/// return it; otherwise return `None`.
-pub(super) fn filtered_terminator_span(terminator: &Terminator<'_>) -> Option<Span> {
-    match terminator.kind {
-        // These terminators have spans that don't positively contribute to computing a reasonable
-        // span of actually executed source code. (For example, SwitchInt terminators extracted from
-        // an `if condition { block }` has a span that includes the executed block, if true,
-        // but for coverage, the code region executed, up to *and* through the SwitchInt,
-        // actually stops before the if's block.)
-        TerminatorKind::Unreachable // Unreachable blocks are not connected to the MIR CFG
-        | TerminatorKind::Assert { .. }
-        | TerminatorKind::Drop { .. }
-        | TerminatorKind::SwitchInt { .. }
-        // For `FalseEdge`, only the `real` branch is taken, so it is similar to a `Goto`.
-        | TerminatorKind::FalseEdge { .. }
-        | TerminatorKind::Goto { .. } => None,
-
-        // Call `func` operand can have a more specific span when part of a chain of calls
-        | TerminatorKind::Call { ref func, .. } => {
-            let mut span = terminator.source_info.span;
-            if let mir::Operand::Constant(box constant) = func {
-                if constant.span.lo() > span.lo() {
-                    span = span.with_lo(constant.span.lo());
-                }
-            }
-            Some(span)
-        }
-
-        // Retain spans from all other terminators
-        TerminatorKind::UnwindResume
-        | TerminatorKind::UnwindTerminate(_)
-        | TerminatorKind::Return
-        | TerminatorKind::Yield { .. }
-        | TerminatorKind::GeneratorDrop
-        | TerminatorKind::FalseUnwind { .. }
-        | TerminatorKind::InlineAsm { .. } => {
-            Some(terminator.source_info.span)
-        }
-    }
-}
-
-/// Returns an extrapolated span (pre-expansion[^1]) corresponding to a range
-/// within the function's body source. This span is guaranteed to be contained
-/// within, or equal to, the `body_span`. If the extrapolated span is not
-/// contained within the `body_span`, the `body_span` is returned.
-///
-/// [^1]Expansions result from Rust syntax including macros, syntactic sugar,
-/// etc.).
-#[inline]
-pub(super) fn function_source_span(span: Span, body_span: Span) -> Span {
-    let original_span = original_sp(span, body_span).with_ctxt(body_span.ctxt());
-    if body_span.contains(original_span) { original_span } else { body_span }
 }
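
The `std::mem::take` call introduced in `carve_out_span_for_closure` above is the standard way to mutate a field while still calling other `&mut self` methods. Below is a minimal standalone sketch of the same pattern, under the assumption of invented `Refiner`/`Dup` types (nothing here is rustc code):

    // Minimal sketch of the "steal a field with `std::mem::take`" pattern used in
    // `carve_out_span_for_closure` above. `Refiner` and `Dup` are illustrative
    // stand-ins, not rustc types.

    #[derive(Debug, Default, PartialEq)]
    struct Dup {
        lo: u32,
        hi: u32,
    }

    #[derive(Default)]
    struct Refiner {
        pending_dups: Vec<Dup>,
        refined: Vec<Dup>,
    }

    impl Refiner {
        fn push_refined(&mut self, dup: Dup) {
            self.refined.push(dup);
        }

        /// Carve the region `left_cutoff..right_cutoff` (e.g. a closure's span)
        /// out of every pending dup.
        fn carve(&mut self, left_cutoff: u32, right_cutoff: u32) {
            // Temporarily steal `pending_dups` (leaving an empty Vec behind) so the
            // loops below can still call `&mut self` methods without a double borrow.
            let mut pending_dups = std::mem::take(&mut self.pending_dups);

            for dup in &pending_dups {
                // Emit the part of each dup that lies to the left of the carved region.
                if dup.lo < left_cutoff {
                    self.push_refined(Dup { lo: dup.lo, hi: left_cutoff });
                }
            }
            for dup in &mut pending_dups {
                // Keep only the part to the right of the carved region pending.
                dup.lo = dup.lo.max(right_cutoff);
            }
            pending_dups.retain(|dup| dup.lo < dup.hi);

            // Restore the modified dups (or at least the vector's capacity).
            assert!(self.pending_dups.is_empty());
            self.pending_dups = pending_dups;
        }
    }

    fn main() {
        let mut refiner = Refiner::default();
        refiner.pending_dups = vec![Dup { lo: 0, hi: 20 }];
        refiner.carve(5, 12);
        assert_eq!(refiner.refined, vec![Dup { lo: 0, hi: 5 }]);
        assert_eq!(refiner.pending_dups, vec![Dup { lo: 12, hi: 20 }]);
    }
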
diff --git a/compiler/rustc_mir_transform/src/coverage/spans/from_mir.rs b/compiler/rustc_mir_transform/src/coverage/spans/from_mir.rs
new file mode 100644
index 00000000000..6189e5379ea
--- /dev/null
+++ b/compiler/rustc_mir_transform/src/coverage/spans/from_mir.rs
@@ -0,0 +1,193 @@
+use rustc_data_structures::captures::Captures;
+use rustc_middle::mir::{
+    self, AggregateKind, FakeReadCause, Rvalue, Statement, StatementKind, Terminator,
+    TerminatorKind,
+};
+use rustc_span::Span;
+
+use crate::coverage::graph::{BasicCoverageBlock, BasicCoverageBlockData, CoverageGraph};
+use crate::coverage::spans::CoverageSpan;
+
+pub(super) fn mir_to_initial_sorted_coverage_spans(
+    mir_body: &mir::Body<'_>,
+    fn_sig_span: Span,
+    body_span: Span,
+    basic_coverage_blocks: &CoverageGraph,
+) -> Vec<CoverageSpan> {
+    let mut initial_spans = Vec::with_capacity(mir_body.basic_blocks.len() * 2);
+    for (bcb, bcb_data) in basic_coverage_blocks.iter_enumerated() {
+        initial_spans.extend(bcb_to_initial_coverage_spans(mir_body, body_span, bcb, bcb_data));
+    }
+
+    if initial_spans.is_empty() {
+        // This can happen if, for example, the function is unreachable (contains only
+        // `BasicBlock`s with an `Unreachable` terminator).
+        return initial_spans;
+    }
+
+    initial_spans.push(CoverageSpan::for_fn_sig(fn_sig_span));
+
+    initial_spans.sort_by(|a, b| {
+        // First sort by span start.
+        Ord::cmp(&a.span.lo(), &b.span.lo())
+            // If span starts are the same, sort by span end in reverse order.
+            // This ensures that if spans A and B are adjacent in the list,
+            // and they overlap but are not equal, then either:
+            // - Span A extends further left, or
+            // - Both have the same start and span A extends further right
+            .then_with(|| Ord::cmp(&a.span.hi(), &b.span.hi()).reverse())
+            // If both spans are equal, sort the BCBs in dominator order,
+            // so that dominating BCBs come before other BCBs they dominate.
+            .then_with(|| basic_coverage_blocks.cmp_in_dominator_order(a.bcb, b.bcb))
+            // If two spans are otherwise identical, put closure spans first,
+            // as this seems to be what the refinement step expects.
+            .then_with(|| Ord::cmp(&a.is_closure, &b.is_closure).reverse())
+    });
+
+    initial_spans
+}
+
+// Generate a set of `CoverageSpan`s from the filtered set of `Statement`s and `Terminator`s of
+// the `BasicBlock`(s) in the given `BasicCoverageBlockData`. One `CoverageSpan` is generated
+// for each `Statement` and `Terminator`. (Note that subsequent stages of coverage analysis will
+// merge some `CoverageSpan`s, at which point a `CoverageSpan` may represent multiple
+// `Statement`s and/or `Terminator`s.)
+fn bcb_to_initial_coverage_spans<'a, 'tcx>(
+    mir_body: &'a mir::Body<'tcx>,
+    body_span: Span,
+    bcb: BasicCoverageBlock,
+    bcb_data: &'a BasicCoverageBlockData,
+) -> impl Iterator<Item = CoverageSpan> + Captures<'a> + Captures<'tcx> {
+    bcb_data.basic_blocks.iter().flat_map(move |&bb| {
+        let data = &mir_body[bb];
+
+        let statement_spans = data.statements.iter().filter_map(move |statement| {
+            let expn_span = filtered_statement_span(statement)?;
+            let span = function_source_span(expn_span, body_span);
+
+            Some(CoverageSpan::new(span, expn_span, bcb, is_closure(statement)))
+        });
+
+        let terminator_span = Some(data.terminator()).into_iter().filter_map(move |terminator| {
+            let expn_span = filtered_terminator_span(terminator)?;
+            let span = function_source_span(expn_span, body_span);
+
+            Some(CoverageSpan::new(span, expn_span, bcb, false))
+        });
+
+        statement_spans.chain(terminator_span)
+    })
+}
+
+fn is_closure(statement: &Statement<'_>) -> bool {
+    match statement.kind {
+        StatementKind::Assign(box (_, Rvalue::Aggregate(box ref agg_kind, _))) => match agg_kind {
+            AggregateKind::Closure(_, _) | AggregateKind::Coroutine(_, _, _) => true,
+            _ => false,
+        },
+        _ => false,
+    }
+}
+
+/// If the MIR `Statement` has a span that contributes to computing coverage spans,
+/// return it; otherwise return `None`.
+fn filtered_statement_span(statement: &Statement<'_>) -> Option<Span> {
+    match statement.kind {
+        // These statements have spans that are often outside the scope of the executed source code
+        // for their parent `BasicBlock`.
+        StatementKind::StorageLive(_)
+        | StatementKind::StorageDead(_)
+        // Coverage should not be encountered, but don't inject coverage for coverage statements
+        | StatementKind::Coverage(_)
+        // Ignore `ConstEvalCounter`s
+        | StatementKind::ConstEvalCounter
+        // Ignore `Nop`s
+        | StatementKind::Nop => None,
+
+        // FIXME(#78546): MIR InstrumentCoverage - Can the source_info.span for `FakeRead`
+        // statements be more consistent?
+        //
+        // FakeReadCause::ForGuardBinding, in this example:
+        //     match somenum {
+        //         x if x < 1 => { ... }
+        //     }...
+        // The BasicBlock within the match arm code included one of these statements, but the span
+        // for it covered the `1` in this source. The actual statements have nothing to do with that
+        // source span:
+        //     FakeRead(ForGuardBinding, _4);
+        // where `_4` is:
+        //     _4 = &_1; (at the span for the first `x`)
+        // and `_1` is the `Place` for `somenum`.
+        //
+        // If and when the Issue is resolved, remove this special case match pattern:
+        StatementKind::FakeRead(box (FakeReadCause::ForGuardBinding, _)) => None,
+
+        // Retain spans from all other statements
+        StatementKind::FakeRead(box (_, _)) // Not including `ForGuardBinding`
+        | StatementKind::Intrinsic(..)
+        | StatementKind::Assign(_)
+        | StatementKind::SetDiscriminant { .. }
+        | StatementKind::Deinit(..)
+        | StatementKind::Retag(_, _)
+        | StatementKind::PlaceMention(..)
+        | StatementKind::AscribeUserType(_, _) => {
+            Some(statement.source_info.span)
+        }
+    }
+}
+
+/// If the MIR `Terminator` has a span that contributes to computing coverage spans,
+/// return it; otherwise return `None`.
+fn filtered_terminator_span(terminator: &Terminator<'_>) -> Option<Span> {
+    match terminator.kind {
+        // These terminators have spans that don't positively contribute to computing a reasonable
+        // span of actually executed source code. (For example, a SwitchInt terminator extracted from
+        // an `if condition { block }` has a span that includes the executed block, if taken,
+        // but for coverage, the code region executed up to *and* through the SwitchInt
+        // actually stops before the if's block.)
+        TerminatorKind::Unreachable // Unreachable blocks are not connected to the MIR CFG
+        | TerminatorKind::Assert { .. }
+        | TerminatorKind::Drop { .. }
+        | TerminatorKind::SwitchInt { .. }
+        // For `FalseEdge`, only the `real` branch is taken, so it is similar to a `Goto`.
+        | TerminatorKind::FalseEdge { .. }
+        | TerminatorKind::Goto { .. } => None,
+
+        // The `func` operand of a `Call` terminator can have a more specific span when it is part of a chain of calls
+        | TerminatorKind::Call { ref func, .. } => {
+            let mut span = terminator.source_info.span;
+            if let mir::Operand::Constant(box constant) = func {
+                if constant.span.lo() > span.lo() {
+                    span = span.with_lo(constant.span.lo());
+                }
+            }
+            Some(span)
+        }
+
+        // Retain spans from all other terminators
+        TerminatorKind::UnwindResume
+        | TerminatorKind::UnwindTerminate(_)
+        | TerminatorKind::Return
+        | TerminatorKind::Yield { .. }
+        | TerminatorKind::CoroutineDrop
+        | TerminatorKind::FalseUnwind { .. }
+        | TerminatorKind::InlineAsm { .. } => {
+            Some(terminator.source_info.span)
+        }
+    }
+}
+
+/// Returns an extrapolated span (pre-expansion[^1]) corresponding to a range
+/// within the function's body source. This span is guaranteed to be contained
+/// within, or equal to, the `body_span`. If the extrapolated span is not
+/// contained within the `body_span`, the `body_span` is returned.
+///
+/// [^1]: Expansions result from Rust syntax including macros, syntactic sugar,
+/// etc.
+#[inline]
+fn function_source_span(span: Span, body_span: Span) -> Span {
+    use rustc_span::source_map::original_sp;
+
+    let original_span = original_sp(span, body_span).with_ctxt(body_span.ctxt());
+    if body_span.contains(original_span) { original_span } else { body_span }
+}
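
The comparator in `mir_to_initial_sorted_coverage_spans` above is a lexicographic chain built with `Ordering::then_with`. Here is a self-contained sketch of the same ordering, with plain `(lo, hi)` pairs standing in for `Span`s and the dominator-order tie-break omitted; the `FakeSpan` type is invented for illustration:

    // Standalone sketch of the lexicographic ordering used above, with plain
    // fields standing in for spans. Not rustc code; std only.

    use std::cmp::Ordering;

    #[derive(Debug)]
    struct FakeSpan {
        lo: u32,
        hi: u32,
        is_closure: bool,
    }

    fn cmp_spans(a: &FakeSpan, b: &FakeSpan) -> Ordering {
        // First sort by span start.
        Ord::cmp(&a.lo, &b.lo)
            // If starts are equal, sort by span end in reverse order, so wider
            // spans come first.
            .then_with(|| Ord::cmp(&a.hi, &b.hi).reverse())
            // If both spans are identical, put closure spans first.
            .then_with(|| Ord::cmp(&a.is_closure, &b.is_closure).reverse())
    }

    fn main() {
        let mut spans = vec![
            FakeSpan { lo: 4, hi: 9, is_closure: false },
            FakeSpan { lo: 1, hi: 3, is_closure: false },
            FakeSpan { lo: 1, hi: 8, is_closure: false },
            FakeSpan { lo: 1, hi: 8, is_closure: true },
        ];
        spans.sort_by(cmp_spans);
        // Expected order: (1, 8, closure), (1, 8), (1, 3), (4, 9)
        println!("{spans:?}");
    }
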
diff --git a/compiler/rustc_mir_transform/src/coverage/tests.rs b/compiler/rustc_mir_transform/src/coverage/tests.rs
index 4a066ed3abd..795cbce963d 100644
--- a/compiler/rustc_mir_transform/src/coverage/tests.rs
+++ b/compiler/rustc_mir_transform/src/coverage/tests.rs
@@ -25,8 +25,7 @@
 //! to: `rustc_span::create_default_session_globals_then(|| { test_here(); })`.
 
 use super::counters;
-use super::graph;
-use super::spans;
+use super::graph::{self, BasicCoverageBlock};
 
 use coverage_test_macros::let_bcb;
 
@@ -242,7 +241,7 @@ fn print_coverage_graphviz(
                         "    {:?} [label=\"{:?}: {}\"];\n{}",
                         bcb,
                         bcb,
-                        bcb_data.terminator(mir_body).kind.name(),
+                        mir_body[bcb_data.last_bb()].terminator().kind.name(),
                         basic_coverage_blocks
                             .successors(bcb)
                             .map(|successor| { format!("    {:?} -> {:?};", bcb, successor) })
@@ -629,7 +628,7 @@ fn test_traverse_coverage_with_loops() {
     let basic_coverage_blocks = graph::CoverageGraph::from_mir(&mir_body);
     let mut traversed_in_order = Vec::new();
     let mut traversal = graph::TraverseCoverageGraphWithLoops::new(&basic_coverage_blocks);
-    while let Some(bcb) = traversal.next(&basic_coverage_blocks) {
+    while let Some(bcb) = traversal.next() {
         traversed_in_order.push(bcb);
     }
 
@@ -644,41 +643,20 @@ fn test_traverse_coverage_with_loops() {
     );
 }
 
-fn synthesize_body_span_from_terminators(mir_body: &Body<'_>) -> Span {
-    let mut some_span: Option<Span> = None;
-    for (_, data) in mir_body.basic_blocks.iter_enumerated() {
-        let term_span = data.terminator().source_info.span;
-        if let Some(span) = some_span.as_mut() {
-            *span = span.to(term_span);
-        } else {
-            some_span = Some(term_span)
-        }
-    }
-    some_span.expect("body must have at least one BasicBlock")
-}
-
 #[test]
 fn test_make_bcb_counters() {
     rustc_span::create_default_session_globals_then(|| {
         let mir_body = goto_switchint();
-        let body_span = synthesize_body_span_from_terminators(&mir_body);
         let mut basic_coverage_blocks = graph::CoverageGraph::from_mir(&mir_body);
-        let mut coverage_spans = Vec::new();
-        for (bcb, data) in basic_coverage_blocks.iter_enumerated() {
-            if let Some(span) = spans::filtered_terminator_span(data.terminator(&mir_body)) {
-                coverage_spans.push(spans::CoverageSpan::for_terminator(
-                    spans::function_source_span(span, body_span),
-                    span,
-                    bcb,
-                    data.last_bb(),
-                ));
-            }
-        }
+        // Historically this test would use `spans` internals to set up fake
+        // coverage spans for BCBs 1 and 2. Now we skip that step and just tell
+        // BCB counter construction that those BCBs have spans.
+        let bcb_has_coverage_spans = |bcb: BasicCoverageBlock| (1..=2).contains(&bcb.as_usize());
         let mut coverage_counters = counters::CoverageCounters::new(&basic_coverage_blocks);
         coverage_counters
-            .make_bcb_counters(&mut basic_coverage_blocks, &coverage_spans)
+            .make_bcb_counters(&mut basic_coverage_blocks, bcb_has_coverage_spans)
             .expect("should be Ok");
-        assert_eq!(coverage_counters.intermediate_expressions.len(), 0);
+        assert_eq!(coverage_counters.num_expressions(), 0);
 
         let_bcb!(1);
         assert_eq!(
diff --git a/compiler/rustc_mir_transform/src/cross_crate_inline.rs b/compiler/rustc_mir_transform/src/cross_crate_inline.rs
new file mode 100644
index 00000000000..24d081f2ac9
--- /dev/null
+++ b/compiler/rustc_mir_transform/src/cross_crate_inline.rs
@@ -0,0 +1,119 @@
+use rustc_attr::InlineAttr;
+use rustc_hir::def::DefKind;
+use rustc_hir::def_id::LocalDefId;
+use rustc_middle::mir::visit::Visitor;
+use rustc_middle::mir::*;
+use rustc_middle::query::Providers;
+use rustc_middle::ty::TyCtxt;
+use rustc_session::config::OptLevel;
+
+pub fn provide(providers: &mut Providers) {
+    providers.cross_crate_inlinable = cross_crate_inlinable;
+}
+
+fn cross_crate_inlinable(tcx: TyCtxt<'_>, def_id: LocalDefId) -> bool {
+    let codegen_fn_attrs = tcx.codegen_fn_attrs(def_id);
+    // If this has an extern indicator, then this function is globally shared and thus will not
+    // generate cgu-internal copies which would make it cross-crate inlinable.
+    if codegen_fn_attrs.contains_extern_indicator() {
+        return false;
+    }
+
+    // Obey source annotations first; this is important because it means we can use
+    // #[inline(never)] to force code generation.
+    match codegen_fn_attrs.inline {
+        InlineAttr::Never => return false,
+        InlineAttr::Hint | InlineAttr::Always => return true,
+        _ => {}
+    }
+
+    // This just reproduces the logic from Instance::requires_inline.
+    match tcx.def_kind(def_id) {
+        DefKind::Ctor(..) | DefKind::Closure => return true,
+        DefKind::Fn | DefKind::AssocFn => {}
+        _ => return false,
+    }
+
+    // Don't do any inference when incremental compilation is enabled; the additional inlining that
+    // inference permits also creates more work for small edits.
+    if tcx.sess.opts.incremental.is_some() {
+        return false;
+    }
+
+    // Don't do any inference unless optimizations are enabled.
+    if matches!(tcx.sess.opts.optimize, OptLevel::No) {
+        return false;
+    }
+
+    if !tcx.is_mir_available(def_id) {
+        return false;
+    }
+
+    let mir = tcx.optimized_mir(def_id);
+    let mut checker =
+        CostChecker { tcx, callee_body: mir, calls: 0, statements: 0, landing_pads: 0, resumes: 0 };
+    checker.visit_body(mir);
+    checker.calls == 0
+        && checker.resumes == 0
+        && checker.landing_pads == 0
+        && checker.statements
+            <= tcx.sess.opts.unstable_opts.cross_crate_inline_threshold.unwrap_or(100)
+}
+
+struct CostChecker<'b, 'tcx> {
+    tcx: TyCtxt<'tcx>,
+    callee_body: &'b Body<'tcx>,
+    calls: usize,
+    statements: usize,
+    landing_pads: usize,
+    resumes: usize,
+}
+
+impl<'tcx> Visitor<'tcx> for CostChecker<'_, 'tcx> {
+    fn visit_statement(&mut self, statement: &Statement<'tcx>, _: Location) {
+        // Don't count StorageLive/StorageDead in the inlining cost.
+        match statement.kind {
+            StatementKind::StorageLive(_)
+            | StatementKind::StorageDead(_)
+            | StatementKind::Deinit(_)
+            | StatementKind::Nop => {}
+            _ => self.statements += 1,
+        }
+    }
+
+    fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, _: Location) {
+        let tcx = self.tcx;
+        match terminator.kind {
+            TerminatorKind::Drop { ref place, unwind, .. } => {
+                let ty = place.ty(self.callee_body, tcx).ty;
+                if !ty.is_trivially_pure_clone_copy() {
+                    self.calls += 1;
+                    if let UnwindAction::Cleanup(_) = unwind {
+                        self.landing_pads += 1;
+                    }
+                }
+            }
+            TerminatorKind::Call { unwind, .. } => {
+                self.calls += 1;
+                if let UnwindAction::Cleanup(_) = unwind {
+                    self.landing_pads += 1;
+                }
+            }
+            TerminatorKind::Assert { unwind, .. } => {
+                self.calls += 1;
+                if let UnwindAction::Cleanup(_) = unwind {
+                    self.landing_pads += 1;
+                }
+            }
+            TerminatorKind::UnwindResume => self.resumes += 1,
+            TerminatorKind::InlineAsm { unwind, .. } => {
+                self.statements += 1;
+                if let UnwindAction::Cleanup(_) = unwind {
+                    self.landing_pads += 1;
+                }
+            }
+            TerminatorKind::Return => {}
+            _ => self.statements += 1,
+        }
+    }
+}
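
The new `cross_crate_inlinable` query above boils down to a small cost model: walk the optimized MIR, count statements, calls, landing pads, and resumes, and only mark a function inlinable when it is small and free of calls and unwinding edges. Here is a toy sketch of that decision rule, with invented `ToyStmt`/`ToyTerm` types instead of real MIR:

    // Toy sketch of the decision rule in `cross_crate_inlinable` above: count the
    // interesting events in a body and only allow cross-crate inlining of small,
    // call-free, unwind-free bodies. `ToyStmt`/`ToyTerm` are invented for the example.

    enum ToyStmt {
        StorageMarker, // StorageLive/StorageDead/Deinit/Nop: free
        Other,
    }

    enum ToyTerm {
        Call { has_landing_pad: bool },
        UnwindResume,
        Return,
        Other,
    }

    fn assess(stmts: &[ToyStmt], terms: &[ToyTerm], threshold: usize) -> bool {
        let (mut calls, mut statements, mut landing_pads, mut resumes) = (0usize, 0, 0, 0);
        for stmt in stmts {
            match stmt {
                ToyStmt::StorageMarker => {}
                ToyStmt::Other => statements += 1,
            }
        }
        for term in terms {
            match term {
                ToyTerm::Call { has_landing_pad } => {
                    calls += 1;
                    if *has_landing_pad {
                        landing_pads += 1;
                    }
                }
                ToyTerm::UnwindResume => resumes += 1,
                ToyTerm::Return => {}
                ToyTerm::Other => statements += 1,
            }
        }
        // Same shape as the real check: no calls, no unwinding machinery, and a
        // statement count at or below the threshold.
        calls == 0 && resumes == 0 && landing_pads == 0 && statements <= threshold
    }

    fn main() {
        let stmts = [ToyStmt::Other, ToyStmt::Other, ToyStmt::StorageMarker];
        assert!(assess(&stmts, &[ToyTerm::Other, ToyTerm::Return], 100));
        assert!(!assess(&stmts, &[ToyTerm::Call { has_landing_pad: true }], 100));
        assert!(!assess(&[], &[ToyTerm::UnwindResume], 100));
    }
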
diff --git a/compiler/rustc_mir_transform/src/dataflow_const_prop.rs b/compiler/rustc_mir_transform/src/dataflow_const_prop.rs
index 7b14fef6153..85a0be8a44c 100644
--- a/compiler/rustc_mir_transform/src/dataflow_const_prop.rs
+++ b/compiler/rustc_mir_transform/src/dataflow_const_prop.rs
@@ -2,7 +2,6 @@
 //!
 //! Currently, this pass only propagates scalar values.
 
-use rustc_const_eval::const_eval::CheckAlignment;
 use rustc_const_eval::interpret::{ImmTy, Immediate, InterpCx, OpTy, Projectable};
 use rustc_data_structures::fx::FxHashMap;
 use rustc_hir::def::DefKind;
@@ -17,7 +16,7 @@ use rustc_mir_dataflow::value_analysis::{
 use rustc_mir_dataflow::{lattice::FlatSet, Analysis, Results, ResultsVisitor};
 use rustc_span::def_id::DefId;
 use rustc_span::DUMMY_SP;
-use rustc_target::abi::{Align, FieldIdx, VariantIdx};
+use rustc_target::abi::{FieldIdx, VariantIdx};
 
 use crate::MirPass;
 
@@ -709,23 +708,13 @@ impl<'mir, 'tcx: 'mir> rustc_const_eval::interpret::Machine<'mir, 'tcx> for Dumm
     const PANIC_ON_ALLOC_FAIL: bool = true;
 
     #[inline(always)]
-    fn enforce_alignment(_ecx: &InterpCx<'mir, 'tcx, Self>) -> CheckAlignment {
-        // We do not check for alignment to avoid having to carry an `Align`
-        // in `ConstValue::ByRef`.
-        CheckAlignment::No
+    fn enforce_alignment(_ecx: &InterpCx<'mir, 'tcx, Self>) -> bool {
+        false // no reason to enforce alignment
     }
 
     fn enforce_validity(_ecx: &InterpCx<'mir, 'tcx, Self>, _layout: TyAndLayout<'tcx>) -> bool {
         unimplemented!()
     }
-    fn alignment_check_failed(
-        _ecx: &InterpCx<'mir, 'tcx, Self>,
-        _has: Align,
-        _required: Align,
-        _check: CheckAlignment,
-    ) -> interpret::InterpResult<'tcx, ()> {
-        unimplemented!()
-    }
 
     fn before_access_global(
         _tcx: TyCtxt<'tcx>,
diff --git a/compiler/rustc_mir_transform/src/dead_store_elimination.rs b/compiler/rustc_mir_transform/src/dead_store_elimination.rs
index ef14105041b..3d74ef7e327 100644
--- a/compiler/rustc_mir_transform/src/dead_store_elimination.rs
+++ b/compiler/rustc_mir_transform/src/dead_store_elimination.rs
@@ -13,10 +13,10 @@
 //!
 
 use crate::util::is_within_packed;
-use rustc_index::bit_set::BitSet;
 use rustc_middle::mir::visit::Visitor;
 use rustc_middle::mir::*;
 use rustc_middle::ty::TyCtxt;
+use rustc_mir_dataflow::debuginfo::debuginfo_locals;
 use rustc_mir_dataflow::impls::{
     borrowed_locals, LivenessTransferFunction, MaybeTransitiveLiveLocals,
 };
@@ -26,8 +26,15 @@ use rustc_mir_dataflow::Analysis;
 ///
 /// The `borrowed` set must be a `BitSet` of all the locals that are ever borrowed in this body. It
 /// can be generated via the [`borrowed_locals`] function.
-pub fn eliminate<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>, borrowed: &BitSet<Local>) {
-    let mut live = MaybeTransitiveLiveLocals::new(borrowed)
+pub fn eliminate<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+    let borrowed_locals = borrowed_locals(body);
+
+    // If the user requests complete debuginfo, mark the locals that appear in it as live, so
+    // we don't remove assignments to them.
+    let mut always_live = debuginfo_locals(body);
+    always_live.union(&borrowed_locals);
+
+    let mut live = MaybeTransitiveLiveLocals::new(&always_live)
         .into_engine(tcx, body)
         .iterate_to_fixpoint()
         .into_results_cursor(body);
@@ -48,7 +55,9 @@ pub fn eliminate<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>, borrowed: &BitS
             for (index, arg) in args.iter().enumerate().rev() {
                 if let Operand::Copy(place) = *arg
                     && !place.is_indirect()
-                    && !borrowed.contains(place.local)
+                    // Do not skip the transformation if the local is in debuginfo, as we do
+                    // not really lose any information for this purpose.
+                    && !borrowed_locals.contains(place.local)
                     && !state.contains(place.local)
                     // If `place` is a projection of a disaligned field in a packed ADT,
                     // the move may be codegened as a pointer to that field.
@@ -75,7 +84,7 @@ pub fn eliminate<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>, borrowed: &BitS
                 StatementKind::Assign(box (place, _))
                 | StatementKind::SetDiscriminant { place: box place, .. }
                 | StatementKind::Deinit(box place) => {
-                    if !place.is_indirect() && !borrowed.contains(place.local) {
+                    if !place.is_indirect() && !always_live.contains(place.local) {
                         live.seek_before_primary_effect(loc);
                         if !live.get().contains(place.local) {
                             patch.push(loc);
@@ -126,7 +135,6 @@ impl<'tcx> MirPass<'tcx> for DeadStoreElimination {
     }
 
     fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
-        let borrowed = borrowed_locals(body);
-        eliminate(tcx, body, &borrowed);
+        eliminate(tcx, body);
     }
 }
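
The dead-store-elimination change above keeps stores to debuginfo locals by unioning them with the borrowed locals into a single always-live set. Below is a rough sketch of that idea with `HashSet` standing in for rustc's `BitSet`; the `ToyAssign` type and the `read_later` flag are invented stand-ins for the real liveness analysis:

    // Rough sketch of the "always live = borrowed ∪ debuginfo" idea above, using
    // HashSet in place of rustc's BitSet. Locals are plain usize indices here.

    use std::collections::HashSet;

    struct ToyAssign {
        dest: usize,
        read_later: bool, // stand-in for the liveness analysis result
    }

    fn eliminate(assigns: &mut Vec<ToyAssign>, borrowed: &HashSet<usize>, debuginfo: &HashSet<usize>) {
        // Locals mentioned in debuginfo are kept alive alongside borrowed locals,
        // so user-visible variables keep their values when full debuginfo is requested.
        let always_live: HashSet<usize> = borrowed.union(debuginfo).copied().collect();

        // Keep a store if its value may be read later, or if the local must stay live.
        assigns.retain(|a| a.read_later || always_live.contains(&a.dest));
    }

    fn main() {
        let mut assigns = vec![
            ToyAssign { dest: 0, read_later: false }, // dead, not special: removed
            ToyAssign { dest: 1, read_later: false }, // dead but in debuginfo: kept
            ToyAssign { dest: 2, read_later: true },  // live: kept
        ];
        let borrowed = HashSet::new();
        let debuginfo = HashSet::from([1]);
        eliminate(&mut assigns, &borrowed, &debuginfo);
        assert_eq!(assigns.iter().map(|a| a.dest).collect::<Vec<_>>(), vec![1, 2]);
    }
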
diff --git a/compiler/rustc_mir_transform/src/deref_separator.rs b/compiler/rustc_mir_transform/src/deref_separator.rs
index 95898b5b73c..42be7457018 100644
--- a/compiler/rustc_mir_transform/src/deref_separator.rs
+++ b/compiler/rustc_mir_transform/src/deref_separator.rs
@@ -37,7 +37,7 @@ impl<'a, 'tcx> MutVisitor<'tcx> for DerefChecker<'a, 'tcx> {
             for (idx, (p_ref, p_elem)) in place.iter_projections().enumerate() {
                 if !p_ref.projection.is_empty() && p_elem == ProjectionElem::Deref {
                     let ty = p_ref.ty(self.local_decls, self.tcx).ty;
-                    let temp = self.patcher.new_internal_with_info(
+                    let temp = self.patcher.new_local_with_info(
                         ty,
                         self.local_decls[p_ref.local].source_info.span,
                         LocalInfo::DerefTemp,
diff --git a/compiler/rustc_mir_transform/src/dest_prop.rs b/compiler/rustc_mir_transform/src/dest_prop.rs
index d9a132e5cf1..15502adfb5a 100644
--- a/compiler/rustc_mir_transform/src/dest_prop.rs
+++ b/compiler/rustc_mir_transform/src/dest_prop.rs
@@ -114,7 +114,7 @@
 //! approach that only works for some classes of CFGs:
 //! - rustc now has a powerful dataflow analysis framework that can handle forwards and backwards
 //!   analyses efficiently.
-//! - Layout optimizations for generators have been added to improve code generation for
+//! - Layout optimizations for coroutines have been added to improve code generation for
 //!   async/await, which are very similar in spirit to what this optimization does.
 //!
 //! Also, rustc now has a simple NRVO pass (see `nrvo.rs`), which handles a subset of the cases that
@@ -244,7 +244,7 @@ impl<'tcx> MirPass<'tcx> for DestinationPropagation {
         if round_count != 0 {
             // Merging can introduce overlap between moved arguments and/or call destination in an
             // unreachable code, which validator considers to be ill-formed.
-            remove_dead_blocks(tcx, body);
+            remove_dead_blocks(body);
         }
 
         trace!(round_count);
@@ -655,7 +655,7 @@ impl WriteInfo {
                 // `Drop`s create a `&mut` and so are not considered
             }
             TerminatorKind::Yield { .. }
-            | TerminatorKind::GeneratorDrop
+            | TerminatorKind::CoroutineDrop
             | TerminatorKind::FalseEdge { .. }
             | TerminatorKind::FalseUnwind { .. } => {
                 bug!("{:?} not found in this MIR phase", terminator)
diff --git a/compiler/rustc_mir_transform/src/early_otherwise_branch.rs b/compiler/rustc_mir_transform/src/early_otherwise_branch.rs
index 319fb4eaf3e..6eb6cb069fe 100644
--- a/compiler/rustc_mir_transform/src/early_otherwise_branch.rs
+++ b/compiler/rustc_mir_transform/src/early_otherwise_branch.rs
@@ -95,6 +95,7 @@ pub struct EarlyOtherwiseBranch;
 
 impl<'tcx> MirPass<'tcx> for EarlyOtherwiseBranch {
     fn is_enabled(&self, sess: &rustc_session::Session) -> bool {
+        // unsound: https://github.com/rust-lang/rust/issues/95162
         sess.mir_opt_level() >= 3 && sess.opts.unstable_opts.unsound_mir_opts
     }
 
diff --git a/compiler/rustc_mir_transform/src/elaborate_box_derefs.rs b/compiler/rustc_mir_transform/src/elaborate_box_derefs.rs
index e51f771e00d..1c917a85c03 100644
--- a/compiler/rustc_mir_transform/src/elaborate_box_derefs.rs
+++ b/compiler/rustc_mir_transform/src/elaborate_box_derefs.rs
@@ -69,7 +69,7 @@ impl<'tcx, 'a> MutVisitor<'tcx> for ElaborateBoxDerefVisitor<'tcx, 'a> {
             let (unique_ty, nonnull_ty, ptr_ty) =
                 build_ptr_tys(tcx, base_ty.boxed_ty(), self.unique_did, self.nonnull_did);
 
-            let ptr_local = self.patch.new_internal(ptr_ty, source_info.span);
+            let ptr_local = self.patch.new_temp(ptr_ty, source_info.span);
 
             self.patch.add_assign(
                 location,
diff --git a/compiler/rustc_mir_transform/src/elaborate_drops.rs b/compiler/rustc_mir_transform/src/elaborate_drops.rs
index b62d7da2a4c..59156b2427c 100644
--- a/compiler/rustc_mir_transform/src/elaborate_drops.rs
+++ b/compiler/rustc_mir_transform/src/elaborate_drops.rs
@@ -9,9 +9,9 @@ use rustc_mir_dataflow::elaborate_drops::{elaborate_drop, DropFlagState, Unwind}
 use rustc_mir_dataflow::elaborate_drops::{DropElaborator, DropFlagMode, DropStyle};
 use rustc_mir_dataflow::impls::{MaybeInitializedPlaces, MaybeUninitializedPlaces};
 use rustc_mir_dataflow::move_paths::{LookupResult, MoveData, MovePathIndex};
+use rustc_mir_dataflow::on_all_children_bits;
 use rustc_mir_dataflow::on_lookup_result_bits;
 use rustc_mir_dataflow::MoveDataParamEnv;
-use rustc_mir_dataflow::{on_all_children_bits, on_all_drop_children_bits};
 use rustc_mir_dataflow::{Analysis, ResultsCursor};
 use rustc_span::Span;
 use rustc_target::abi::{FieldIdx, VariantIdx};
@@ -54,16 +54,10 @@ impl<'tcx> MirPass<'tcx> for ElaborateDrops {
 
         let def_id = body.source.def_id();
         let param_env = tcx.param_env_reveal_all_normalized(def_id);
-        let move_data = match MoveData::gather_moves(body, tcx, param_env) {
-            Ok(move_data) => move_data,
-            Err((move_data, _)) => {
-                tcx.sess.delay_span_bug(
-                    body.span,
-                    "No `move_errors` should be allowed in MIR borrowck",
-                );
-                move_data
-            }
-        };
+        // For types that do not need dropping, the behaviour is trivial. So we only need to track
+        // init/uninit for types that do need dropping.
+        let move_data =
+            MoveData::gather_moves(&body, tcx, param_env, |ty| ty.needs_drop(tcx, param_env));
         let elaborate_patch = {
             let env = MoveDataParamEnv { move_data, param_env };
 
@@ -178,13 +172,19 @@ impl<'a, 'tcx> DropElaborator<'a, 'tcx> for Elaborator<'a, '_, 'tcx> {
                 let mut some_live = false;
                 let mut some_dead = false;
                 let mut children_count = 0;
-                on_all_drop_children_bits(self.tcx(), self.body(), self.ctxt.env, path, |child| {
-                    let (live, dead) = self.ctxt.init_data.maybe_live_dead(child);
-                    debug!("elaborate_drop: state({:?}) = {:?}", child, (live, dead));
-                    some_live |= live;
-                    some_dead |= dead;
-                    children_count += 1;
-                });
+                on_all_children_bits(
+                    self.tcx(),
+                    self.body(),
+                    self.ctxt.move_data(),
+                    path,
+                    |child| {
+                        let (live, dead) = self.ctxt.init_data.maybe_live_dead(child);
+                        debug!("elaborate_drop: state({:?}) = {:?}", child, (live, dead));
+                        some_live |= live;
+                        some_dead |= dead;
+                        children_count += 1;
+                    },
+                );
                 ((some_live, some_dead), children_count != 1)
             }
         };
@@ -271,7 +271,7 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> {
         let tcx = self.tcx;
         let patch = &mut self.patch;
         debug!("create_drop_flag({:?})", self.body.span);
-        self.drop_flags[index].get_or_insert_with(|| patch.new_internal(tcx.types.bool, span));
+        self.drop_flags[index].get_or_insert_with(|| patch.new_temp(tcx.types.bool, span));
     }
 
     fn drop_flag(&mut self, index: MovePathIndex) -> Option<Place<'tcx>> {
@@ -296,26 +296,36 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> {
     fn collect_drop_flags(&mut self) {
         for (bb, data) in self.body.basic_blocks.iter_enumerated() {
             let terminator = data.terminator();
-            let place = match terminator.kind {
-                TerminatorKind::Drop { ref place, .. } => place,
-                _ => continue,
-            };
-
-            self.init_data.seek_before(self.body.terminator_loc(bb));
+            let TerminatorKind::Drop { ref place, .. } = terminator.kind else { continue };
 
             let path = self.move_data().rev_lookup.find(place.as_ref());
             debug!("collect_drop_flags: {:?}, place {:?} ({:?})", bb, place, path);
 
-            let path = match path {
-                LookupResult::Exact(e) => e,
-                LookupResult::Parent(None) => continue,
+            match path {
+                LookupResult::Exact(path) => {
+                    self.init_data.seek_before(self.body.terminator_loc(bb));
+                    on_all_children_bits(self.tcx, self.body, self.move_data(), path, |child| {
+                        let (maybe_live, maybe_dead) = self.init_data.maybe_live_dead(child);
+                        debug!(
+                            "collect_drop_flags: collecting {:?} from {:?}@{:?} - {:?}",
+                            child,
+                            place,
+                            path,
+                            (maybe_live, maybe_dead)
+                        );
+                        if maybe_live && maybe_dead {
+                            self.create_drop_flag(child, terminator.source_info.span)
+                        }
+                    });
+                }
+                LookupResult::Parent(None) => {}
                 LookupResult::Parent(Some(parent)) => {
-                    let (_maybe_live, maybe_dead) = self.init_data.maybe_live_dead(parent);
-
                     if self.body.local_decls[place.local].is_deref_temp() {
                         continue;
                     }
 
+                    self.init_data.seek_before(self.body.terminator_loc(bb));
+                    let (_maybe_live, maybe_dead) = self.init_data.maybe_live_dead(parent);
                     if maybe_dead {
                         self.tcx.sess.delay_span_bug(
                             terminator.source_info.span,
@@ -324,80 +334,74 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> {
                             ),
                         );
                     }
-                    continue;
                 }
             };
-
-            on_all_drop_children_bits(self.tcx, self.body, self.env, path, |child| {
-                let (maybe_live, maybe_dead) = self.init_data.maybe_live_dead(child);
-                debug!(
-                    "collect_drop_flags: collecting {:?} from {:?}@{:?} - {:?}",
-                    child,
-                    place,
-                    path,
-                    (maybe_live, maybe_dead)
-                );
-                if maybe_live && maybe_dead {
-                    self.create_drop_flag(child, terminator.source_info.span)
-                }
-            });
         }
     }
 
     fn elaborate_drops(&mut self) {
+        // This function should mirror what `collect_drop_flags` does.
         for (bb, data) in self.body.basic_blocks.iter_enumerated() {
-            let loc = Location { block: bb, statement_index: data.statements.len() };
             let terminator = data.terminator();
+            let TerminatorKind::Drop { place, target, unwind, replace } = terminator.kind else {
+                continue;
+            };
 
-            match terminator.kind {
-                TerminatorKind::Drop { place, target, unwind, replace } => {
-                    self.init_data.seek_before(loc);
-                    match self.move_data().rev_lookup.find(place.as_ref()) {
-                        LookupResult::Exact(path) => {
-                            let unwind = if data.is_cleanup {
-                                Unwind::InCleanup
-                            } else {
-                                match unwind {
-                                    UnwindAction::Cleanup(cleanup) => Unwind::To(cleanup),
-                                    UnwindAction::Continue => Unwind::To(self.patch.resume_block()),
-                                    UnwindAction::Unreachable => {
-                                        Unwind::To(self.patch.unreachable_cleanup_block())
-                                    }
-                                    UnwindAction::Terminate(reason) => {
-                                        debug_assert_ne!(
-                                            reason,
-                                            UnwindTerminateReason::InCleanup,
-                                            "we are not in a cleanup block, InCleanup reason should be impossible"
-                                        );
-                                        Unwind::To(self.patch.terminate_block(reason))
-                                    }
-                                }
-                            };
-                            elaborate_drop(
-                                &mut Elaborator { ctxt: self },
-                                terminator.source_info,
-                                place,
-                                path,
-                                target,
-                                unwind,
-                                bb,
-                            )
+            // This place does not need dropping. It does not have an associated move-path, so the
+            // match below will conservatively keep an unconditional drop. As that drop is useless,
+            // just remove it here and now.
+            if !place
+                .ty(&self.body.local_decls, self.tcx)
+                .ty
+                .needs_drop(self.tcx, self.env.param_env)
+            {
+                self.patch.patch_terminator(bb, TerminatorKind::Goto { target });
+                continue;
+            }
+
+            let path = self.move_data().rev_lookup.find(place.as_ref());
+            match path {
+                LookupResult::Exact(path) => {
+                    let unwind = match unwind {
+                        _ if data.is_cleanup => Unwind::InCleanup,
+                        UnwindAction::Cleanup(cleanup) => Unwind::To(cleanup),
+                        UnwindAction::Continue => Unwind::To(self.patch.resume_block()),
+                        UnwindAction::Unreachable => {
+                            Unwind::To(self.patch.unreachable_cleanup_block())
                         }
-                        LookupResult::Parent(..) => {
-                            if !replace {
-                                self.tcx.sess.delay_span_bug(
-                                    terminator.source_info.span,
-                                    format!("drop of untracked value {bb:?}"),
-                                );
-                            }
-                            // A drop and replace behind a pointer/array/whatever.
-                            // The borrow checker requires that these locations are initialized before the assignment,
-                            // so we just leave an unconditional drop.
-                            assert!(!data.is_cleanup);
+                        UnwindAction::Terminate(reason) => {
+                            debug_assert_ne!(
+                                reason,
+                                UnwindTerminateReason::InCleanup,
+                                "we are not in a cleanup block, InCleanup reason should be impossible"
+                            );
+                            Unwind::To(self.patch.terminate_block(reason))
                         }
+                    };
+                    self.init_data.seek_before(self.body.terminator_loc(bb));
+                    elaborate_drop(
+                        &mut Elaborator { ctxt: self },
+                        terminator.source_info,
+                        place,
+                        path,
+                        target,
+                        unwind,
+                        bb,
+                    )
+                }
+                LookupResult::Parent(None) => {}
+                LookupResult::Parent(Some(_)) => {
+                    if !replace {
+                        self.tcx.sess.delay_span_bug(
+                            terminator.source_info.span,
+                            format!("drop of untracked value {bb:?}"),
+                        );
                     }
+                    // A drop and replace behind a pointer/array/whatever.
+                    // The borrow checker requires that these locations are initialized before the assignment,
+                    // so we just leave an unconditional drop.
+                    assert!(!data.is_cleanup);
                 }
-                _ => continue,
             }
         }
     }
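
The elaborate-drops change above short-circuits drops of places whose type needs no dropping by patching the terminator into a plain `Goto`. A tiny sketch of that rewrite over an invented terminator enum (not real MIR):

    // Tiny sketch of the "useless drop becomes a goto" rewrite above, with an
    // invented terminator enum instead of real MIR.

    #[derive(Debug, PartialEq)]
    enum ToyTerminator {
        Drop { place_needs_drop: bool, target: usize },
        Goto { target: usize },
        Return,
    }

    fn simplify_drops(blocks: &mut [ToyTerminator]) {
        for term in blocks.iter_mut() {
            if let ToyTerminator::Drop { place_needs_drop: false, target } = *term {
                // The dropped type has no destructor, so the drop is a no-op and
                // the terminator can fall through to its target directly.
                *term = ToyTerminator::Goto { target };
            }
        }
    }

    fn main() {
        let mut blocks = vec![
            ToyTerminator::Drop { place_needs_drop: false, target: 1 },
            ToyTerminator::Drop { place_needs_drop: true, target: 2 },
            ToyTerminator::Return,
        ];
        simplify_drops(&mut blocks);
        assert_eq!(blocks[0], ToyTerminator::Goto { target: 1 });
        assert_eq!(blocks[1], ToyTerminator::Drop { place_needs_drop: true, target: 2 });
    }
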
diff --git a/compiler/rustc_mir_transform/src/ffi_unwind_calls.rs b/compiler/rustc_mir_transform/src/ffi_unwind_calls.rs
index d202860840c..26fcfad8287 100644
--- a/compiler/rustc_mir_transform/src/ffi_unwind_calls.rs
+++ b/compiler/rustc_mir_transform/src/ffi_unwind_calls.rs
@@ -58,7 +58,7 @@ fn has_ffi_unwind_calls(tcx: TyCtxt<'_>, local_def_id: LocalDefId) -> bool {
     let body_abi = match body_ty.kind() {
         ty::FnDef(..) => body_ty.fn_sig(tcx).abi(),
         ty::Closure(..) => Abi::RustCall,
-        ty::Generator(..) => Abi::Rust,
+        ty::Coroutine(..) => Abi::Rust,
         _ => span_bug!(body.span, "unexpected body ty: {:?}", body_ty),
     };
     let body_can_unwind = layout::fn_can_unwind(tcx, Some(def_id), body_abi);
diff --git a/compiler/rustc_mir_transform/src/gvn.rs b/compiler/rustc_mir_transform/src/gvn.rs
index 449bade3322..eece7c3e834 100644
--- a/compiler/rustc_mir_transform/src/gvn.rs
+++ b/compiler/rustc_mir_transform/src/gvn.rs
@@ -63,7 +63,7 @@ use rustc_middle::mir::*;
 use rustc_middle::ty::{self, Ty, TyCtxt};
 use rustc_target::abi::{VariantIdx, FIRST_VARIANT};
 
-use crate::ssa::SsaLocals;
+use crate::ssa::{AssignedValue, SsaLocals};
 use crate::MirPass;
 
 pub struct GVN;
@@ -87,21 +87,28 @@ fn propagate_ssa<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
     let dominators = body.basic_blocks.dominators().clone();
 
     let mut state = VnState::new(tcx, param_env, &ssa, &dominators, &body.local_decls);
-    for arg in body.args_iter() {
-        if ssa.is_ssa(arg) {
-            let value = state.new_opaque().unwrap();
-            state.assign(arg, value);
-        }
-    }
-
-    ssa.for_each_assignment_mut(&mut body.basic_blocks, |local, rvalue, location| {
-        let value = state.simplify_rvalue(rvalue, location).or_else(|| state.new_opaque()).unwrap();
-        // FIXME(#112651) `rvalue` may have a subtype to `local`. We can only mark `local` as
-        // reusable if we have an exact type match.
-        if state.local_decls[local].ty == rvalue.ty(state.local_decls, tcx) {
+    ssa.for_each_assignment_mut(
+        body.basic_blocks.as_mut_preserves_cfg(),
+        |local, value, location| {
+            let value = match value {
+                // We do not know anything of this assigned value.
+                AssignedValue::Arg | AssignedValue::Terminator(_) => None,
+                // Try to get some insight.
+                AssignedValue::Rvalue(rvalue) => {
+                    let value = state.simplify_rvalue(rvalue, location);
+                    // FIXME(#112651) `rvalue` may have a subtype to `local`. We can only mark `local` as
+                    // reusable if we have an exact type match.
+                    if state.local_decls[local].ty != rvalue.ty(state.local_decls, tcx) {
+                        return;
+                    }
+                    value
+                }
+            };
+            // `next_opaque` is `Some`, so `new_opaque` must return `Some`.
+            let value = value.or_else(|| state.new_opaque()).unwrap();
             state.assign(local, value);
-        }
-    });
+        },
+    );
 
     // Stop creating opaques during replacement as it is useless.
     state.next_opaque = None;
@@ -306,12 +313,14 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
                 }
                 ProjectionElem::Downcast(name, index) => ProjectionElem::Downcast(name, index),
                 ProjectionElem::OpaqueCast(ty) => ProjectionElem::OpaqueCast(ty),
+                ProjectionElem::Subtype(ty) => ProjectionElem::Subtype(ty),
             };
             value = self.insert(Value::Projection(value, proj));
         }
 
         if let Some(local) = self.try_as_local(value, location)
-            && local != place.local // in case we had no projection to begin with.
+            && local != place.local
+        // in case we had no projection to begin with.
         {
             *place = local.into();
             self.reused_locals.insert(local);
@@ -374,7 +383,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
                     AggregateKind::Array(..)
                     | AggregateKind::Tuple
                     | AggregateKind::Closure(..)
-                    | AggregateKind::Generator(..) => FIRST_VARIANT,
+                    | AggregateKind::Coroutine(..) => FIRST_VARIANT,
                     AggregateKind::Adt(_, variant_index, _, _, None) => variant_index,
                     // Do not track unions.
                     AggregateKind::Adt(_, _, _, _, Some(_)) => return None,
diff --git a/compiler/rustc_mir_transform/src/inline.rs b/compiler/rustc_mir_transform/src/inline.rs
index ebd61f8ad95..8b33e00c63c 100644
--- a/compiler/rustc_mir_transform/src/inline.rs
+++ b/compiler/rustc_mir_transform/src/inline.rs
@@ -14,6 +14,7 @@ use rustc_session::config::OptLevel;
 use rustc_target::abi::FieldIdx;
 use rustc_target::spec::abi::Abi;
 
+use crate::cost_checker::CostChecker;
 use crate::simplify::{remove_dead_blocks, CfgSimplifier};
 use crate::util;
 use crate::MirPass;
@@ -22,11 +23,6 @@ use std::ops::{Range, RangeFrom};
 
 pub(crate) mod cycle;
 
-const INSTR_COST: usize = 5;
-const CALL_PENALTY: usize = 25;
-const LANDINGPAD_PENALTY: usize = 50;
-const RESUME_PENALTY: usize = 45;
-
 const TOP_DOWN_DEPTH_LIMIT: usize = 5;
 
 pub struct Inline;
@@ -63,7 +59,7 @@ impl<'tcx> MirPass<'tcx> for Inline {
         if inline(tcx, body) {
             debug!("running simplify cfg on {:?}", body.source);
             CfgSimplifier::new(body).simplify();
-            remove_dead_blocks(tcx, body);
+            remove_dead_blocks(body);
             deref_finder(tcx, body);
         }
     }
@@ -79,10 +75,10 @@ fn inline<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) -> bool {
     if body.source.promoted.is_some() {
         return false;
     }
-    // Avoid inlining into generators, since their `optimized_mir` is used for layout computation,
+    // Avoid inlining into coroutines, since their `optimized_mir` is used for layout computation,
     // which can create a cycle, even when no attempt is made to inline the function in the other
     // direction.
-    if body.generator.is_some() {
+    if body.coroutine.is_some() {
         return false;
     }
 
@@ -169,8 +165,11 @@ impl<'tcx> Inliner<'tcx> {
         caller_body: &mut Body<'tcx>,
         callsite: &CallSite<'tcx>,
     ) -> Result<std::ops::Range<BasicBlock>, &'static str> {
+        self.check_mir_is_available(caller_body, &callsite.callee)?;
+
         let callee_attrs = self.tcx.codegen_fn_attrs(callsite.callee.def_id());
-        self.check_codegen_attributes(callsite, callee_attrs)?;
+        let cross_crate_inlinable = self.tcx.cross_crate_inlinable(callsite.callee.def_id());
+        self.check_codegen_attributes(callsite, callee_attrs, cross_crate_inlinable)?;
 
         let terminator = caller_body[callsite.block].terminator.as_ref().unwrap();
         let TerminatorKind::Call { args, destination, .. } = &terminator.kind else { bug!() };
@@ -183,9 +182,8 @@ impl<'tcx> Inliner<'tcx> {
             }
         }
 
-        self.check_mir_is_available(caller_body, &callsite.callee)?;
         let callee_body = try_instance_mir(self.tcx, callsite.callee.def)?;
-        self.check_mir_body(callsite, callee_body, callee_attrs)?;
+        self.check_mir_body(callsite, callee_body, callee_attrs, cross_crate_inlinable)?;
 
         if !self.tcx.consider_optimizing(|| {
             format!("Inline {:?} into {:?}", callsite.callee, caller_body.source)
@@ -218,7 +216,13 @@ impl<'tcx> Inliner<'tcx> {
         // Normally, this shouldn't be required, but trait normalization failure can create a
         // validation ICE.
         let output_type = callee_body.return_ty();
-        if !util::is_subtype(self.tcx, self.param_env, output_type, destination_ty) {
+        if !util::relate_types(
+            self.tcx,
+            self.param_env,
+            ty::Variance::Covariant,
+            output_type,
+            destination_ty,
+        ) {
             trace!(?output_type, ?destination_ty);
             return Err("failed to normalize return type");
         }
@@ -248,7 +252,13 @@ impl<'tcx> Inliner<'tcx> {
                 self_arg_ty.into_iter().chain(arg_tuple_tys).zip(callee_body.args_iter())
             {
                 let input_type = callee_body.local_decls[input].ty;
-                if !util::is_subtype(self.tcx, self.param_env, input_type, arg_ty) {
+                if !util::relate_types(
+                    self.tcx,
+                    self.param_env,
+                    ty::Variance::Covariant,
+                    input_type,
+                    arg_ty,
+                ) {
                     trace!(?arg_ty, ?input_type);
                     return Err("failed to normalize tuple argument type");
                 }
@@ -257,7 +267,13 @@ impl<'tcx> Inliner<'tcx> {
             for (arg, input) in args.iter().zip(callee_body.args_iter()) {
                 let input_type = callee_body.local_decls[input].ty;
                 let arg_ty = arg.ty(&caller_body.local_decls, self.tcx);
-                if !util::is_subtype(self.tcx, self.param_env, input_type, arg_ty) {
+                if !util::relate_types(
+                    self.tcx,
+                    self.param_env,
+                    ty::Variance::Covariant,
+                    input_type,
+                    arg_ty,
+                ) {
                     trace!(?arg_ty, ?input_type);
                     return Err("failed to normalize argument type");
                 }
@@ -383,6 +399,7 @@ impl<'tcx> Inliner<'tcx> {
         &self,
         callsite: &CallSite<'tcx>,
         callee_attrs: &CodegenFnAttrs,
+        cross_crate_inlinable: bool,
     ) -> Result<(), &'static str> {
         if let InlineAttr::Never = callee_attrs.inline {
             return Err("never inline hint");
@@ -396,7 +413,7 @@ impl<'tcx> Inliner<'tcx> {
             .non_erasable_generics(self.tcx, callsite.callee.def_id())
             .next()
             .is_some();
-        if !is_generic && !callee_attrs.requests_inline() {
+        if !is_generic && !cross_crate_inlinable {
             return Err("not exported");
         }
 
@@ -438,10 +455,11 @@ impl<'tcx> Inliner<'tcx> {
         callsite: &CallSite<'tcx>,
         callee_body: &Body<'tcx>,
         callee_attrs: &CodegenFnAttrs,
+        cross_crate_inlinable: bool,
     ) -> Result<(), &'static str> {
         let tcx = self.tcx;
 
-        let mut threshold = if callee_attrs.requests_inline() {
+        let mut threshold = if cross_crate_inlinable {
             self.tcx.sess.opts.unstable_opts.inline_mir_hint_threshold.unwrap_or(100)
         } else {
             self.tcx.sess.opts.unstable_opts.inline_mir_threshold.unwrap_or(50)
@@ -457,13 +475,8 @@ impl<'tcx> Inliner<'tcx> {
 
         // FIXME: Give a bonus to functions with only a single caller
 
-        let mut checker = CostChecker {
-            tcx: self.tcx,
-            param_env: self.param_env,
-            instance: callsite.callee,
-            callee_body,
-            cost: 0,
-        };
+        let mut checker =
+            CostChecker::new(self.tcx, self.param_env, Some(callsite.callee), callee_body);
 
         // Traverse the MIR manually so we can account for the effects of inlining on the CFG.
         let mut work_list = vec![START_BLOCK];
@@ -485,7 +498,9 @@ impl<'tcx> Inliner<'tcx> {
                     self.tcx,
                     ty::EarlyBinder::bind(&place.ty(callee_body, tcx).ty),
                 );
-                if ty.needs_drop(tcx, self.param_env) && let UnwindAction::Cleanup(unwind) = unwind {
+                if ty.needs_drop(tcx, self.param_env)
+                    && let UnwindAction::Cleanup(unwind) = unwind
+                {
                     work_list.push(unwind);
                 }
             } else if callee_attrs.instruction_set != self.codegen_fn_attrs.instruction_set
@@ -506,7 +521,7 @@ impl<'tcx> Inliner<'tcx> {
         // That attribute is often applied to very large functions that exceed LLVM's (very
         // generous) inlining threshold. Such functions are very poor MIR inlining candidates.
         // Always inlining #[inline(always)] functions in MIR, on net, slows down the compiler.
-        let cost = checker.cost;
+        let cost = checker.cost();
         if cost <= threshold {
             debug!("INLINING {:?} [cost={} <= threshold={}]", callsite, cost, threshold);
             Ok(())
@@ -598,9 +613,7 @@ impl<'tcx> Inliner<'tcx> {
                 // If there are any locals without storage markers, give them storage only for the
                 // duration of the call.
                 for local in callee_body.vars_and_temps_iter() {
-                    if !callee_body.local_decls[local].internal
-                        && integrator.always_live_locals.contains(local)
-                    {
+                    if integrator.always_live_locals.contains(local) {
                         let new_local = integrator.map_local(local);
                         caller_body[callsite.block].statements.push(Statement {
                             source_info: callsite.source_info,
@@ -623,9 +636,7 @@ impl<'tcx> Inliner<'tcx> {
                         n += 1;
                     }
                     for local in callee_body.vars_and_temps_iter().rev() {
-                        if !callee_body.local_decls[local].internal
-                            && integrator.always_live_locals.contains(local)
-                        {
+                        if integrator.always_live_locals.contains(local) {
                             let new_local = integrator.map_local(local);
                             caller_body[block].statements.push(Statement {
                                 source_info: callsite.source_info,
@@ -783,79 +794,6 @@ impl<'tcx> Inliner<'tcx> {
     }
 }
 
-/// Verify that the callee body is compatible with the caller.
-///
-/// This visitor mostly computes the inlining cost,
-/// but also needs to verify that types match because of normalization failure.
-struct CostChecker<'b, 'tcx> {
-    tcx: TyCtxt<'tcx>,
-    param_env: ParamEnv<'tcx>,
-    cost: usize,
-    callee_body: &'b Body<'tcx>,
-    instance: ty::Instance<'tcx>,
-}
-
-impl<'tcx> Visitor<'tcx> for CostChecker<'_, 'tcx> {
-    fn visit_statement(&mut self, statement: &Statement<'tcx>, _: Location) {
-        // Don't count StorageLive/StorageDead in the inlining cost.
-        match statement.kind {
-            StatementKind::StorageLive(_)
-            | StatementKind::StorageDead(_)
-            | StatementKind::Deinit(_)
-            | StatementKind::Nop => {}
-            _ => self.cost += INSTR_COST,
-        }
-    }
-
-    fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, _: Location) {
-        let tcx = self.tcx;
-        match terminator.kind {
-            TerminatorKind::Drop { ref place, unwind, .. } => {
-                // If the place doesn't actually need dropping, treat it like a regular goto.
-                let ty = self.instance.instantiate_mir(
-                    tcx,
-                    ty::EarlyBinder::bind(&place.ty(self.callee_body, tcx).ty),
-                );
-                if ty.needs_drop(tcx, self.param_env) {
-                    self.cost += CALL_PENALTY;
-                    if let UnwindAction::Cleanup(_) = unwind {
-                        self.cost += LANDINGPAD_PENALTY;
-                    }
-                } else {
-                    self.cost += INSTR_COST;
-                }
-            }
-            TerminatorKind::Call { func: Operand::Constant(ref f), unwind, .. } => {
-                let fn_ty =
-                    self.instance.instantiate_mir(tcx, ty::EarlyBinder::bind(&f.const_.ty()));
-                self.cost += if let ty::FnDef(def_id, _) = *fn_ty.kind() && tcx.is_intrinsic(def_id) {
-                    // Don't give intrinsics the extra penalty for calls
-                    INSTR_COST
-                } else {
-                    CALL_PENALTY
-                };
-                if let UnwindAction::Cleanup(_) = unwind {
-                    self.cost += LANDINGPAD_PENALTY;
-                }
-            }
-            TerminatorKind::Assert { unwind, .. } => {
-                self.cost += CALL_PENALTY;
-                if let UnwindAction::Cleanup(_) = unwind {
-                    self.cost += LANDINGPAD_PENALTY;
-                }
-            }
-            TerminatorKind::UnwindResume => self.cost += RESUME_PENALTY,
-            TerminatorKind::InlineAsm { unwind, .. } => {
-                self.cost += INSTR_COST;
-                if let UnwindAction::Cleanup(_) = unwind {
-                    self.cost += LANDINGPAD_PENALTY;
-                }
-            }
-            _ => self.cost += INSTR_COST,
-        }
-    }
-}
-
 /**
  * Integrator.
  *
@@ -992,7 +930,7 @@ impl<'tcx> MutVisitor<'tcx> for Integrator<'_, 'tcx> {
         }
 
         match terminator.kind {
-            TerminatorKind::GeneratorDrop | TerminatorKind::Yield { .. } => bug!(),
+            TerminatorKind::CoroutineDrop | TerminatorKind::Yield { .. } => bug!(),
             TerminatorKind::Goto { ref mut target } => {
                 *target = self.map_block(*target);
             }
diff --git a/compiler/rustc_mir_transform/src/instsimplify.rs b/compiler/rustc_mir_transform/src/instsimplify.rs
index a6ef2e11aa8..fbcd6e75ad4 100644
--- a/compiler/rustc_mir_transform/src/instsimplify.rs
+++ b/compiler/rustc_mir_transform/src/instsimplify.rs
@@ -93,7 +93,9 @@ impl<'tcx> InstSimplifyContext<'tcx, '_> {
                     _ => None,
                 };
 
-                if let Some(new) = new && self.should_simplify(source_info, rvalue) {
+                if let Some(new) = new
+                    && self.should_simplify(source_info, rvalue)
+                {
                     *rvalue = new;
                 }
             }
@@ -150,7 +152,8 @@ impl<'tcx> InstSimplifyContext<'tcx, '_> {
                 *rvalue = Rvalue::Use(operand.clone());
             } else if *kind == CastKind::Transmute {
                 // Transmuting an integer to another integer is just a signedness cast
-                if let (ty::Int(int), ty::Uint(uint)) | (ty::Uint(uint), ty::Int(int)) = (operand_ty.kind(), cast_ty.kind())
+                if let (ty::Int(int), ty::Uint(uint)) | (ty::Uint(uint), ty::Int(int)) =
+                    (operand_ty.kind(), cast_ty.kind())
                     && int.bit_width() == uint.bit_width()
                 {
                     // The width check isn't strictly necessary, as different widths
@@ -172,8 +175,15 @@ impl<'tcx> InstSimplifyContext<'tcx, '_> {
                     for (i, field) in variant.fields.iter().enumerate() {
                         let field_ty = field.ty(self.tcx, args);
                         if field_ty == *cast_ty {
-                            let place = place.project_deeper(&[ProjectionElem::Field(FieldIdx::from_usize(i), *cast_ty)], self.tcx);
-                            let operand = if operand.is_move() { Operand::Move(place) } else { Operand::Copy(place) };
+                            let place = place.project_deeper(
+                                &[ProjectionElem::Field(FieldIdx::from_usize(i), *cast_ty)],
+                                self.tcx,
+                            );
+                            let operand = if operand.is_move() {
+                                Operand::Move(place)
+                            } else {
+                                Operand::Copy(place)
+                            };
                             *rvalue = Rvalue::Use(operand);
                             return;
                         }
diff --git a/compiler/rustc_mir_transform/src/jump_threading.rs b/compiler/rustc_mir_transform/src/jump_threading.rs
new file mode 100644
index 00000000000..7b918be4474
--- /dev/null
+++ b/compiler/rustc_mir_transform/src/jump_threading.rs
@@ -0,0 +1,759 @@
+//! A jump threading optimization.
+//!
+//! This optimization seeks to replace join-then-switch control flow patterns by straight jumps
+//!    X = 0                                      X = 0
+//! ------------\      /--------              ------------
+//!    X = 1     X----X SwitchInt(X)     =>       X = 1
+//! ------------/      \--------              ------------
+//!
+//!
+//! We proceed by walking the CFG backwards starting from each `SwitchInt` terminator,
+//! looking for assignments that will turn the `SwitchInt` into a simple `Goto`.
+//!
+//! The algorithm maintains a set of replacement conditions:
+//! - `conditions[place]` contains `Condition { value, polarity: Eq, target }`
+//!   if assigning `value` to `place` turns the `SwitchInt` into `Goto { target }`.
+//! - `conditions[place]` contains `Condition { value, polarity: Ne, target }`
+//!   if assigning anything different from `value` to `place` turns the `SwitchInt`
+//!   into `Goto { target }`.
+//!
+//! In this file, we denote as `place ?= value` the existence of a replacement condition
+//! on `place` with given `value`, irrespective of the polarity and target of that
+//! replacement condition.
+//!
+//! We then walk the CFG backwards transforming the set of conditions.
+//! When we find a fulfilling assignment, we record a `ThreadingOpportunity`.
+//! All `ThreadingOpportunity`s are applied to the body, by duplicating blocks if required.
+//!
+//! The optimization search can be very heavy, as it performs a DFS on MIR starting from
+//! each `SwitchInt` terminator. To manage the complexity, we:
+//! - bound the maximum depth by a constant `MAX_BACKTRACK`;
+//! - only traverse `Goto` terminators.
+//!
+//! We try to avoid creating irreducible control-flow by not threading through a loop header.
+//!
+//! Likewise, applying the optimization can create a lot of new MIR, so we bound the instruction
+//! cost by `MAX_COST`.
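+//!
+//! For instance (a simplified sketch), source along these lines lowers to the join-then-switch
+//! shape drawn above: both arms of the `if` assign a known constant to `x`, which is immediately
+//! switched on by the `match`, so each arm of the `if` can jump straight to the corresponding
+//! `match` arm:
+//!
+//! ```ignore (illustrative)
+//! fn demo(c: bool) -> u8 {
+//!     let x = if c { 0 } else { 1 };
+//!     match x {
+//!         0 => 10,
+//!         _ => 20,
+//!     }
+//! }
+//! ```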
+
+use rustc_arena::DroplessArena;
+use rustc_data_structures::fx::FxHashSet;
+use rustc_index::bit_set::BitSet;
+use rustc_index::IndexVec;
+use rustc_middle::mir::visit::Visitor;
+use rustc_middle::mir::*;
+use rustc_middle::ty::{self, ScalarInt, Ty, TyCtxt};
+use rustc_mir_dataflow::value_analysis::{Map, PlaceIndex, State, TrackElem};
+
+use crate::cost_checker::CostChecker;
+use crate::MirPass;
+
+pub struct JumpThreading;
+
+const MAX_BACKTRACK: usize = 5;
+const MAX_COST: usize = 100;
+const MAX_PLACES: usize = 100;
+
+impl<'tcx> MirPass<'tcx> for JumpThreading {
+    fn is_enabled(&self, sess: &rustc_session::Session) -> bool {
+        sess.mir_opt_level() >= 4
+    }
+
+    #[instrument(skip_all, level = "debug")]
+    fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+        let def_id = body.source.def_id();
+        debug!(?def_id);
+
+        let param_env = tcx.param_env_reveal_all_normalized(def_id);
+        let map = Map::new(tcx, body, Some(MAX_PLACES));
+        let loop_headers = loop_headers(body);
+
+        let arena = DroplessArena::default();
+        let mut finder = TOFinder {
+            tcx,
+            param_env,
+            body,
+            arena: &arena,
+            map: &map,
+            loop_headers: &loop_headers,
+            opportunities: Vec::new(),
+        };
+
+        for (bb, bbdata) in body.basic_blocks.iter_enumerated() {
+            debug!(?bb, term = ?bbdata.terminator());
+            if bbdata.is_cleanup || loop_headers.contains(bb) {
+                continue;
+            }
+            let Some((discr, targets)) = bbdata.terminator().kind.as_switch() else { continue };
+            let Some(discr) = discr.place() else { continue };
+            debug!(?discr, ?bb);
+
+            let discr_ty = discr.ty(body, tcx).ty;
+            let Ok(discr_layout) = tcx.layout_of(param_env.and(discr_ty)) else { continue };
+
+            let Some(discr) = finder.map.find(discr.as_ref()) else { continue };
+            debug!(?discr);
+
+            let cost = CostChecker::new(tcx, param_env, None, body);
+
+            let mut state = State::new(ConditionSet::default(), &finder.map);
+
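+            // Seed the search: a two-way switch tells us how to reach both targets
+            // (`discr == value` and `discr != value`); a general switch only yields one
+            // equality condition per target.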
+            let conds = if let Some((value, then, else_)) = targets.as_static_if() {
+                let Some(value) = ScalarInt::try_from_uint(value, discr_layout.size) else {
+                    continue;
+                };
+                arena.alloc_from_iter([
+                    Condition { value, polarity: Polarity::Eq, target: then },
+                    Condition { value, polarity: Polarity::Ne, target: else_ },
+                ])
+            } else {
+                arena.alloc_from_iter(targets.iter().filter_map(|(value, target)| {
+                    let value = ScalarInt::try_from_uint(value, discr_layout.size)?;
+                    Some(Condition { value, polarity: Polarity::Eq, target })
+                }))
+            };
+            let conds = ConditionSet(conds);
+            state.insert_value_idx(discr, conds, &finder.map);
+
+            finder.find_opportunity(bb, state, cost, 0);
+        }
+
+        let opportunities = finder.opportunities;
+        debug!(?opportunities);
+        if opportunities.is_empty() {
+            return;
+        }
+
+        // Verify that we do not thread through a loop header.
+        for to in opportunities.iter() {
+            assert!(to.chain.iter().all(|&block| !loop_headers.contains(block)));
+        }
+        OpportunitySet::new(body, opportunities).apply(body);
+    }
+}
+
+#[derive(Debug)]
+struct ThreadingOpportunity {
+    /// The list of `BasicBlock`s from the one that found the opportunity to the `SwitchInt`.
+    chain: Vec<BasicBlock>,
+    /// The `SwitchInt` will be replaced by `Goto { target }`.
+    target: BasicBlock,
+}
+
+struct TOFinder<'tcx, 'a> {
+    tcx: TyCtxt<'tcx>,
+    param_env: ty::ParamEnv<'tcx>,
+    body: &'a Body<'tcx>,
+    map: &'a Map,
+    loop_headers: &'a BitSet<BasicBlock>,
+    /// We use an arena to avoid cloning the slices when cloning `state`.
+    arena: &'a DroplessArena,
+    opportunities: Vec<ThreadingOpportunity>,
+}
+
+/// Represents the following statement: if we can prove that the current local is equal/not-equal
+/// to `value`, jump to `target`.
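+///
+/// For example (an illustrative sketch), `Condition { value: 0, polarity: Eq, target: bb3 }`
+/// attached to some place `_2` means: once we know `_2 == 0`, the `SwitchInt` can be replaced
+/// by `Goto { target: bb3 }`.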
+#[derive(Copy, Clone, Debug)]
+struct Condition {
+    value: ScalarInt,
+    polarity: Polarity,
+    target: BasicBlock,
+}
+
+#[derive(Copy, Clone, Debug, Eq, PartialEq)]
+enum Polarity {
+    Ne,
+    Eq,
+}
+
+impl Condition {
+    fn matches(&self, value: ScalarInt) -> bool {
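+        // An `Eq` condition matches exactly the tracked value; a `Ne` condition matches any
+        // other value.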
+        (self.value == value) == (self.polarity == Polarity::Eq)
+    }
+
+    fn inv(mut self) -> Self {
+        self.polarity = match self.polarity {
+            Polarity::Eq => Polarity::Ne,
+            Polarity::Ne => Polarity::Eq,
+        };
+        self
+    }
+}
+
+#[derive(Copy, Clone, Debug, Default)]
+struct ConditionSet<'a>(&'a [Condition]);
+
+impl<'a> ConditionSet<'a> {
+    fn iter(self) -> impl Iterator<Item = Condition> + 'a {
+        self.0.iter().copied()
+    }
+
+    fn iter_matches(self, value: ScalarInt) -> impl Iterator<Item = Condition> + 'a {
+        self.iter().filter(move |c| c.matches(value))
+    }
+
+    fn map(self, arena: &'a DroplessArena, f: impl Fn(Condition) -> Condition) -> ConditionSet<'a> {
+        ConditionSet(arena.alloc_from_iter(self.iter().map(f)))
+    }
+}
+
+impl<'tcx, 'a> TOFinder<'tcx, 'a> {
+    fn is_empty(&self, state: &State<ConditionSet<'a>>) -> bool {
+        state.all(|cs| cs.0.is_empty())
+    }
+
+    /// Recursion entry point to find threading opportunities.
+    #[instrument(level = "trace", skip(self, cost), ret)]
+    fn find_opportunity(
+        &mut self,
+        bb: BasicBlock,
+        mut state: State<ConditionSet<'a>>,
+        mut cost: CostChecker<'_, 'tcx>,
+        depth: usize,
+    ) {
+        // Do not thread through loop headers.
+        if self.loop_headers.contains(bb) {
+            return;
+        }
+
+        debug!(cost = ?cost.cost());
+        for (statement_index, stmt) in
+            self.body.basic_blocks[bb].statements.iter().enumerate().rev()
+        {
+            if self.is_empty(&state) {
+                return;
+            }
+
+            cost.visit_statement(stmt, Location { block: bb, statement_index });
+            if cost.cost() > MAX_COST {
+                return;
+            }
+
+            // Attempt to turn the `current_condition` on `lhs` into a condition on another place.
+            self.process_statement(bb, stmt, &mut state);
+
+            // When a statement mutates a place, assignments to that place that happen
+            // above the mutation cannot fulfill a condition.
+            //   _1 = 5 // Whatever happens here, it won't change the result of a `SwitchInt`.
+            //   _1 = 6
+            if let Some((lhs, tail)) = self.mutated_statement(stmt) {
+                state.flood_with_tail_elem(lhs.as_ref(), tail, self.map, ConditionSet::default());
+            }
+        }
+
+        if self.is_empty(&state) || depth >= MAX_BACKTRACK {
+            return;
+        }
+
+        let last_non_rec = self.opportunities.len();
+
+        let predecessors = &self.body.basic_blocks.predecessors()[bb];
+        if let &[pred] = &predecessors[..] && bb != START_BLOCK {
+            let term = self.body.basic_blocks[pred].terminator();
+            match term.kind {
+                TerminatorKind::SwitchInt { ref discr, ref targets } => {
+                    self.process_switch_int(discr, targets, bb, &mut state);
+                    self.find_opportunity(pred, state, cost, depth + 1);
+                }
+                _ => self.recurse_through_terminator(pred, &state, &cost, depth),
+            }
+        } else {
+            for &pred in predecessors {
+                self.recurse_through_terminator(pred, &state, &cost, depth);
+            }
+        }
+
+        let new_tos = &mut self.opportunities[last_non_rec..];
+        debug!(?new_tos);
+
+        // Try to deduplicate threading opportunities.
+        if new_tos.len() > 1
+            && new_tos.len() == predecessors.len()
+            && predecessors
+                .iter()
+                .zip(new_tos.iter())
+                .all(|(&pred, to)| to.chain == &[pred] && to.target == new_tos[0].target)
+        {
+            // All predecessors have a threading opportunity, and they all point to the same block.
+            debug!(?new_tos, "dedup");
+            let first = &mut new_tos[0];
+            *first = ThreadingOpportunity { chain: vec![bb], target: first.target };
+            self.opportunities.truncate(last_non_rec + 1);
+            return;
+        }
+
+        for op in self.opportunities[last_non_rec..].iter_mut() {
+            op.chain.push(bb);
+        }
+    }
+
+    /// Extract the mutated place from a statement.
+    ///
+    /// This method returns the `Place` so we can flood the state in case of a partial assignment.
+    ///     (_1 as Ok).0 = _5;
+    ///     (_1 as Err).0 = _6;
+    /// We want to ensure that a `SwitchInt((_1 as Ok).0)` does not see the first assignment, as
+    /// the value may have been mangled by the second assignment.
+    ///
+    /// In case we assign to a discriminant, we return `Some(TrackElem::Discriminant)`, so we can
+    /// flood only the discriminant and preserve the variant fields.
+    ///     (_1 as Some).0 = _6;
+    ///     SetDiscriminant(_1, 1);
+    ///     switchInt((_1 as Some).0)
+    #[instrument(level = "trace", skip(self), ret)]
+    fn mutated_statement(
+        &self,
+        stmt: &Statement<'tcx>,
+    ) -> Option<(Place<'tcx>, Option<TrackElem>)> {
+        match stmt.kind {
+            StatementKind::Assign(box (place, _))
+            | StatementKind::Deinit(box place) => Some((place, None)),
+            StatementKind::SetDiscriminant { box place, variant_index: _ } => {
+                Some((place, Some(TrackElem::Discriminant)))
+            }
+            StatementKind::StorageLive(local) | StatementKind::StorageDead(local) => {
+                Some((Place::from(local), None))
+            }
+            StatementKind::Retag(..)
+            | StatementKind::Intrinsic(box NonDivergingIntrinsic::Assume(..))
+            // copy_nonoverlapping takes pointers and mutates the pointed-to value.
+            | StatementKind::Intrinsic(box NonDivergingIntrinsic::CopyNonOverlapping(..))
+            | StatementKind::AscribeUserType(..)
+            | StatementKind::Coverage(..)
+            | StatementKind::FakeRead(..)
+            | StatementKind::ConstEvalCounter
+            | StatementKind::PlaceMention(..)
+            | StatementKind::Nop => None,
+        }
+    }
+
+    #[instrument(level = "trace", skip(self))]
+    fn process_operand(
+        &mut self,
+        bb: BasicBlock,
+        lhs: PlaceIndex,
+        rhs: &Operand<'tcx>,
+        state: &mut State<ConditionSet<'a>>,
+    ) -> Option<!> {
+        let register_opportunity = |c: Condition| {
+            debug!(?bb, ?c.target, "register");
+            self.opportunities.push(ThreadingOpportunity { chain: vec![bb], target: c.target })
+        };
+
+        match rhs {
+            // If we expect `lhs ?= A`, we have an opportunity if we assume `constant == A`.
+            Operand::Constant(constant) => {
+                let conditions = state.try_get_idx(lhs, self.map)?;
+                let constant =
+                    constant.const_.normalize(self.tcx, self.param_env).try_to_scalar_int()?;
+                conditions.iter_matches(constant).for_each(register_opportunity);
+            }
+            // Transfer the conditions on the copied rhs.
+            Operand::Move(rhs) | Operand::Copy(rhs) => {
+                let rhs = self.map.find(rhs.as_ref())?;
+                state.insert_place_idx(rhs, lhs, self.map);
+            }
+        }
+
+        None
+    }
+
+    #[instrument(level = "trace", skip(self))]
+    fn process_statement(
+        &mut self,
+        bb: BasicBlock,
+        stmt: &Statement<'tcx>,
+        state: &mut State<ConditionSet<'a>>,
+    ) -> Option<!> {
+        let register_opportunity = |c: Condition| {
+            debug!(?bb, ?c.target, "register");
+            self.opportunities.push(ThreadingOpportunity { chain: vec![bb], target: c.target })
+        };
+
+        // Below, `lhs` is the return value of `mutated_statement`,
+        // the place to which `conditions` apply.
+
+        let discriminant_for_variant = |enum_ty: Ty<'tcx>, variant_index| {
+            let discr = enum_ty.discriminant_for_variant(self.tcx, variant_index)?;
+            let discr_layout = self.tcx.layout_of(self.param_env.and(discr.ty)).ok()?;
+            let scalar = ScalarInt::try_from_uint(discr.val, discr_layout.size)?;
+            Some(Operand::const_from_scalar(
+                self.tcx,
+                discr.ty,
+                scalar.into(),
+                rustc_span::DUMMY_SP,
+            ))
+        };
+
+        match &stmt.kind {
+            // If we expect `discriminant(place) ?= A`,
+            // we have an opportunity if `variant_index ?= A`.
+            StatementKind::SetDiscriminant { box place, variant_index } => {
+                let discr_target = self.map.find_discr(place.as_ref())?;
+                let enum_ty = place.ty(self.body, self.tcx).ty;
+                let discr = discriminant_for_variant(enum_ty, *variant_index)?;
+                self.process_operand(bb, discr_target, &discr, state)?;
+            }
+            // If we expect `lhs ?= true`, we have an opportunity if we assume `lhs == true`.
+            StatementKind::Intrinsic(box NonDivergingIntrinsic::Assume(
+                Operand::Copy(place) | Operand::Move(place),
+            )) => {
+                let conditions = state.try_get(place.as_ref(), self.map)?;
+                conditions.iter_matches(ScalarInt::TRUE).for_each(register_opportunity);
+            }
+            StatementKind::Assign(box (lhs_place, rhs)) => {
+                if let Some(lhs) = self.map.find(lhs_place.as_ref()) {
+                    match rhs {
+                        Rvalue::Use(operand) => self.process_operand(bb, lhs, operand, state)?,
+                        // Transfer the conditions on the copy rhs.
+                        Rvalue::CopyForDeref(rhs) => {
+                            self.process_operand(bb, lhs, &Operand::Copy(*rhs), state)?
+                        }
+                        Rvalue::Discriminant(rhs) => {
+                            let rhs = self.map.find_discr(rhs.as_ref())?;
+                            state.insert_place_idx(rhs, lhs, self.map);
+                        }
+                        // For an aggregate, transfer the conditions through the discriminant
+                        // (for enums) and through each field operand.
+                        Rvalue::Aggregate(box ref kind, ref operands) => {
+                            let agg_ty = lhs_place.ty(self.body, self.tcx).ty;
+                            let lhs = match kind {
+                                // Do not support unions.
+                                AggregateKind::Adt(.., Some(_)) => return None,
+                                AggregateKind::Adt(_, variant_index, ..) if agg_ty.is_enum() => {
+                                    if let Some(discr_target) = self.map.apply(lhs, TrackElem::Discriminant)
+                                        && let Some(discr_value) = discriminant_for_variant(agg_ty, *variant_index)
+                                    {
+                                        self.process_operand(bb, discr_target, &discr_value, state);
+                                    }
+                                    self.map.apply(lhs, TrackElem::Variant(*variant_index))?
+                                }
+                                _ => lhs,
+                            };
+                            for (field_index, operand) in operands.iter_enumerated() {
+                                if let Some(field) =
+                                    self.map.apply(lhs, TrackElem::Field(field_index))
+                                {
+                                    self.process_operand(bb, field, operand, state);
+                                }
+                            }
+                        }
+                        // Transfer the conditions on the copy rhs, after inverting the polarity.
+                        Rvalue::UnaryOp(UnOp::Not, Operand::Move(place) | Operand::Copy(place)) => {
+                            let conditions = state.try_get_idx(lhs, self.map)?;
+                            let place = self.map.find(place.as_ref())?;
+                            let conds = conditions.map(self.arena, Condition::inv);
+                            state.insert_value_idx(place, conds, self.map);
+                        }
+                        // We expect `lhs ?= A`. We found `lhs = Eq(rhs, B)`.
+                        // Create a condition on `rhs ?= B`.
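+                        // For instance, needing `lhs == true` to reach `target` and seeing
+                        // `lhs = Eq(rhs, 7)` yields the new condition `rhs == 7` for `target`.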
+                        Rvalue::BinaryOp(
+                            op,
+                            box (
+                                Operand::Move(place) | Operand::Copy(place),
+                                Operand::Constant(value),
+                            )
+                            | box (
+                                Operand::Constant(value),
+                                Operand::Move(place) | Operand::Copy(place),
+                            ),
+                        ) => {
+                            let conditions = state.try_get_idx(lhs, self.map)?;
+                            let place = self.map.find(place.as_ref())?;
+                            let equals = match op {
+                                BinOp::Eq => ScalarInt::TRUE,
+                                BinOp::Ne => ScalarInt::FALSE,
+                                _ => return None,
+                            };
+                            let value = value
+                                .const_
+                                .normalize(self.tcx, self.param_env)
+                                .try_to_scalar_int()?;
+                            let conds = conditions.map(self.arena, |c| Condition {
+                                value,
+                                polarity: if c.matches(equals) {
+                                    Polarity::Eq
+                                } else {
+                                    Polarity::Ne
+                                },
+                                ..c
+                            });
+                            state.insert_value_idx(place, conds, self.map);
+                        }
+
+                        _ => {}
+                    }
+                }
+            }
+            _ => {}
+        }
+
+        None
+    }
+
+    #[instrument(level = "trace", skip(self, cost))]
+    fn recurse_through_terminator(
+        &mut self,
+        bb: BasicBlock,
+        state: &State<ConditionSet<'a>>,
+        cost: &CostChecker<'_, 'tcx>,
+        depth: usize,
+    ) {
+        let register_opportunity = |c: Condition| {
+            debug!(?bb, ?c.target, "register");
+            self.opportunities.push(ThreadingOpportunity { chain: vec![bb], target: c.target })
+        };
+
+        let term = self.body.basic_blocks[bb].terminator();
+        let place_to_flood = match term.kind {
+            // We come from a target, so those are not possible.
+            TerminatorKind::UnwindResume
+            | TerminatorKind::UnwindTerminate(_)
+            | TerminatorKind::Return
+            | TerminatorKind::Unreachable
+            | TerminatorKind::CoroutineDrop => bug!("{term:?} has no successors"),
+            // Disallowed during optimizations.
+            TerminatorKind::FalseEdge { .. }
+            | TerminatorKind::FalseUnwind { .. }
+            | TerminatorKind::Yield { .. } => bug!("{term:?} invalid"),
+            // Cannot reason about inline asm.
+            TerminatorKind::InlineAsm { .. } => return,
+            // `SwitchInt` is handled specially.
+            TerminatorKind::SwitchInt { .. } => return,
+            // We can recurse; nothing particular to do.
+            TerminatorKind::Goto { .. } => None,
+            // Flood the overwritten place, and progress through.
+            TerminatorKind::Drop { place: destination, .. }
+            | TerminatorKind::Call { destination, .. } => Some(destination),
+            // Treat as an `assume(cond == expected)`.
+            TerminatorKind::Assert { ref cond, expected, .. } => {
+                if let Some(place) = cond.place()
+                    && let Some(conditions) = state.try_get(place.as_ref(), self.map)
+                {
+                    let expected = if expected { ScalarInt::TRUE } else { ScalarInt::FALSE };
+                    conditions.iter_matches(expected).for_each(register_opportunity);
+                }
+                None
+            }
+        };
+
+        // We can recurse through this terminator.
+        let mut state = state.clone();
+        if let Some(place_to_flood) = place_to_flood {
+            state.flood_with(place_to_flood.as_ref(), self.map, ConditionSet::default());
+        }
+        self.find_opportunity(bb, state, cost.clone(), depth + 1);
+    }
+
+    #[instrument(level = "trace", skip(self))]
+    fn process_switch_int(
+        &mut self,
+        discr: &Operand<'tcx>,
+        targets: &SwitchTargets,
+        target_bb: BasicBlock,
+        state: &mut State<ConditionSet<'a>>,
+    ) -> Option<!> {
+        debug_assert_ne!(target_bb, START_BLOCK);
+        debug_assert_eq!(self.body.basic_blocks.predecessors()[target_bb].len(), 1);
+
+        let discr = discr.place()?;
+        let discr_ty = discr.ty(self.body, self.tcx).ty;
+        let discr_layout = self.tcx.layout_of(self.param_env.and(discr_ty)).ok()?;
+        let conditions = state.try_get(discr.as_ref(), self.map)?;
+
+        if let Some((value, _)) = targets.iter().find(|&(_, target)| target == target_bb) {
+            let value = ScalarInt::try_from_uint(value, discr_layout.size)?;
+            debug_assert_eq!(targets.iter().filter(|&(_, target)| target == target_bb).count(), 1);
+
+            // We are inside `target_bb`. Since we have a single predecessor, we know we passed
+            // through the `SwitchInt` before arriving here. Therefore, we know that
+            // `discr == value`. If one condition can be fulfilled by `discr == value`,
+            // that's an opportunity.
+            for c in conditions.iter_matches(value) {
+                debug!(?target_bb, ?c.target, "register");
+                self.opportunities.push(ThreadingOpportunity { chain: vec![], target: c.target });
+            }
+        } else if let Some((value, _, else_bb)) = targets.as_static_if()
+            && target_bb == else_bb
+        {
+            let value = ScalarInt::try_from_uint(value, discr_layout.size)?;
+
+            // We only know that `discr != value`. That's much weaker information than
+            // the equality we had in the previous arm. All we can conclude is that
+            // the replacement condition `discr != value` can be threaded, and nothing else.
+            for c in conditions.iter() {
+                if c.value == value && c.polarity == Polarity::Ne {
+                    debug!(?target_bb, ?c.target, "register");
+                    self.opportunities
+                        .push(ThreadingOpportunity { chain: vec![], target: c.target });
+                }
+            }
+        }
+
+        None
+    }
+}
+
+struct OpportunitySet {
+    opportunities: Vec<ThreadingOpportunity>,
+    /// For each bb, give the TOs in which it appears. The pair corresponds to the index
+    /// in `opportunities` and the index in `ThreadingOpportunity::chain`.
+    involving_tos: IndexVec<BasicBlock, Vec<(usize, usize)>>,
+    /// Cache the number of predecessors for each block, as we clear the basic block cache.
+    predecessors: IndexVec<BasicBlock, usize>,
+}
+
+impl OpportunitySet {
+    fn new(body: &Body<'_>, opportunities: Vec<ThreadingOpportunity>) -> OpportunitySet {
+        let mut involving_tos = IndexVec::from_elem(Vec::new(), &body.basic_blocks);
+        for (index, to) in opportunities.iter().enumerate() {
+            for (ibb, &bb) in to.chain.iter().enumerate() {
+                involving_tos[bb].push((index, ibb));
+            }
+            involving_tos[to.target].push((index, to.chain.len()));
+        }
+        let predecessors = predecessor_count(body);
+        OpportunitySet { opportunities, involving_tos, predecessors }
+    }
+
+    /// Apply the opportunities on the graph.
+    fn apply(&mut self, body: &mut Body<'_>) {
+        for i in 0..self.opportunities.len() {
+            self.apply_once(i, body);
+        }
+    }
+
+    #[instrument(level = "trace", skip(self, body))]
+    fn apply_once(&mut self, index: usize, body: &mut Body<'_>) {
+        debug!(?self.predecessors);
+        debug!(?self.involving_tos);
+
+        // Check that `predecessors` satisfies its invariant.
+        debug_assert_eq!(self.predecessors, predecessor_count(body));
+
+        // Remove the TO from the vector to allow modifying the other ones later.
+        let op = &mut self.opportunities[index];
+        debug!(?op);
+        let op_chain = std::mem::take(&mut op.chain);
+        let op_target = op.target;
+        debug_assert_eq!(op_chain.len(), op_chain.iter().collect::<FxHashSet<_>>().len());
+
+        let Some((current, chain)) = op_chain.split_first() else { return };
+        let basic_blocks = body.basic_blocks.as_mut();
+
+        // Invariant: the control-flow is well-formed at the end of each iteration.
+        let mut current = *current;
+        for &succ in chain {
+            debug!(?current, ?succ);
+
+            // `succ` must be a successor of `current`. If it is not, this means this TO is not
+            // satisfiable and a previous TO erased this edge, so we bail out.
+            if basic_blocks[current].terminator().successors().find(|s| *s == succ).is_none() {
+                debug!("impossible");
+                return;
+            }
+
+            // Fast path: `succ` is only used once, so we can reuse it directly.
+            if self.predecessors[succ] == 1 {
+                debug!("single");
+                current = succ;
+                continue;
+            }
+
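+            // `succ` has other predecessors, so we cannot rewrite its terminator in place.
+            // Duplicate the block and redirect only the `current -> succ` edge to the copy.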
+            let new_succ = basic_blocks.push(basic_blocks[succ].clone());
+            debug!(?new_succ);
+
+            // Replace `succ` by `new_succ` where it appears.
+            let mut num_edges = 0;
+            for s in basic_blocks[current].terminator_mut().successors_mut() {
+                if *s == succ {
+                    *s = new_succ;
+                    num_edges += 1;
+                }
+            }
+
+            // Update predecessors with the new block.
+            let _new_succ = self.predecessors.push(num_edges);
+            debug_assert_eq!(new_succ, _new_succ);
+            self.predecessors[succ] -= num_edges;
+            self.update_predecessor_count(basic_blocks[new_succ].terminator(), Update::Incr);
+
+            // Replace the `current -> succ` edge by `current -> new_succ` in all the following
+            // TOs. This is necessary to avoid trying to thread through a non-existing edge. We
+            // use `involving_tos` here to avoid traversing the full set of TOs on each iteration.
+            let mut new_involved = Vec::new();
+            for &(to_index, in_to_index) in &self.involving_tos[current] {
+                // That TO has already been applied, do nothing.
+                if to_index <= index {
+                    continue;
+                }
+
+                let other_to = &mut self.opportunities[to_index];
+                if other_to.chain.get(in_to_index) != Some(&current) {
+                    continue;
+                }
+                let s = other_to.chain.get_mut(in_to_index + 1).unwrap_or(&mut other_to.target);
+                if *s == succ {
+                    // `other_to` references the `current -> succ` edge, so replace `succ`.
+                    *s = new_succ;
+                    new_involved.push((to_index, in_to_index + 1));
+                }
+            }
+
+            // The TOs that we just updated now reference `new_succ`. Update `involving_tos`
+            // in case we need to duplicate an edge starting at `new_succ` later.
+            let _new_succ = self.involving_tos.push(new_involved);
+            debug_assert_eq!(new_succ, _new_succ);
+
+            current = new_succ;
+        }
+
+        let current = &mut basic_blocks[current];
+        self.update_predecessor_count(current.terminator(), Update::Decr);
+        current.terminator_mut().kind = TerminatorKind::Goto { target: op_target };
+        self.predecessors[op_target] += 1;
+    }
+
+    fn update_predecessor_count(&mut self, terminator: &Terminator<'_>, incr: Update) {
+        match incr {
+            Update::Incr => {
+                for s in terminator.successors() {
+                    self.predecessors[s] += 1;
+                }
+            }
+            Update::Decr => {
+                for s in terminator.successors() {
+                    self.predecessors[s] -= 1;
+                }
+            }
+        }
+    }
+}
+
+fn predecessor_count(body: &Body<'_>) -> IndexVec<BasicBlock, usize> {
+    let mut predecessors: IndexVec<_, _> =
+        body.basic_blocks.predecessors().iter().map(|ps| ps.len()).collect();
+    predecessors[START_BLOCK] += 1; // Account for the implicit entry edge.
+    predecessors
+}
+
+enum Update {
+    Incr,
+    Decr,
+}
+
+/// Compute the set of loop headers in the given body. We define a loop header as a block which has
+/// at least one predecessor which it dominates. This definition is only correct for reducible CFGs.
+/// But if the CFG is already irreducible, there is no point in trying much harder.
+fn loop_headers(body: &Body<'_>) -> BitSet<BasicBlock> {
+    let mut loop_headers = BitSet::new_empty(body.basic_blocks.len());
+    let dominators = body.basic_blocks.dominators();
+    // Only visit reachable blocks.
+    for (bb, bbdata) in traversal::preorder(body) {
+        for succ in bbdata.terminator().successors() {
+            if dominators.dominates(succ, bb) {
+                loop_headers.insert(succ);
+            }
+        }
+    }
+    loop_headers
+}
diff --git a/compiler/rustc_mir_transform/src/large_enums.rs b/compiler/rustc_mir_transform/src/large_enums.rs
index 886ff760481..0a8b13d6677 100644
--- a/compiler/rustc_mir_transform/src/large_enums.rs
+++ b/compiler/rustc_mir_transform/src/large_enums.rs
@@ -30,6 +30,9 @@ pub struct EnumSizeOpt {
 
 impl<'tcx> MirPass<'tcx> for EnumSizeOpt {
     fn is_enabled(&self, sess: &Session) -> bool {
+        // There are some differences in behavior on wasm and ARM that are not properly
+        // understood, so we conservatively treat this optimization as unsound:
+        // https://github.com/rust-lang/rust/pull/85158#issuecomment-1101836457
         sess.opts.unstable_opts.unsound_mir_opts || sess.mir_opt_level() >= 3
     }
     fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
diff --git a/compiler/rustc_mir_transform/src/lib.rs b/compiler/rustc_mir_transform/src/lib.rs
index 754f2ee8376..9aaa54110bd 100644
--- a/compiler/rustc_mir_transform/src/lib.rs
+++ b/compiler/rustc_mir_transform/src/lib.rs
@@ -54,13 +54,17 @@ mod check_packed_ref;
 pub mod check_unsafety;
 mod remove_place_mention;
 // This pass is public to allow external drivers to perform MIR cleanup
+mod add_subtyping_projections;
 pub mod cleanup_post_borrowck;
 mod const_debuginfo;
 mod const_goto;
 mod const_prop;
 mod const_prop_lint;
 mod copy_prop;
+mod coroutine;
+mod cost_checker;
 mod coverage;
+mod cross_crate_inline;
 mod ctfe_limit;
 mod dataflow_const_prop;
 mod dead_store_elimination;
@@ -75,10 +79,10 @@ mod elaborate_drops;
 mod errors;
 mod ffi_unwind_calls;
 mod function_item_references;
-mod generator;
 mod gvn;
 pub mod inline;
 mod instsimplify;
+mod jump_threading;
 mod large_enums;
 mod lower_intrinsics;
 mod lower_slice_len;
@@ -122,6 +126,7 @@ pub fn provide(providers: &mut Providers) {
     coverage::query::provide(providers);
     ffi_unwind_calls::provide(providers);
     shim::provide(providers);
+    cross_crate_inline::provide(providers);
     *providers = Providers {
         mir_keys,
         mir_const,
@@ -129,7 +134,7 @@ pub fn provide(providers: &mut Providers) {
         mir_promoted,
         mir_drops_elaborated_and_const_checked,
         mir_for_ctfe,
-        mir_generator_witnesses: generator::mir_generator_witnesses,
+        mir_coroutine_witnesses: coroutine::mir_coroutine_witnesses,
         optimized_mir,
         is_mir_available,
         is_ctfe_mir_available: |tcx, did| is_mir_available(tcx, did),
@@ -161,37 +166,50 @@ fn remap_mir_for_const_eval_select<'tcx>(
                 && tcx.item_name(def_id) == sym::const_eval_select
                 && tcx.is_intrinsic(def_id) =>
             {
-                let [tupled_args, called_in_const, called_at_rt]: [_; 3] = std::mem::take(args).try_into().unwrap();
+                let [tupled_args, called_in_const, called_at_rt]: [_; 3] =
+                    std::mem::take(args).try_into().unwrap();
                 let ty = tupled_args.ty(&body.local_decls, tcx);
                 let fields = ty.tuple_fields();
                 let num_args = fields.len();
-                let func = if context == hir::Constness::Const { called_in_const } else { called_at_rt };
-                let (method, place): (fn(Place<'tcx>) -> Operand<'tcx>, Place<'tcx>) = match tupled_args {
-                    Operand::Constant(_) => {
-                        // there is no good way of extracting a tuple arg from a constant (const generic stuff)
-                        // so we just create a temporary and deconstruct that.
-                        let local = body.local_decls.push(LocalDecl::new(ty, fn_span));
-                        bb.statements.push(Statement {
-                            source_info: SourceInfo::outermost(fn_span),
-                            kind: StatementKind::Assign(Box::new((local.into(), Rvalue::Use(tupled_args.clone())))),
-                        });
-                        (Operand::Move, local.into())
-                    }
-                    Operand::Move(place) => (Operand::Move, place),
-                    Operand::Copy(place) => (Operand::Copy, place),
-                };
-                let place_elems = place.projection;
-                let arguments = (0..num_args).map(|x| {
-                    let mut place_elems = place_elems.to_vec();
-                    place_elems.push(ProjectionElem::Field(x.into(), fields[x]));
-                    let projection = tcx.mk_place_elems(&place_elems);
-                    let place = Place {
-                        local: place.local,
-                        projection,
+                let func =
+                    if context == hir::Constness::Const { called_in_const } else { called_at_rt };
+                let (method, place): (fn(Place<'tcx>) -> Operand<'tcx>, Place<'tcx>) =
+                    match tupled_args {
+                        Operand::Constant(_) => {
+                            // there is no good way of extracting a tuple arg from a constant (const generic stuff)
+                            // so we just create a temporary and deconstruct that.
+                            let local = body.local_decls.push(LocalDecl::new(ty, fn_span));
+                            bb.statements.push(Statement {
+                                source_info: SourceInfo::outermost(fn_span),
+                                kind: StatementKind::Assign(Box::new((
+                                    local.into(),
+                                    Rvalue::Use(tupled_args.clone()),
+                                ))),
+                            });
+                            (Operand::Move, local.into())
+                        }
+                        Operand::Move(place) => (Operand::Move, place),
+                        Operand::Copy(place) => (Operand::Copy, place),
                     };
-                    method(place)
-                }).collect();
-                terminator.kind = TerminatorKind::Call { func, args: arguments, destination, target, unwind, call_source: CallSource::Misc, fn_span };
+                let place_elems = place.projection;
+                let arguments = (0..num_args)
+                    .map(|x| {
+                        let mut place_elems = place_elems.to_vec();
+                        place_elems.push(ProjectionElem::Field(x.into(), fields[x]));
+                        let projection = tcx.mk_place_elems(&place_elems);
+                        let place = Place { local: place.local, projection };
+                        method(place)
+                    })
+                    .collect();
+                terminator.kind = TerminatorKind::Call {
+                    func,
+                    args: arguments,
+                    destination,
+                    target,
+                    unwind,
+                    call_source: CallSource::Misc,
+                    fn_span,
+                };
             }
             _ => {}
         }
@@ -359,15 +377,15 @@ fn inner_mir_for_ctfe(tcx: TyCtxt<'_>, def: LocalDefId) -> Body<'_> {
 /// mir borrowck *before* doing so in order to ensure that borrowck can be run and doesn't
 /// end up missing the source MIR due to stealing happening.
 fn mir_drops_elaborated_and_const_checked(tcx: TyCtxt<'_>, def: LocalDefId) -> &Steal<Body<'_>> {
-    if let DefKind::Generator = tcx.def_kind(def) {
-        tcx.ensure_with_value().mir_generator_witnesses(def);
+    if let DefKind::Coroutine = tcx.def_kind(def) {
+        tcx.ensure_with_value().mir_coroutine_witnesses(def);
     }
     let mir_borrowck = tcx.mir_borrowck(def);
 
     let is_fn_like = tcx.def_kind(def).is_fn_like();
     if is_fn_like {
         // Do not compute the mir call graph without said call graph actually being used.
-        if inline::Inline.is_enabled(&tcx.sess) {
+        if pm::should_run_pass(tcx, &inline::Inline) {
             tcx.ensure_with_value().mir_inliner_callees(ty::InstanceDef::Item(def.to_def_id()));
         }
     }
@@ -481,6 +499,7 @@ fn run_runtime_lowering_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
         // These next passes must be executed together
         &add_call_guards::CriticalCallEdges,
         &reveal_all::RevealAll, // has to be done before drop elaboration, since we need to drop opaque types, too.
+        &add_subtyping_projections::Subtyper, // calling this after reveal_all ensures that we don't deal with opaque types
         &elaborate_drops::ElaborateDrops,
         // This will remove extraneous landing pads which are no longer
         // necessary, as well as forcing any call in a non-unwinding
@@ -492,9 +511,9 @@ fn run_runtime_lowering_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
         // `AddRetag` needs to run after `ElaborateDrops`. Otherwise it should run fairly late,
         // but before optimizations begin.
         &elaborate_box_derefs::ElaborateBoxDerefs,
-        &generator::StateTransform,
+        &coroutine::StateTransform,
         &add_retag::AddRetag,
-        &Lint(const_prop_lint::ConstProp),
+        &Lint(const_prop_lint::ConstPropLint),
     ];
     pm::run_passes_no_validate(tcx, body, passes, Some(MirPhase::Runtime(RuntimePhase::Initial)));
 }
@@ -552,10 +571,9 @@ fn run_optimization_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
             &const_prop::ConstProp,
             &gvn::GVN,
             &dataflow_const_prop::DataflowConstProp,
-            //
-            // Const-prop runs unconditionally, but doesn't mutate the MIR at mir-opt-level=0.
             &const_debuginfo::ConstDebugInfo,
             &o1(simplify_branches::SimplifyConstCondition::AfterConstProp),
+            &jump_threading::JumpThreading,
             &early_otherwise_branch::EarlyOtherwiseBranch,
             &simplify_comparison_integral::SimplifyComparisonIntegral,
             &dead_store_elimination::DeadStoreElimination,
@@ -611,6 +629,15 @@ fn inner_optimized_mir(tcx: TyCtxt<'_>, did: LocalDefId) -> Body<'_> {
         return body;
     }
 
+    // If `mir_drops_elaborated_and_const_checked` found that the current body has unsatisfiable
+    // predicates, it will shrink the MIR to a single `unreachable` terminator.
+    // More generally, if MIR is a lone `unreachable`, there is nothing to optimize.
+    if let TerminatorKind::Unreachable = body.basic_blocks[START_BLOCK].terminator().kind
+        && body.basic_blocks[START_BLOCK].statements.is_empty()
+    {
+        return body;
+    }
+
     run_optimization_passes(tcx, &mut body);
 
     body
diff --git a/compiler/rustc_mir_transform/src/lower_intrinsics.rs b/compiler/rustc_mir_transform/src/lower_intrinsics.rs
index 0d2d764c422..5f3d8dfc6c4 100644
--- a/compiler/rustc_mir_transform/src/lower_intrinsics.rs
+++ b/compiler/rustc_mir_transform/src/lower_intrinsics.rs
@@ -2,9 +2,8 @@
 
 use crate::MirPass;
 use rustc_middle::mir::*;
-use rustc_middle::ty::GenericArgsRef;
-use rustc_middle::ty::{self, Ty, TyCtxt};
-use rustc_span::symbol::{sym, Symbol};
+use rustc_middle::ty::{self, TyCtxt};
+use rustc_span::symbol::sym;
 use rustc_target::abi::{FieldIdx, VariantIdx};
 
 pub struct LowerIntrinsics;
@@ -16,12 +15,10 @@ impl<'tcx> MirPass<'tcx> for LowerIntrinsics {
             let terminator = block.terminator.as_mut().unwrap();
             if let TerminatorKind::Call { func, args, destination, target, .. } =
                 &mut terminator.kind
+                && let ty::FnDef(def_id, generic_args) = *func.ty(local_decls, tcx).kind()
+                && tcx.is_intrinsic(def_id)
             {
-                let func_ty = func.ty(local_decls, tcx);
-                let Some((intrinsic_name, generic_args)) = resolve_rust_intrinsic(tcx, func_ty)
-                else {
-                    continue;
-                };
+                let intrinsic_name = tcx.item_name(def_id);
                 match intrinsic_name {
                     sym::unreachable => {
                         terminator.kind = TerminatorKind::Unreachable;
@@ -169,12 +166,16 @@ impl<'tcx> MirPass<'tcx> for LowerIntrinsics {
                         let [arg] = args.as_slice() else {
                             span_bug!(terminator.source_info.span, "Wrong number of arguments");
                         };
-                        let derefed_place =
-                            if let Some(place) = arg.place() && let Some(local) = place.as_local() {
-                                tcx.mk_place_deref(local.into())
-                            } else {
-                                span_bug!(terminator.source_info.span, "Only passing a local is supported");
-                            };
+                        let derefed_place = if let Some(place) = arg.place()
+                            && let Some(local) = place.as_local()
+                        {
+                            tcx.mk_place_deref(local.into())
+                        } else {
+                            span_bug!(
+                                terminator.source_info.span,
+                                "Only passing a local is supported"
+                            );
+                        };
                         // Add new statement at the end of the block that does the read, and patch
                         // up the terminator.
                         block.statements.push(Statement {
@@ -201,12 +202,16 @@ impl<'tcx> MirPass<'tcx> for LowerIntrinsics {
                                 "Wrong number of arguments for write_via_move intrinsic",
                             );
                         };
-                        let derefed_place =
-                            if let Some(place) = ptr.place() && let Some(local) = place.as_local() {
-                                tcx.mk_place_deref(local.into())
-                            } else {
-                                span_bug!(terminator.source_info.span, "Only passing a local is supported");
-                            };
+                        let derefed_place = if let Some(place) = ptr.place()
+                            && let Some(local) = place.as_local()
+                        {
+                            tcx.mk_place_deref(local.into())
+                        } else {
+                            span_bug!(
+                                terminator.source_info.span,
+                                "Only passing a local is supported"
+                            );
+                        };
                         block.statements.push(Statement {
                             source_info: terminator.source_info,
                             kind: StatementKind::Assign(Box::new((
@@ -309,15 +314,3 @@ impl<'tcx> MirPass<'tcx> for LowerIntrinsics {
         }
     }
 }
-
-fn resolve_rust_intrinsic<'tcx>(
-    tcx: TyCtxt<'tcx>,
-    func_ty: Ty<'tcx>,
-) -> Option<(Symbol, GenericArgsRef<'tcx>)> {
-    if let ty::FnDef(def_id, args) = *func_ty.kind() {
-        if tcx.is_intrinsic(def_id) {
-            return Some((tcx.item_name(def_id), args));
-        }
-    }
-    None
-}
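
The change above folds `resolve_rust_intrinsic` into a single `if let` chain on the call terminator. A standalone sketch of that let-chain shape with made-up names (nothing here is a rustc API; the chained syntax assumes the `let_chains` feature on the nightly toolchains this code targets):

#![feature(let_chains)]

use std::collections::HashMap;

// All conditions are tested in one chain; the block is skipped as soon as any
// of them fails, replacing the earlier helper-plus-`let ... else` style.
fn first_positive(map: &HashMap<String, Vec<i32>>, key: &str) -> Option<i32> {
    if let Some(values) = map.get(key)
        && let [first, ..] = values.as_slice()
        && *first > 0
    {
        Some(*first)
    } else {
        None
    }
}

fn main() {
    let mut map = HashMap::new();
    map.insert("a".to_string(), vec![3, 1]);
    assert_eq!(first_positive(&map, "a"), Some(3));
}
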
diff --git a/compiler/rustc_mir_transform/src/lower_slice_len.rs b/compiler/rustc_mir_transform/src/lower_slice_len.rs
index b7cc0db9559..ae487841179 100644
--- a/compiler/rustc_mir_transform/src/lower_slice_len.rs
+++ b/compiler/rustc_mir_transform/src/lower_slice_len.rs
@@ -34,67 +34,43 @@ pub fn lower_slice_len_calls<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
     }
 }
 
-struct SliceLenPatchInformation<'tcx> {
-    add_statement: Statement<'tcx>,
-    new_terminator_kind: TerminatorKind<'tcx>,
-}
-
 fn lower_slice_len_call<'tcx>(
     tcx: TyCtxt<'tcx>,
     block: &mut BasicBlockData<'tcx>,
     local_decls: &IndexSlice<Local, LocalDecl<'tcx>>,
     slice_len_fn_item_def_id: DefId,
 ) {
-    let mut patch_found: Option<SliceLenPatchInformation<'_>> = None;
-
     let terminator = block.terminator();
-    match &terminator.kind {
-        TerminatorKind::Call {
-            func,
-            args,
-            destination,
-            target: Some(bb),
-            call_source: CallSource::Normal,
-            ..
-        } => {
-            // some heuristics for fast rejection
-            if args.len() != 1 {
-                return;
-            }
-            let Some(arg) = args[0].place() else { return };
-            let func_ty = func.ty(local_decls, tcx);
-            match func_ty.kind() {
-                ty::FnDef(fn_def_id, _) if fn_def_id == &slice_len_fn_item_def_id => {
-                    // perform modifications
-                    // from something like `_5 = core::slice::<impl [u8]>::len(move _6) -> bb1`
-                    // into:
-                    // ```
-                    // _5 = Len(*_6)
-                    // goto bb1
-                    // ```
+    if let TerminatorKind::Call {
+        func,
+        args,
+        destination,
+        target: Some(bb),
+        call_source: CallSource::Normal,
+        ..
+    } = &terminator.kind
+        // some heuristics for fast rejection
+        && let [arg] = &args[..]
+        && let Some(arg) = arg.place()
+        && let ty::FnDef(fn_def_id, _) = func.ty(local_decls, tcx).kind()
+        && *fn_def_id == slice_len_fn_item_def_id
+    {
+        // perform modifications from something like:
+        //     _5 = core::slice::<impl [u8]>::len(move _6) -> bb1
+        // into:
+        //     _5 = Len(*_6)
+        //     goto bb1
 
-                    // make new RValue for Len
-                    let deref_arg = tcx.mk_place_deref(arg);
-                    let r_value = Rvalue::Len(deref_arg);
-                    let len_statement_kind =
-                        StatementKind::Assign(Box::new((*destination, r_value)));
-                    let add_statement =
-                        Statement { kind: len_statement_kind, source_info: terminator.source_info };
+        // make new RValue for Len
+        let deref_arg = tcx.mk_place_deref(arg);
+        let r_value = Rvalue::Len(deref_arg);
+        let len_statement_kind = StatementKind::Assign(Box::new((*destination, r_value)));
+        let add_statement =
+            Statement { kind: len_statement_kind, source_info: terminator.source_info };
 
-                    // modify terminator into simple Goto
-                    let new_terminator_kind = TerminatorKind::Goto { target: *bb };
-
-                    let patch = SliceLenPatchInformation { add_statement, new_terminator_kind };
-
-                    patch_found = Some(patch);
-                }
-                _ => {}
-            }
-        }
-        _ => {}
-    }
+        // modify terminator into simple Goto
+        let new_terminator_kind = TerminatorKind::Goto { target: *bb };
 
-    if let Some(SliceLenPatchInformation { add_statement, new_terminator_kind }) = patch_found {
         block.statements.push(add_statement);
         block.terminator_mut().kind = new_terminator_kind;
     }
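
The source pattern this transform targets is an ordinary `[T]::len` call on a slice. A small caller that produces that shape, with the rough before/after MIR from the comment above repeated as comments (block and local numbers are illustrative):

// Before lowering (roughly):  _5 = core::slice::<impl [u8]>::len(move _6) -> bb1
// After lowering (roughly):   _5 = Len(*_6); goto -> bb1
// i.e. the call terminator becomes a plain `Len` rvalue plus a `Goto`.
pub fn slice_len(s: &[u8]) -> usize {
    s.len()
}

fn main() {
    assert_eq!(slice_len(&[1, 2, 3]), 3);
}
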
diff --git a/compiler/rustc_mir_transform/src/multiple_return_terminators.rs b/compiler/rustc_mir_transform/src/multiple_return_terminators.rs
index c97d034544a..c9b42e75cb2 100644
--- a/compiler/rustc_mir_transform/src/multiple_return_terminators.rs
+++ b/compiler/rustc_mir_transform/src/multiple_return_terminators.rs
@@ -38,6 +38,6 @@ impl<'tcx> MirPass<'tcx> for MultipleReturnTerminators {
             }
         }
 
-        simplify::remove_dead_blocks(tcx, body)
+        simplify::remove_dead_blocks(body)
     }
 }
diff --git a/compiler/rustc_mir_transform/src/normalize_array_len.rs b/compiler/rustc_mir_transform/src/normalize_array_len.rs
index d1a4b26a046..206cdf9fe28 100644
--- a/compiler/rustc_mir_transform/src/normalize_array_len.rs
+++ b/compiler/rustc_mir_transform/src/normalize_array_len.rs
@@ -57,7 +57,9 @@ fn compute_slice_length<'tcx>(
             }
             // The length information is stored in the fat pointer, so we treat `operand` as a value.
             Rvalue::Use(operand) => {
-                if let Some(rhs) = operand.place() && let Some(rhs) = rhs.as_local() {
+                if let Some(rhs) = operand.place()
+                    && let Some(rhs) = rhs.as_local()
+                {
                     slice_lengths[local] = slice_lengths[rhs];
                 }
             }
diff --git a/compiler/rustc_mir_transform/src/nrvo.rs b/compiler/rustc_mir_transform/src/nrvo.rs
index e1298b0654f..ff309bd10ec 100644
--- a/compiler/rustc_mir_transform/src/nrvo.rs
+++ b/compiler/rustc_mir_transform/src/nrvo.rs
@@ -34,7 +34,7 @@ pub struct RenameReturnPlace;
 
 impl<'tcx> MirPass<'tcx> for RenameReturnPlace {
     fn is_enabled(&self, sess: &rustc_session::Session) -> bool {
-        // #111005
+        // unsound: #111005
         sess.mir_opt_level() > 0 && sess.opts.unstable_opts.unsound_mir_opts
     }
 
diff --git a/compiler/rustc_mir_transform/src/pass_manager.rs b/compiler/rustc_mir_transform/src/pass_manager.rs
index 5abb2f3d041..a8aba29adcd 100644
--- a/compiler/rustc_mir_transform/src/pass_manager.rs
+++ b/compiler/rustc_mir_transform/src/pass_manager.rs
@@ -83,6 +83,25 @@ pub fn run_passes<'tcx>(
     run_passes_inner(tcx, body, passes, phase_change, true);
 }
 
+pub fn should_run_pass<'tcx, P>(tcx: TyCtxt<'tcx>, pass: &P) -> bool
+where
+    P: MirPass<'tcx> + ?Sized,
+{
+    let name = pass.name();
+
+    let overridden_passes = &tcx.sess.opts.unstable_opts.mir_enable_passes;
+    let overridden =
+        overridden_passes.iter().rev().find(|(s, _)| s == &*name).map(|(_name, polarity)| {
+            trace!(
+                pass = %name,
+                "{} as requested by flag",
+                if *polarity { "Running" } else { "Not running" },
+            );
+            *polarity
+        });
+    overridden.unwrap_or_else(|| pass.is_enabled(&tcx.sess))
+}
+
 fn run_passes_inner<'tcx>(
     tcx: TyCtxt<'tcx>,
     body: &mut Body<'tcx>,
@@ -100,19 +119,9 @@ fn run_passes_inner<'tcx>(
         for pass in passes {
             let name = pass.name();
 
-            let overridden = overridden_passes.iter().rev().find(|(s, _)| s == &*name).map(
-                |(_name, polarity)| {
-                    trace!(
-                        pass = %name,
-                        "{} as requested by flag",
-                        if *polarity { "Running" } else { "Not running" },
-                    );
-                    *polarity
-                },
-            );
-            if !overridden.unwrap_or_else(|| pass.is_enabled(&tcx.sess)) {
+            if !should_run_pass(tcx, *pass) {
                 continue;
-            }
+            };
 
             let dump_enabled = pass.is_mir_dump_enabled();
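
The extracted `should_run_pass` gives the `mir_enable_passes` overrides precedence over a pass's own `is_enabled`. A self-contained sketch of that precedence rule using plain types rather than rustc's (the last matching override wins, otherwise the pass default applies):

// Each override is (pass name, polarity): true = force-run, false = force-skip,
// in the order the user supplied them.
fn should_run(name: &str, overrides: &[(String, bool)], default_enabled: bool) -> bool {
    overrides
        .iter()
        .rev() // later occurrences take precedence over earlier ones
        .find(|(s, _)| s.as_str() == name)
        .map(|&(_, polarity)| polarity)
        .unwrap_or(default_enabled)
}

fn main() {
    let overrides = vec![("GVN".to_string(), false), ("GVN".to_string(), true)];
    // The later enabling entry wins over the earlier disabling one.
    assert!(should_run("GVN", &overrides, false));
    // Passes without an override fall back to their own default.
    assert!(!should_run("Nrvo", &overrides, false));
}
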
 
diff --git a/compiler/rustc_mir_transform/src/ref_prop.rs b/compiler/rustc_mir_transform/src/ref_prop.rs
index 67941cf4395..df39c819ba9 100644
--- a/compiler/rustc_mir_transform/src/ref_prop.rs
+++ b/compiler/rustc_mir_transform/src/ref_prop.rs
@@ -210,14 +210,17 @@ fn compute_replacement<'tcx>(
             // have been visited before.
             Rvalue::Use(Operand::Copy(place) | Operand::Move(place))
             | Rvalue::CopyForDeref(place) => {
-                if let Some(rhs) = place.as_local() && ssa.is_ssa(rhs) {
+                if let Some(rhs) = place.as_local()
+                    && ssa.is_ssa(rhs)
+                {
                     let target = targets[rhs];
                     // Only see through immutable reference and pointers, as we do not know yet if
                     // mutable references are fully replaced.
                     if !needs_unique && matches!(target, Value::Pointer(..)) {
                         targets[local] = target;
                     } else {
-                        targets[local] = Value::Pointer(tcx.mk_place_deref(rhs.into()), needs_unique);
+                        targets[local] =
+                            Value::Pointer(tcx.mk_place_deref(rhs.into()), needs_unique);
                     }
                 }
             }
@@ -365,7 +368,7 @@ impl<'tcx> MutVisitor<'tcx> for Replacer<'tcx> {
                 *place = Place::from(target.local).project_deeper(rest, self.tcx);
                 self.any_replacement = true;
             } else {
-                break
+                break;
             }
         }
 
diff --git a/compiler/rustc_mir_transform/src/remove_noop_landing_pads.rs b/compiler/rustc_mir_transform/src/remove_noop_landing_pads.rs
index 8c48a667786..54892442c87 100644
--- a/compiler/rustc_mir_transform/src/remove_noop_landing_pads.rs
+++ b/compiler/rustc_mir_transform/src/remove_noop_landing_pads.rs
@@ -69,7 +69,7 @@ impl RemoveNoopLandingPads {
             | TerminatorKind::FalseUnwind { .. } => {
                 terminator.successors().all(|succ| nop_landing_pads.contains(succ))
             }
-            TerminatorKind::GeneratorDrop
+            TerminatorKind::CoroutineDrop
             | TerminatorKind::Yield { .. }
             | TerminatorKind::Return
             | TerminatorKind::UnwindTerminate(_)
diff --git a/compiler/rustc_mir_transform/src/remove_uninit_drops.rs b/compiler/rustc_mir_transform/src/remove_uninit_drops.rs
index 26384974798..87fee2410ec 100644
--- a/compiler/rustc_mir_transform/src/remove_uninit_drops.rs
+++ b/compiler/rustc_mir_transform/src/remove_uninit_drops.rs
@@ -24,11 +24,8 @@ pub struct RemoveUninitDrops;
 impl<'tcx> MirPass<'tcx> for RemoveUninitDrops {
     fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
         let param_env = tcx.param_env(body.source.def_id());
-        let Ok(move_data) = MoveData::gather_moves(body, tcx, param_env) else {
-            // We could continue if there are move errors, but there's not much point since our
-            // init data isn't complete.
-            return;
-        };
+        let move_data =
+            MoveData::gather_moves(&body, tcx, param_env, |ty| ty.needs_drop(tcx, param_env));
 
         let mdpe = MoveDataParamEnv { move_data, param_env };
         let mut maybe_inits = MaybeInitializedPlaces::new(tcx, body, &mdpe)
diff --git a/compiler/rustc_mir_transform/src/remove_zsts.rs b/compiler/rustc_mir_transform/src/remove_zsts.rs
index a34d4b02764..5aa3c3cfe4d 100644
--- a/compiler/rustc_mir_transform/src/remove_zsts.rs
+++ b/compiler/rustc_mir_transform/src/remove_zsts.rs
@@ -13,8 +13,8 @@ impl<'tcx> MirPass<'tcx> for RemoveZsts {
     }
 
     fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
-        // Avoid query cycles (generators require optimized MIR for layout).
-        if tcx.type_of(body.source.def_id()).instantiate_identity().is_generator() {
+        // Avoid query cycles (coroutines require optimized MIR for layout).
+        if tcx.type_of(body.source.def_id()).instantiate_identity().is_coroutine() {
             return;
         }
         let param_env = tcx.param_env_reveal_all_normalized(body.source.def_id());
@@ -126,7 +126,10 @@ impl<'tcx> MutVisitor<'tcx> for Replacer<'_, 'tcx> {
             && let ty = place_for_ty.ty(self.local_decls, self.tcx).ty
             && self.known_to_be_zst(ty)
             && self.tcx.consider_optimizing(|| {
-                format!("RemoveZsts - Place: {:?} SourceInfo: {:?}", place_for_ty, statement.source_info)
+                format!(
+                    "RemoveZsts - Place: {:?} SourceInfo: {:?}",
+                    place_for_ty, statement.source_info
+                )
             })
         {
             statement.make_nop();
diff --git a/compiler/rustc_mir_transform/src/reveal_all.rs b/compiler/rustc_mir_transform/src/reveal_all.rs
index 55f1eac6f84..1626cf3c035 100644
--- a/compiler/rustc_mir_transform/src/reveal_all.rs
+++ b/compiler/rustc_mir_transform/src/reveal_all.rs
@@ -46,16 +46,18 @@ impl<'tcx> MutVisitor<'tcx> for RevealAllVisitor<'tcx> {
                 .filter(|elem| !matches!(elem, ProjectionElem::OpaqueCast(_)))
                 .collect::<Vec<_>>(),
         );
+        self.super_place(place, _context, _location);
     }
 
     #[inline]
-    fn visit_constant(&mut self, constant: &mut ConstOperand<'tcx>, _: Location) {
+    fn visit_constant(&mut self, constant: &mut ConstOperand<'tcx>, location: Location) {
         // We have to use `try_normalize_erasing_regions` here, since it's
         // possible that we visit impossible-to-satisfy where clauses here,
         // see #91745
         if let Ok(c) = self.tcx.try_normalize_erasing_regions(self.param_env, constant.const_) {
             constant.const_ = c;
         }
+        self.super_constant(constant, location);
     }
 
     #[inline]
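
The two `super_place`/`super_constant` calls added above matter because an overridden visitor method replaces, rather than wraps, the default traversal; without delegating back, nested places and constants are never visited. A toy visitor (not rustc's `MutVisitor` trait) showing the same shape:

// A stripped-down visitor: overriding `visit_node` without calling the
// provided `super_node` stops recursion into children, which is the bug the
// added `super_place`/`super_constant` calls avoid.
struct Node {
    value: u32,
    children: Vec<Node>,
}

trait Visitor {
    fn visit_node(&mut self, node: &mut Node) {
        self.super_node(node);
    }
    fn super_node(&mut self, node: &mut Node) {
        for child in &mut node.children {
            self.visit_node(child);
        }
    }
}

struct Doubler;
impl Visitor for Doubler {
    fn visit_node(&mut self, node: &mut Node) {
        node.value *= 2;
        // Forgetting this line would leave all children untouched.
        self.super_node(node);
    }
}

fn main() {
    let mut root = Node { value: 1, children: vec![Node { value: 2, children: vec![] }] };
    Doubler.visit_node(&mut root);
    assert_eq!((root.value, root.children[0].value), (2, 4));
}
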
diff --git a/compiler/rustc_mir_transform/src/separate_const_switch.rs b/compiler/rustc_mir_transform/src/separate_const_switch.rs
index e1e4acccccd..907cfe7581a 100644
--- a/compiler/rustc_mir_transform/src/separate_const_switch.rs
+++ b/compiler/rustc_mir_transform/src/separate_const_switch.rs
@@ -118,7 +118,7 @@ pub fn separate_const_switch(body: &mut Body<'_>) -> usize {
                         | TerminatorKind::Return
                         | TerminatorKind::Unreachable
                         | TerminatorKind::InlineAsm { .. }
-                        | TerminatorKind::GeneratorDrop => {
+                        | TerminatorKind::CoroutineDrop => {
                             continue 'predec_iter;
                         }
                     }
@@ -169,7 +169,7 @@ pub fn separate_const_switch(body: &mut Body<'_>) -> usize {
             | TerminatorKind::UnwindTerminate(_)
             | TerminatorKind::Return
             | TerminatorKind::Unreachable
-            | TerminatorKind::GeneratorDrop
+            | TerminatorKind::CoroutineDrop
             | TerminatorKind::Assert { .. }
             | TerminatorKind::FalseUnwind { .. }
             | TerminatorKind::Drop { .. }
diff --git a/compiler/rustc_mir_transform/src/shim.rs b/compiler/rustc_mir_transform/src/shim.rs
index e9895d97dfe..2400cfa21fb 100644
--- a/compiler/rustc_mir_transform/src/shim.rs
+++ b/compiler/rustc_mir_transform/src/shim.rs
@@ -4,7 +4,7 @@ use rustc_hir::lang_items::LangItem;
 use rustc_middle::mir::*;
 use rustc_middle::query::Providers;
 use rustc_middle::ty::GenericArgs;
-use rustc_middle::ty::{self, EarlyBinder, GeneratorArgs, Ty, TyCtxt};
+use rustc_middle::ty::{self, CoroutineArgs, EarlyBinder, Ty, TyCtxt};
 use rustc_target::abi::{FieldIdx, VariantIdx, FIRST_VARIANT};
 
 use rustc_index::{Idx, IndexVec};
@@ -67,10 +67,10 @@ fn make_shim<'tcx>(tcx: TyCtxt<'tcx>, instance: ty::InstanceDef<'tcx>) -> Body<'
         }
 
         ty::InstanceDef::DropGlue(def_id, ty) => {
-            // FIXME(#91576): Drop shims for generators aren't subject to the MIR passes at the end
+            // FIXME(#91576): Drop shims for coroutines aren't subject to the MIR passes at the end
             // of this function. Is this intentional?
-            if let Some(ty::Generator(gen_def_id, args, _)) = ty.map(Ty::kind) {
-                let body = tcx.optimized_mir(*gen_def_id).generator_drop().unwrap();
+            if let Some(ty::Coroutine(gen_def_id, args, _)) = ty.map(Ty::kind) {
+                let body = tcx.optimized_mir(*gen_def_id).coroutine_drop().unwrap();
                 let mut body = EarlyBinder::bind(body.clone()).instantiate(tcx, args);
                 debug!("make_shim({:?}) = {:?}", instance, body);
 
@@ -171,7 +171,7 @@ fn local_decls_for_sig<'tcx>(
 fn build_drop_shim<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId, ty: Option<Ty<'tcx>>) -> Body<'tcx> {
     debug!("build_drop_shim(def_id={:?}, ty={:?})", def_id, ty);
 
-    assert!(!matches!(ty, Some(ty) if ty.is_generator()));
+    assert!(!matches!(ty, Some(ty) if ty.is_coroutine()));
 
     let args = if let Some(ty) = ty {
         tcx.mk_args(&[ty.into()])
@@ -392,8 +392,8 @@ fn build_clone_shim<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId, self_ty: Ty<'tcx>) -
         _ if is_copy => builder.copy_shim(),
         ty::Closure(_, args) => builder.tuple_like_shim(dest, src, args.as_closure().upvar_tys()),
         ty::Tuple(..) => builder.tuple_like_shim(dest, src, self_ty.tuple_fields()),
-        ty::Generator(gen_def_id, args, hir::Movability::Movable) => {
-            builder.generator_shim(dest, src, *gen_def_id, args.as_generator())
+        ty::Coroutine(gen_def_id, args, hir::Movability::Movable) => {
+            builder.coroutine_shim(dest, src, *gen_def_id, args.as_coroutine())
         }
         _ => bug!("clone shim for `{:?}` which is not `Copy` and is not an aggregate", self_ty),
     };
@@ -593,12 +593,12 @@ impl<'tcx> CloneShimBuilder<'tcx> {
         let _final_cleanup_block = self.clone_fields(dest, src, target, unwind, tys);
     }
 
-    fn generator_shim(
+    fn coroutine_shim(
         &mut self,
         dest: Place<'tcx>,
         src: Place<'tcx>,
         gen_def_id: DefId,
-        args: GeneratorArgs<'tcx>,
+        args: CoroutineArgs<'tcx>,
     ) {
         self.block(vec![], TerminatorKind::Goto { target: self.block_index_offset(3) }, false);
         let unwind = self.block(vec![], TerminatorKind::UnwindResume, true);
diff --git a/compiler/rustc_mir_transform/src/simplify.rs b/compiler/rustc_mir_transform/src/simplify.rs
index 2795cf15702..88c89e106fd 100644
--- a/compiler/rustc_mir_transform/src/simplify.rs
+++ b/compiler/rustc_mir_transform/src/simplify.rs
@@ -28,10 +28,8 @@
 //! return.
 
 use crate::MirPass;
-use rustc_data_structures::fx::{FxHashSet, FxIndexSet};
-use rustc_index::bit_set::BitSet;
+use rustc_data_structures::fx::FxIndexSet;
 use rustc_index::{Idx, IndexSlice, IndexVec};
-use rustc_middle::mir::coverage::*;
 use rustc_middle::mir::visit::{MutVisitor, MutatingUseContext, PlaceContext, Visitor};
 use rustc_middle::mir::*;
 use rustc_middle::ty::TyCtxt;
@@ -68,7 +66,7 @@ impl SimplifyCfg {
 pub fn simplify_cfg<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
     CfgSimplifier::new(body).simplify();
     remove_duplicate_unreachable_blocks(tcx, body);
-    remove_dead_blocks(tcx, body);
+    remove_dead_blocks(body);
 
     // FIXME: Should probably be moved into some kind of pass manager
     body.basic_blocks_mut().raw.shrink_to_fit();
@@ -337,7 +335,7 @@ pub fn remove_duplicate_unreachable_blocks<'tcx>(tcx: TyCtxt<'tcx>, body: &mut B
     }
 }
 
-pub fn remove_dead_blocks<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+pub fn remove_dead_blocks(body: &mut Body<'_>) {
     let reachable = traversal::reachable_as_bitset(body);
     let num_blocks = body.basic_blocks.len();
     if num_blocks == reachable.count() {
@@ -345,10 +343,6 @@ pub fn remove_dead_blocks<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
     }
 
     let basic_blocks = body.basic_blocks.as_mut();
-    let source_scopes = &body.source_scopes;
-    if tcx.sess.instrument_coverage() {
-        save_unreachable_coverage(basic_blocks, source_scopes, &reachable);
-    }
 
     let mut replacements: Vec<_> = (0..num_blocks).map(BasicBlock::new).collect();
     let mut orig_index = 0;
@@ -370,97 +364,6 @@ pub fn remove_dead_blocks<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
     }
 }
 
-/// Some MIR transforms can determine at compile time that a sequences of
-/// statements will never be executed, so they can be dropped from the MIR.
-/// For example, an `if` or `else` block that is guaranteed to never be executed
-/// because its condition can be evaluated at compile time, such as by const
-/// evaluation: `if false { ... }`.
-///
-/// Those statements are bypassed by redirecting paths in the CFG around the
-/// `dead blocks`; but with `-C instrument-coverage`, the dead blocks usually
-/// include `Coverage` statements representing the Rust source code regions to
-/// be counted at runtime. Without these `Coverage` statements, the regions are
-/// lost, and the Rust source code will show no coverage information.
-///
-/// What we want to show in a coverage report is the dead code with coverage
-/// counts of `0`. To do this, we need to save the code regions, by injecting
-/// `Unreachable` coverage statements. These are non-executable statements whose
-/// code regions are still recorded in the coverage map, representing regions
-/// with `0` executions.
-///
-/// If there are no live `Counter` `Coverage` statements remaining, we remove
-/// `Coverage` statements along with the dead blocks. Since at least one
-/// counter per function is required by LLVM (and necessary, to add the
-/// `function_hash` to the counter's call to the LLVM intrinsic
-/// `instrprof.increment()`).
-///
-/// The `generator::StateTransform` MIR pass and MIR inlining can create
-/// atypical conditions, where all live `Counter`s are dropped from the MIR.
-///
-/// With MIR inlining we can have coverage counters belonging to different
-/// instances in a single body, so the strategy described above is applied to
-/// coverage counters from each instance individually.
-fn save_unreachable_coverage(
-    basic_blocks: &mut IndexSlice<BasicBlock, BasicBlockData<'_>>,
-    source_scopes: &IndexSlice<SourceScope, SourceScopeData<'_>>,
-    reachable: &BitSet<BasicBlock>,
-) {
-    // Identify instances that still have some live coverage counters left.
-    let mut live = FxHashSet::default();
-    for bb in reachable.iter() {
-        let basic_block = &basic_blocks[bb];
-        for statement in &basic_block.statements {
-            let StatementKind::Coverage(coverage) = &statement.kind else { continue };
-            let CoverageKind::Counter { .. } = coverage.kind else { continue };
-            let instance = statement.source_info.scope.inlined_instance(source_scopes);
-            live.insert(instance);
-        }
-    }
-
-    for bb in reachable.iter() {
-        let block = &mut basic_blocks[bb];
-        for statement in &mut block.statements {
-            let StatementKind::Coverage(_) = &statement.kind else { continue };
-            let instance = statement.source_info.scope.inlined_instance(source_scopes);
-            if !live.contains(&instance) {
-                statement.make_nop();
-            }
-        }
-    }
-
-    if live.is_empty() {
-        return;
-    }
-
-    // Retain coverage for instances that still have some live counters left.
-    let mut retained_coverage = Vec::new();
-    for dead_block in basic_blocks.indices() {
-        if reachable.contains(dead_block) {
-            continue;
-        }
-        let dead_block = &basic_blocks[dead_block];
-        for statement in &dead_block.statements {
-            let StatementKind::Coverage(coverage) = &statement.kind else { continue };
-            let Some(code_region) = &coverage.code_region else { continue };
-            let instance = statement.source_info.scope.inlined_instance(source_scopes);
-            if live.contains(&instance) {
-                retained_coverage.push((statement.source_info, code_region.clone()));
-            }
-        }
-    }
-
-    let start_block = &mut basic_blocks[START_BLOCK];
-    start_block.statements.extend(retained_coverage.into_iter().map(
-        |(source_info, code_region)| Statement {
-            source_info,
-            kind: StatementKind::Coverage(Box::new(Coverage {
-                kind: CoverageKind::Unreachable,
-                code_region: Some(code_region),
-            })),
-        },
-    ));
-}
-
 pub enum SimplifyLocals {
     BeforeConstProp,
     Final,
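
With the coverage special-casing removed, `remove_dead_blocks` is pure index compaction: keep reachable blocks in order and rewrite successor indices through a replacement map. A generic sketch of that technique on plain vectors, as an illustration of the idea rather than the rustc implementation:

// `reachable[b]` says whether block `b` survives; successors are block indices.
// Reachable blocks are compacted to the front in order and every surviving
// edge is redirected through the old-index -> new-index map.
fn compact_blocks(blocks: Vec<Vec<usize>>, reachable: &[bool]) -> Vec<Vec<usize>> {
    let mut remap = vec![usize::MAX; blocks.len()];
    let mut kept = Vec::new();
    for (old, block) in blocks.into_iter().enumerate() {
        if reachable[old] {
            remap[old] = kept.len();
            kept.push(block);
        }
    }
    for block in &mut kept {
        for succ in block.iter_mut() {
            // Dead targets never appear among live successors, so the map is total here.
            *succ = remap[*succ];
        }
    }
    kept
}

fn main() {
    // bb0 -> bb2, bb1 is dead, bb2 has no successors.
    let blocks = vec![vec![2], vec![0], vec![]];
    let compacted = compact_blocks(blocks, &[true, false, true]);
    assert_eq!(compacted, vec![vec![1], vec![]]);
}
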
diff --git a/compiler/rustc_mir_transform/src/sroa.rs b/compiler/rustc_mir_transform/src/sroa.rs
index c21b1724cbb..427cc1e1924 100644
--- a/compiler/rustc_mir_transform/src/sroa.rs
+++ b/compiler/rustc_mir_transform/src/sroa.rs
@@ -20,8 +20,8 @@ impl<'tcx> MirPass<'tcx> for ScalarReplacementOfAggregates {
     fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
         debug!(def_id = ?body.source.def_id());
 
-        // Avoid query cycles (generators require optimized MIR for layout).
-        if tcx.type_of(body.source.def_id()).instantiate_identity().is_generator() {
+        // Avoid query cycles (coroutines require optimized MIR for layout).
+        if tcx.type_of(body.source.def_id()).instantiate_identity().is_coroutine() {
             return;
         }
 
diff --git a/compiler/rustc_mir_transform/src/ssa.rs b/compiler/rustc_mir_transform/src/ssa.rs
index af9514ed6bb..8dc7b60c4e5 100644
--- a/compiler/rustc_mir_transform/src/ssa.rs
+++ b/compiler/rustc_mir_transform/src/ssa.rs
@@ -5,7 +5,6 @@
 //! As a consequence of rule 2, we consider that borrowed locals are not SSA, even if they are
 //! `Freeze`, as we do not track that the assignment dominates all uses of the borrow.
 
-use either::Either;
 use rustc_data_structures::graph::dominators::Dominators;
 use rustc_index::bit_set::BitSet;
 use rustc_index::{IndexSlice, IndexVec};
@@ -15,7 +14,7 @@ use rustc_middle::mir::*;
 
 pub struct SsaLocals {
     /// Assignments to each local. This defines whether the local is SSA.
-    assignments: IndexVec<Local, Set1<LocationExtended>>,
+    assignments: IndexVec<Local, Set1<DefLocation>>,
     /// We visit the body in reverse postorder, to ensure each local is assigned before it is used.
     /// We remember the order in which we saw the assignments to compute the SSA values in a single
     /// pass.
@@ -27,39 +26,10 @@ pub struct SsaLocals {
     direct_uses: IndexVec<Local, u32>,
 }
 
-/// We often encounter MIR bodies with 1 or 2 basic blocks. In those cases, it's unnecessary to
-/// actually compute dominators, we can just compare block indices because bb0 is always the first
-/// block, and in any body all other blocks are always dominated by bb0.
-struct SmallDominators<'a> {
-    inner: Option<&'a Dominators<BasicBlock>>,
-}
-
-impl SmallDominators<'_> {
-    fn dominates(&self, first: Location, second: Location) -> bool {
-        if first.block == second.block {
-            first.statement_index <= second.statement_index
-        } else if let Some(inner) = &self.inner {
-            inner.dominates(first.block, second.block)
-        } else {
-            first.block < second.block
-        }
-    }
-
-    fn check_dominates(&mut self, set: &mut Set1<LocationExtended>, loc: Location) {
-        let assign_dominates = match *set {
-            Set1::Empty | Set1::Many => false,
-            Set1::One(LocationExtended::Arg) => true,
-            Set1::One(LocationExtended::Plain(assign)) => {
-                self.dominates(assign.successor_within_block(), loc)
-            }
-        };
-        // We are visiting a use that is not dominated by an assignment.
-        // Either there is a cycle involved, or we are reading for uninitialized local.
-        // Bail out.
-        if !assign_dominates {
-            *set = Set1::Many;
-        }
-    }
+pub enum AssignedValue<'a, 'tcx> {
+    Arg,
+    Rvalue(&'a mut Rvalue<'tcx>),
+    Terminator(&'a mut TerminatorKind<'tcx>),
 }
 
 impl SsaLocals {
@@ -67,15 +37,14 @@ impl SsaLocals {
         let assignment_order = Vec::with_capacity(body.local_decls.len());
 
         let assignments = IndexVec::from_elem(Set1::Empty, &body.local_decls);
-        let dominators =
-            if body.basic_blocks.len() > 2 { Some(body.basic_blocks.dominators()) } else { None };
-        let dominators = SmallDominators { inner: dominators };
+        let dominators = body.basic_blocks.dominators();
 
         let direct_uses = IndexVec::from_elem(0, &body.local_decls);
         let mut visitor = SsaVisitor { assignments, assignment_order, dominators, direct_uses };
 
         for local in body.args_iter() {
-            visitor.assignments[local] = Set1::One(LocationExtended::Arg);
+            visitor.assignments[local] = Set1::One(DefLocation::Argument);
+            visitor.assignment_order.push(local);
         }
 
         // For SSA assignments, a RPO visit will see the assignment before it sees any use.
@@ -131,14 +100,7 @@ impl SsaLocals {
         location: Location,
     ) -> bool {
         match self.assignments[local] {
-            Set1::One(LocationExtended::Arg) => true,
-            Set1::One(LocationExtended::Plain(ass)) => {
-                if ass.block == location.block {
-                    ass.statement_index < location.statement_index
-                } else {
-                    dominators.dominates(ass.block, location.block)
-                }
-            }
+            Set1::One(def) => def.dominates(location, dominators),
             _ => false,
         }
     }
@@ -148,9 +110,9 @@ impl SsaLocals {
         body: &'a Body<'tcx>,
     ) -> impl Iterator<Item = (Local, &'a Rvalue<'tcx>, Location)> + 'a {
         self.assignment_order.iter().filter_map(|&local| {
-            if let Set1::One(LocationExtended::Plain(loc)) = self.assignments[local] {
+            if let Set1::One(DefLocation::Body(loc)) = self.assignments[local] {
+                let stmt = body.stmt_at(loc).left()?;
                 // `loc` must point to a direct assignment to `local`.
-                let Either::Left(stmt) = body.stmt_at(loc) else { bug!() };
                 let Some((target, rvalue)) = stmt.kind.as_assign() else { bug!() };
                 assert_eq!(target.as_local(), Some(local));
                 Some((local, rvalue, loc))
@@ -162,18 +124,33 @@ impl SsaLocals {
 
     pub fn for_each_assignment_mut<'tcx>(
         &self,
-        basic_blocks: &mut BasicBlocks<'tcx>,
-        mut f: impl FnMut(Local, &mut Rvalue<'tcx>, Location),
+        basic_blocks: &mut IndexSlice<BasicBlock, BasicBlockData<'tcx>>,
+        mut f: impl FnMut(Local, AssignedValue<'_, 'tcx>, Location),
     ) {
         for &local in &self.assignment_order {
-            if let Set1::One(LocationExtended::Plain(loc)) = self.assignments[local] {
-                // `loc` must point to a direct assignment to `local`.
-                let bbs = basic_blocks.as_mut_preserves_cfg();
-                let bb = &mut bbs[loc.block];
-                let stmt = &mut bb.statements[loc.statement_index];
-                let StatementKind::Assign(box (target, ref mut rvalue)) = stmt.kind else { bug!() };
-                assert_eq!(target.as_local(), Some(local));
-                f(local, rvalue, loc)
+            match self.assignments[local] {
+                Set1::One(DefLocation::Argument) => f(
+                    local,
+                    AssignedValue::Arg,
+                    Location { block: START_BLOCK, statement_index: 0 },
+                ),
+                Set1::One(DefLocation::Body(loc)) => {
+                    let bb = &mut basic_blocks[loc.block];
+                    let value = if loc.statement_index < bb.statements.len() {
+                        // `loc` must point to a direct assignment to `local`.
+                        let stmt = &mut bb.statements[loc.statement_index];
+                        let StatementKind::Assign(box (target, ref mut rvalue)) = stmt.kind else {
+                            bug!()
+                        };
+                        assert_eq!(target.as_local(), Some(local));
+                        AssignedValue::Rvalue(rvalue)
+                    } else {
+                        let term = bb.terminator_mut();
+                        AssignedValue::Terminator(&mut term.kind)
+                    };
+                    f(local, value, loc)
+                }
+                _ => {}
             }
         }
     }
@@ -224,19 +201,29 @@ impl SsaLocals {
     }
 }
 
-#[derive(Copy, Clone, Debug, PartialEq, Eq)]
-enum LocationExtended {
-    Plain(Location),
-    Arg,
-}
-
 struct SsaVisitor<'a> {
-    dominators: SmallDominators<'a>,
-    assignments: IndexVec<Local, Set1<LocationExtended>>,
+    dominators: &'a Dominators<BasicBlock>,
+    assignments: IndexVec<Local, Set1<DefLocation>>,
     assignment_order: Vec<Local>,
     direct_uses: IndexVec<Local, u32>,
 }
 
+impl SsaVisitor<'_> {
+    fn check_dominates(&mut self, local: Local, loc: Location) {
+        let set = &mut self.assignments[local];
+        let assign_dominates = match *set {
+            Set1::Empty | Set1::Many => false,
+            Set1::One(def) => def.dominates(loc, self.dominators),
+        };
+        // We are visiting a use that is not dominated by an assignment.
+        // Either there is a cycle involved, or we are reading from an uninitialized local.
+        // Bail out.
+        if !assign_dominates {
+            *set = Set1::Many;
+        }
+    }
+}
+
 impl<'tcx> Visitor<'tcx> for SsaVisitor<'_> {
     fn visit_local(&mut self, local: Local, ctxt: PlaceContext, loc: Location) {
         match ctxt {
@@ -254,7 +241,7 @@ impl<'tcx> Visitor<'tcx> for SsaVisitor<'_> {
                 self.assignments[local] = Set1::Many;
             }
             PlaceContext::NonMutatingUse(_) => {
-                self.dominators.check_dominates(&mut self.assignments[local], loc);
+                self.check_dominates(local, loc);
                 self.direct_uses[local] += 1;
             }
             PlaceContext::NonUse(_) => {}
@@ -262,34 +249,34 @@ impl<'tcx> Visitor<'tcx> for SsaVisitor<'_> {
     }
 
     fn visit_place(&mut self, place: &Place<'tcx>, ctxt: PlaceContext, loc: Location) {
-        if place.projection.first() == Some(&PlaceElem::Deref) {
-            // Do not do anything for storage statements and debuginfo.
+        let location = match ctxt {
+            PlaceContext::MutatingUse(
+                MutatingUseContext::Store | MutatingUseContext::Call | MutatingUseContext::Yield,
+            ) => Some(DefLocation::Body(loc)),
+            _ => None,
+        };
+        if let Some(location) = location
+            && let Some(local) = place.as_local()
+        {
+            self.assignments[local].insert(location);
+            if let Set1::One(_) = self.assignments[local] {
+                // Only record if SSA-like, to avoid growing the vector needlessly.
+                self.assignment_order.push(local);
+            }
+        } else if place.projection.first() == Some(&PlaceElem::Deref) {
+            // Do not do anything for debuginfo.
             if ctxt.is_use() {
                 // Only change the context if it is a real use, not a "use" in debuginfo.
                 let new_ctxt = PlaceContext::NonMutatingUse(NonMutatingUseContext::Copy);
 
                 self.visit_projection(place.as_ref(), new_ctxt, loc);
-                self.dominators.check_dominates(&mut self.assignments[place.local], loc);
+                self.check_dominates(place.local, loc);
             }
-            return;
         } else {
             self.visit_projection(place.as_ref(), ctxt, loc);
             self.visit_local(place.local, ctxt, loc);
         }
     }
-
-    fn visit_assign(&mut self, place: &Place<'tcx>, rvalue: &Rvalue<'tcx>, loc: Location) {
-        if let Some(local) = place.as_local() {
-            self.assignments[local].insert(LocationExtended::Plain(loc));
-            if let Set1::One(_) = self.assignments[local] {
-                // Only record if SSA-like, to avoid growing the vector needlessly.
-                self.assignment_order.push(local);
-            }
-        } else {
-            self.visit_place(place, PlaceContext::MutatingUse(MutatingUseContext::Store), loc);
-        }
-        self.visit_rvalue(rvalue, loc);
-    }
 }
 
 #[instrument(level = "trace", skip(ssa, body))]
@@ -356,7 +343,7 @@ fn compute_copy_classes(ssa: &mut SsaLocals, body: &Body<'_>) {
 #[derive(Debug)]
 pub(crate) struct StorageLiveLocals {
     /// Set of "StorageLive" statements for each local.
-    storage_live: IndexVec<Local, Set1<LocationExtended>>,
+    storage_live: IndexVec<Local, Set1<DefLocation>>,
 }
 
 impl StorageLiveLocals {
@@ -366,13 +353,13 @@ impl StorageLiveLocals {
     ) -> StorageLiveLocals {
         let mut storage_live = IndexVec::from_elem(Set1::Empty, &body.local_decls);
         for local in always_storage_live_locals.iter() {
-            storage_live[local] = Set1::One(LocationExtended::Arg);
+            storage_live[local] = Set1::One(DefLocation::Argument);
         }
         for (block, bbdata) in body.basic_blocks.iter_enumerated() {
             for (statement_index, statement) in bbdata.statements.iter().enumerate() {
                 if let StatementKind::StorageLive(local) = statement.kind {
                     storage_live[local]
-                        .insert(LocationExtended::Plain(Location { block, statement_index }));
+                        .insert(DefLocation::Body(Location { block, statement_index }));
                 }
             }
         }
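
The `DefLocation` variants used above replace the old `LocationExtended`, and its `dominates` call subsumes the removed hand-rolled check: an argument definition dominates every use, and a body definition dominates a use if it comes earlier in the same block or its block dominates the use's block. A toy reconstruction of that predicate, based only on the logic this diff removes (the real type lives in rustc_middle; block indices and the dominator query are simplified to plain numbers and a closure):

#[derive(Clone, Copy)]
struct Loc {
    block: usize,
    statement_index: usize,
}

enum DefLoc {
    Argument,
    Body(Loc),
}

impl DefLoc {
    fn dominates(&self, use_loc: Loc, block_dominates: impl Fn(usize, usize) -> bool) -> bool {
        match *self {
            // Arguments are defined on entry, so they dominate every use.
            DefLoc::Argument => true,
            DefLoc::Body(assign) => {
                if assign.block == use_loc.block {
                    // Same block: the assignment must come strictly earlier.
                    assign.statement_index < use_loc.statement_index
                } else {
                    // Different blocks: defer to the CFG dominator tree.
                    block_dominates(assign.block, use_loc.block)
                }
            }
        }
    }
}

fn main() {
    let def = DefLoc::Body(Loc { block: 0, statement_index: 1 });
    let use_loc = Loc { block: 0, statement_index: 3 };
    assert!(def.dominates(use_loc, |_, _| false));
}
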
diff --git a/compiler/rustc_mir_transform/src/uninhabited_enum_branching.rs b/compiler/rustc_mir_transform/src/uninhabited_enum_branching.rs
index 092bcb5c979..cb028a92d49 100644
--- a/compiler/rustc_mir_transform/src/uninhabited_enum_branching.rs
+++ b/compiler/rustc_mir_transform/src/uninhabited_enum_branching.rs
@@ -30,22 +30,17 @@ fn get_switched_on_type<'tcx>(
     let terminator = block_data.terminator();
 
     // Only bother checking blocks which terminate by switching on a local.
-    if let Some(local) = get_discriminant_local(&terminator.kind) {
-        let stmt_before_term = (!block_data.statements.is_empty())
-            .then(|| &block_data.statements[block_data.statements.len() - 1].kind);
-
-        if let Some(StatementKind::Assign(box (l, Rvalue::Discriminant(place)))) = stmt_before_term
-        {
-            if l.as_local() == Some(local) {
-                let ty = place.ty(body, tcx).ty;
-                if ty.is_enum() {
-                    return Some(ty);
-                }
-            }
-        }
+    if let Some(local) = get_discriminant_local(&terminator.kind)
+        && let [.., stmt_before_term] = &block_data.statements[..]
+        && let StatementKind::Assign(box (l, Rvalue::Discriminant(place))) = stmt_before_term.kind
+        && l.as_local() == Some(local)
+        && let ty = place.ty(body, tcx).ty
+        && ty.is_enum()
+    {
+        Some(ty)
+    } else {
+        None
     }
-
-    None
 }
 
 fn variant_discriminants<'tcx>(
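
The rewritten `get_switched_on_type` only fires when a block ends in a switch on a freshly read discriminant; the pass then prunes targets for variants that cannot be inhabited. A small source-level illustration of the kind of match that benefits (the enum and names are made up for the example):

use std::convert::Infallible;

// `E::Never` carries an uninhabited payload, so that variant can never be
// constructed; a switch on `E`'s discriminant therefore never needs a target
// block for it.
enum E {
    A(u32),
    B(u32),
    Never(Infallible),
}

fn pick(e: E) -> u32 {
    match e {
        E::A(x) => x,
        E::B(x) => x + 1,
        E::Never(i) => match i {},
    }
}

fn main() {
    assert_eq!(pick(E::B(1)), 2);
}
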
diff --git a/compiler/rustc_mir_transform/src/unreachable_prop.rs b/compiler/rustc_mir_transform/src/unreachable_prop.rs
index 0b9311a20ef..ea7aafd866b 100644
--- a/compiler/rustc_mir_transform/src/unreachable_prop.rs
+++ b/compiler/rustc_mir_transform/src/unreachable_prop.rs
@@ -65,7 +65,7 @@ impl MirPass<'_> for UnreachablePropagation {
         }
 
         if replaced {
-            simplify::remove_dead_blocks(tcx, body);
+            simplify::remove_dead_blocks(body);
         }
     }
 }