path: root/compiler/rustc_mir_transform/src
Diffstat (limited to 'compiler/rustc_mir_transform/src')
-rw-r--r--  compiler/rustc_mir_transform/src/abort_unwinding_calls.rs    |    2
-rw-r--r--  compiler/rustc_mir_transform/src/check_unsafety.rs           |    4
-rw-r--r--  compiler/rustc_mir_transform/src/const_prop.rs               |   10
-rw-r--r--  compiler/rustc_mir_transform/src/const_prop_lint.rs          |    8
-rw-r--r--  compiler/rustc_mir_transform/src/coroutine.rs (renamed from compiler/rustc_mir_transform/src/generator.rs) |  344
-rw-r--r--  compiler/rustc_mir_transform/src/cost_checker.rs             |   98
-rw-r--r--  compiler/rustc_mir_transform/src/coverage/counters.rs        |   26
-rw-r--r--  compiler/rustc_mir_transform/src/coverage/graph.rs           |    2
-rw-r--r--  compiler/rustc_mir_transform/src/coverage/mod.rs             |  195
-rw-r--r--  compiler/rustc_mir_transform/src/coverage/spans.rs           |   42
-rw-r--r--  compiler/rustc_mir_transform/src/coverage/spans/from_mir.rs  |   67
-rw-r--r--  compiler/rustc_mir_transform/src/dest_prop.rs                |    4
-rw-r--r--  compiler/rustc_mir_transform/src/elaborate_drops.rs          |  184
-rw-r--r--  compiler/rustc_mir_transform/src/ffi_unwind_calls.rs         |    2
-rw-r--r--  compiler/rustc_mir_transform/src/gvn.rs                      |    2
-rw-r--r--  compiler/rustc_mir_transform/src/inline.rs                   |   98
-rw-r--r--  compiler/rustc_mir_transform/src/jump_threading.rs           |  759
-rw-r--r--  compiler/rustc_mir_transform/src/lib.rs                      |   13
-rw-r--r--  compiler/rustc_mir_transform/src/remove_noop_landing_pads.rs |    2
-rw-r--r--  compiler/rustc_mir_transform/src/remove_uninit_drops.rs      |    7
-rw-r--r--  compiler/rustc_mir_transform/src/remove_zsts.rs              |    4
-rw-r--r--  compiler/rustc_mir_transform/src/separate_const_switch.rs    |    4
-rw-r--r--  compiler/rustc_mir_transform/src/shim.rs                     |   18
-rw-r--r--  compiler/rustc_mir_transform/src/sroa.rs                     |    4
24 files changed, 1302 insertions(+), 597 deletions(-)
diff --git a/compiler/rustc_mir_transform/src/abort_unwinding_calls.rs b/compiler/rustc_mir_transform/src/abort_unwinding_calls.rs
index 74243f1f8f2..2b3d423ea61 100644
--- a/compiler/rustc_mir_transform/src/abort_unwinding_calls.rs
+++ b/compiler/rustc_mir_transform/src/abort_unwinding_calls.rs
@@ -40,7 +40,7 @@ impl<'tcx> MirPass<'tcx> for AbortUnwindingCalls {
         let body_abi = match body_ty.kind() {
             ty::FnDef(..) => body_ty.fn_sig(tcx).abi(),
             ty::Closure(..) => Abi::RustCall,
-            ty::Generator(..) => Abi::Rust,
+            ty::Coroutine(..) => Abi::Rust,
             _ => span_bug!(body.span, "unexpected body ty: {:?}", body_ty),
         };
         let body_can_unwind = layout::fn_can_unwind(tcx, Some(def_id), body_abi);
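The ABI selected here feeds `fn_can_unwind`, which decides whether unwinding out of this body must be intercepted. The selection restated as a toy table (`BodyKind` and `Abi` below are illustrative stand-ins, not rustc's types):

```rust
// Toy restatement of the ABI selection above; BodyKind/Abi are
// stand-ins for rustc's internal types.
enum BodyKind {
    FnDef,     // ordinary fn item
    Closure,   // closures use the rust-call ABI
    Coroutine, // coroutines use the plain Rust ABI
}

enum Abi {
    Rust,
    RustCall,
}

fn body_abi(kind: BodyKind) -> Abi {
    match kind {
        // The real code reads the declared ABI from the fn signature here.
        BodyKind::FnDef => Abi::Rust,
        BodyKind::Closure => Abi::RustCall,
        BodyKind::Coroutine => Abi::Rust,
    }
}
```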
diff --git a/compiler/rustc_mir_transform/src/check_unsafety.rs b/compiler/rustc_mir_transform/src/check_unsafety.rs
index 7e4731f5d8a..8872f9a97d7 100644
--- a/compiler/rustc_mir_transform/src/check_unsafety.rs
+++ b/compiler/rustc_mir_transform/src/check_unsafety.rs
@@ -56,7 +56,7 @@ impl<'tcx> Visitor<'tcx> for UnsafetyChecker<'_, 'tcx> {
             | TerminatorKind::Drop { .. }
             | TerminatorKind::Yield { .. }
             | TerminatorKind::Assert { .. }
-            | TerminatorKind::GeneratorDrop
+            | TerminatorKind::CoroutineDrop
             | TerminatorKind::UnwindResume
             | TerminatorKind::UnwindTerminate(_)
             | TerminatorKind::Return
@@ -128,7 +128,7 @@ impl<'tcx> Visitor<'tcx> for UnsafetyChecker<'_, 'tcx> {
                         ),
                     }
                 }
-                &AggregateKind::Closure(def_id, _) | &AggregateKind::Generator(def_id, _, _) => {
+                &AggregateKind::Closure(def_id, _) | &AggregateKind::Coroutine(def_id, _, _) => {
                     let def_id = def_id.expect_local();
                     let UnsafetyCheckResult { violations, used_unsafe_blocks, .. } =
                         self.tcx.unsafety_check_result(def_id);
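For closure and coroutine aggregates, the checker pulls in the nested body's already-computed `unsafety_check_result` instead of re-walking it. A toy model of that aggregation (the names below are hypothetical, not rustc's query API):

```rust
// Hypothetical model of recursive violation collection: each body
// carries its own violations plus results for bodies nested inside it.
struct UnsafetyResult {
    violations: Vec<String>,
    nested_bodies: Vec<UnsafetyResult>, // closures/coroutines defined within
}

fn collect_violations(result: &UnsafetyResult, out: &mut Vec<String>) {
    out.extend(result.violations.iter().cloned());
    for nested in &result.nested_bodies {
        collect_violations(nested, out);
    }
}
```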
diff --git a/compiler/rustc_mir_transform/src/const_prop.rs b/compiler/rustc_mir_transform/src/const_prop.rs
index 3450a0f3686..53c0d0dea29 100644
--- a/compiler/rustc_mir_transform/src/const_prop.rs
+++ b/compiler/rustc_mir_transform/src/const_prop.rs
@@ -82,11 +82,11 @@ impl<'tcx> MirPass<'tcx> for ConstProp {
             return;
         }
 
-        // FIXME(welseywiser) const prop doesn't work on generators because of query cycles
+        // FIXME(welseywiser) const prop doesn't work on coroutines because of query cycles
         // computing their layout.
-        let is_generator = def_kind == DefKind::Generator;
-        if is_generator {
-            trace!("ConstProp skipped for generator {:?}", def_id);
+        let is_coroutine = def_kind == DefKind::Coroutine;
+        if is_coroutine {
+            trace!("ConstProp skipped for coroutine {:?}", def_id);
             return;
         }
 
@@ -512,7 +512,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
 
     fn replace_with_const(&mut self, place: Place<'tcx>) -> Option<Const<'tcx>> {
         // This will return None if the above `const_prop` invocation only "wrote" a
-        // type whose creation requires no write. E.g. a generator whose initial state
+        // type whose creation requires no write. E.g. a coroutine whose initial state
         // consists solely of uninitialized memory (so it doesn't capture any locals).
         let value = self.get_const(place)?;
         if !self.tcx.consider_optimizing(|| format!("ConstantPropagation - {value:?}")) {
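For reference, the effect of ConstProp shown at the surface level (the pass itself rewrites MIR, not source; the MIR in the comments is schematic):

```rust
// Schematic before/after of constant propagation.
fn before() -> u32 {
    let x = 2 + 3; // MIR: _1 = Add(const 2_u32, const 3_u32)
    x * 10         // MIR: _0 = Mul(move _1, const 10_u32)
}

fn after() -> u32 {
    50 // MIR after ConstProp: _0 = const 50_u32
}
```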
diff --git a/compiler/rustc_mir_transform/src/const_prop_lint.rs b/compiler/rustc_mir_transform/src/const_prop_lint.rs
index aad513d7355..a23ba9c4aa9 100644
--- a/compiler/rustc_mir_transform/src/const_prop_lint.rs
+++ b/compiler/rustc_mir_transform/src/const_prop_lint.rs
@@ -59,10 +59,10 @@ impl<'tcx> MirLint<'tcx> for ConstPropLint {
             return;
         }
 
-        // FIXME(welseywiser) const prop doesn't work on generators because of query cycles
+        // FIXME(welseywiser) const prop doesn't work on coroutines because of query cycles
         // computing their layout.
-        if let DefKind::Generator = def_kind {
-            trace!("ConstPropLint skipped for generator {:?}", def_id);
+        if let DefKind::Coroutine = def_kind {
+            trace!("ConstPropLint skipped for coroutine {:?}", def_id);
             return;
         }
 
@@ -648,7 +648,7 @@ impl<'tcx> Visitor<'tcx> for ConstPropagator<'_, 'tcx> {
             | TerminatorKind::Unreachable
             | TerminatorKind::Drop { .. }
             | TerminatorKind::Yield { .. }
-            | TerminatorKind::GeneratorDrop
+            | TerminatorKind::CoroutineDrop
             | TerminatorKind::FalseEdge { .. }
             | TerminatorKind::FalseUnwind { .. }
             | TerminatorKind::Call { .. }
diff --git a/compiler/rustc_mir_transform/src/generator.rs b/compiler/rustc_mir_transform/src/coroutine.rs
index a6693519e54..fa56d59dd80 100644
--- a/compiler/rustc_mir_transform/src/generator.rs
+++ b/compiler/rustc_mir_transform/src/coroutine.rs
@@ -1,53 +1,53 @@
-//! This is the implementation of the pass which transforms generators into state machines.
+//! This is the implementation of the pass which transforms coroutines into state machines.
 //!
-//! MIR generation for generators creates a function which has a self argument which
-//! passes by value. This argument is effectively a generator type which only contains upvars and
-//! is only used for this argument inside the MIR for the generator.
+//! MIR generation for coroutines creates a function which has a self argument which
+//! passes by value. This argument is effectively a coroutine type which only contains upvars and
+//! is only used for this argument inside the MIR for the coroutine.
 //! It is passed by value to enable upvars to be moved out of it. Drop elaboration runs on that
 //! MIR before this pass and creates drop flags for MIR locals.
-//! It will also drop the generator argument (which only consists of upvars) if any of the upvars
-//! are moved out of. This pass elaborates the drops of upvars / generator argument in the case
+//! It will also drop the coroutine argument (which only consists of upvars) if any of the upvars
+//! are moved out of. This pass elaborates the drops of upvars / coroutine argument in the case
 //! that none of the upvars were moved out of. This is because we cannot have any drops of this
-//! generator in the MIR, since it is used to create the drop glue for the generator. We'd get
+//! coroutine in the MIR, since it is used to create the drop glue for the coroutine. We'd get
 //! infinite recursion otherwise.
 //!
-//! This pass creates the implementation for either the `Generator::resume` or `Future::poll`
-//! function and the drop shim for the generator based on the MIR input.
-//! It converts the generator argument from Self to &mut Self adding derefs in the MIR as needed.
-//! It computes the final layout of the generator struct which looks like this:
+//! This pass creates the implementation for either the `Coroutine::resume` or `Future::poll`
+//! function and the drop shim for the coroutine based on the MIR input.
+//! It converts the coroutine argument from Self to &mut Self adding derefs in the MIR as needed.
+//! It computes the final layout of the coroutine struct which looks like this:
 //!     First upvars are stored
-//!     It is followed by the generator state field.
+//!     It is followed by the coroutine state field.
 //!     Then finally the MIR locals which are live across a suspension point are stored.
 //!     ```ignore (illustrative)
-//!     struct Generator {
+//!     struct Coroutine {
 //!         upvars...,
 //!         state: u32,
 //!         mir_locals...,
 //!     }
 //!     ```
 //! This pass computes the meaning of the state field and the MIR locals which are live
-//! across a suspension point. There are however three hardcoded generator states:
-//!     0 - Generator have not been resumed yet
-//!     1 - Generator has returned / is completed
-//!     2 - Generator has been poisoned
+//! across a suspension point. There are however three hardcoded coroutine states:
+//!     0 - Coroutine has not been resumed yet
+//!     1 - Coroutine has returned / is completed
+//!     2 - Coroutine has been poisoned
 //!
-//! It also rewrites `return x` and `yield y` as setting a new generator state and returning
-//! `GeneratorState::Complete(x)` and `GeneratorState::Yielded(y)`,
+//! It also rewrites `return x` and `yield y` as setting a new coroutine state and returning
+//! `CoroutineState::Complete(x)` and `CoroutineState::Yielded(y)`,
 //! or `Poll::Ready(x)` and `Poll::Pending` respectively.
-//! MIR locals which are live across a suspension point are moved to the generator struct
-//! with references to them being updated with references to the generator struct.
+//! MIR locals which are live across a suspension point are moved to the coroutine struct
+//! with references to them being updated with references to the coroutine struct.
 //!
-//! The pass creates two functions which have a switch on the generator state giving
+//! The pass creates two functions which have a switch on the coroutine state giving
 //! the action to take.
 //!
-//! One of them is the implementation of `Generator::resume` / `Future::poll`.
-//! For generators with state 0 (unresumed) it starts the execution of the generator.
-//! For generators with state 1 (returned) and state 2 (poisoned) it panics.
+//! One of them is the implementation of `Coroutine::resume` / `Future::poll`.
+//! For coroutines with state 0 (unresumed) it starts the execution of the coroutine.
+//! For coroutines with state 1 (returned) and state 2 (poisoned) it panics.
 //! Otherwise it continues the execution from the last suspension point.
 //!
-//! The other function is the drop glue for the generator.
-//! For generators with state 0 (unresumed) it drops the upvars of the generator.
-//! For generators with state 1 (returned) and state 2 (poisoned) it does nothing.
+//! The other function is the drop glue for the coroutine.
+//! For coroutines with state 0 (unresumed) it drops the upvars of the coroutine.
+//! For coroutines with state 1 (returned) and state 2 (poisoned) it does nothing.
 //! Otherwise it drops all the values in scope at the last suspension point.
 
 use crate::abort_unwinding_calls;
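As a concrete picture of the transform described in the comment above, here is a hand-written sketch of the resulting state machine. `SimpleCoroutine` and the local `CoroutineState` enum are illustrative stand-ins; the real pass rewrites MIR in place and stores saved locals as variant fields rather than emitting source like this:

```rust
// Yield/return payloads, modelled after core::ops::CoroutineState.
enum CoroutineState<Y, R> {
    Yielded(Y),  // produced by `yield y`
    Complete(R), // produced by `return x`
}

// One variant per reserved state, plus one per suspension point.
enum SimpleCoroutine {
    Unresumed, // state 0: never resumed; holds only upvars
    Returned,  // state 1: completed; resuming again panics
    Poisoned,  // state 2: panicked during a resume; resuming panics
    Suspend0,  // one extra variant for the single `yield` below
}

impl SimpleCoroutine {
    // Sketch of `resume` for a coroutine like `|| { yield 1; "done" }`.
    fn resume(&mut self) -> CoroutineState<i32, &'static str> {
        // Taking the state out and defaulting to Poisoned models the
        // poison-on-unwind behaviour described above.
        match std::mem::replace(self, SimpleCoroutine::Poisoned) {
            SimpleCoroutine::Unresumed => {
                *self = SimpleCoroutine::Suspend0;
                CoroutineState::Yielded(1)
            }
            SimpleCoroutine::Suspend0 => {
                *self = SimpleCoroutine::Returned;
                CoroutineState::Complete("done")
            }
            SimpleCoroutine::Returned => panic!("coroutine resumed after completion"),
            SimpleCoroutine::Poisoned => panic!("coroutine resumed after panicking"),
        }
    }
}
```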
@@ -60,7 +60,7 @@ use rustc_data_structures::fx::{FxHashMap, FxHashSet};
 use rustc_errors::pluralize;
 use rustc_hir as hir;
 use rustc_hir::lang_items::LangItem;
-use rustc_hir::GeneratorKind;
+use rustc_hir::CoroutineKind;
 use rustc_index::bit_set::{BitMatrix, BitSet, GrowableBitSet};
 use rustc_index::{Idx, IndexVec};
 use rustc_middle::mir::dump_mir;
@@ -68,7 +68,7 @@ use rustc_middle::mir::visit::{MutVisitor, PlaceContext, Visitor};
 use rustc_middle::mir::*;
 use rustc_middle::ty::InstanceDef;
 use rustc_middle::ty::{self, AdtDef, Ty, TyCtxt};
-use rustc_middle::ty::{GeneratorArgs, GenericArgsRef};
+use rustc_middle::ty::{CoroutineArgs, GenericArgsRef};
 use rustc_mir_dataflow::impls::{
     MaybeBorrowedLocals, MaybeLiveLocals, MaybeRequiresStorage, MaybeStorageLive,
 };
@@ -196,19 +196,19 @@ fn replace_base<'tcx>(place: &mut Place<'tcx>, new_base: Place<'tcx>, tcx: TyCtx
 
 const SELF_ARG: Local = Local::from_u32(1);
 
-/// Generator has not been resumed yet.
-const UNRESUMED: usize = GeneratorArgs::UNRESUMED;
-/// Generator has returned / is completed.
-const RETURNED: usize = GeneratorArgs::RETURNED;
-/// Generator has panicked and is poisoned.
-const POISONED: usize = GeneratorArgs::POISONED;
+/// Coroutine has not been resumed yet.
+const UNRESUMED: usize = CoroutineArgs::UNRESUMED;
+/// Coroutine has returned / is completed.
+const RETURNED: usize = CoroutineArgs::RETURNED;
+/// Coroutine has panicked and is poisoned.
+const POISONED: usize = CoroutineArgs::POISONED;
 
-/// Number of variants to reserve in generator state. Corresponds to
-/// `UNRESUMED` (beginning of a generator) and `RETURNED`/`POISONED`
-/// (end of a generator) states.
+/// Number of variants to reserve in coroutine state. Corresponds to
+/// `UNRESUMED` (beginning of a coroutine) and `RETURNED`/`POISONED`
+/// (end of a coroutine) states.
 const RESERVED_VARIANTS: usize = 3;
 
-/// A `yield` point in the generator.
+/// A `yield` point in the coroutine.
 struct SuspensionPoint<'tcx> {
     /// State discriminant used when suspending or resuming at this point.
     state: usize,
@@ -216,7 +216,7 @@ struct SuspensionPoint<'tcx> {
     resume: BasicBlock,
     /// Where to move the resume argument after resumption.
     resume_arg: Place<'tcx>,
-    /// Which block to jump to if the generator is dropped in this state.
+    /// Which block to jump to if the coroutine is dropped in this state.
     drop: Option<BasicBlock>,
     /// Set of locals that have live storage while at this suspension point.
     storage_liveness: GrowableBitSet<Local>,
@@ -228,10 +228,10 @@ struct TransformVisitor<'tcx> {
     state_adt_ref: AdtDef<'tcx>,
     state_args: GenericArgsRef<'tcx>,
 
-    // The type of the discriminant in the generator struct
+    // The type of the discriminant in the coroutine struct
     discr_ty: Ty<'tcx>,
 
-    // Mapping from Local to (type of local, generator struct index)
+    // Mapping from Local to (type of local, coroutine struct index)
     // FIXME(eddyb) This should use `IndexVec<Local, Option<_>>`.
     remap: FxHashMap<Local, (Ty<'tcx>, VariantIdx, FieldIdx)>,
 
@@ -249,9 +249,9 @@ struct TransformVisitor<'tcx> {
 }
 
 impl<'tcx> TransformVisitor<'tcx> {
-    // Make a `GeneratorState` or `Poll` variant assignment.
+    // Make a `CoroutineState` or `Poll` variant assignment.
     //
-    // `core::ops::GeneratorState` only has single element tuple variants,
+    // `core::ops::CoroutineState` only has single element tuple variants,
     // so we can just write to the downcasted first field and then set the
     // discriminant to the appropriate variant.
     fn make_state(
@@ -262,8 +262,8 @@ impl<'tcx> TransformVisitor<'tcx> {
         statements: &mut Vec<Statement<'tcx>>,
     ) {
         let idx = VariantIdx::new(match (is_return, self.is_async_kind) {
-            (true, false) => 1,  // GeneratorState::Complete
-            (false, false) => 0, // GeneratorState::Yielded
+            (true, false) => 1,  // CoroutineState::Complete
+            (false, false) => 0, // CoroutineState::Yielded
             (true, true) => 0,   // Poll::Ready
             (false, true) => 1,  // Poll::Pending
         });
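That variant-index table, restated as a standalone function for clarity (illustrative only):

```rust
// (is_return, is_async) -> variant index, as chosen by `make_state`.
fn variant_idx(is_return: bool, is_async: bool) -> u32 {
    match (is_return, is_async) {
        (true, false) => 1,  // CoroutineState::Complete
        (false, false) => 0, // CoroutineState::Yielded
        (true, true) => 0,   // Poll::Ready
        (false, true) => 1,  // Poll::Pending
    }
}
```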
@@ -285,7 +285,7 @@ impl<'tcx> TransformVisitor<'tcx> {
             return;
         }
 
-        // else: `Poll::Ready(x)`, `GeneratorState::Yielded(x)` or `GeneratorState::Complete(x)`
+        // else: `Poll::Ready(x)`, `CoroutineState::Yielded(x)` or `CoroutineState::Complete(x)`
         assert_eq!(self.state_adt_ref.variant(idx).fields.len(), 1);
 
         statements.push(Statement {
@@ -297,7 +297,7 @@ impl<'tcx> TransformVisitor<'tcx> {
         });
     }
 
-    // Create a Place referencing a generator struct field
+    // Create a Place referencing a coroutine struct field
     fn make_field(&self, variant_index: VariantIdx, idx: FieldIdx, ty: Ty<'tcx>) -> Place<'tcx> {
         let self_place = Place::from(SELF_ARG);
         let base = self.tcx.mk_place_downcast_unnamed(self_place, variant_index);
@@ -349,7 +349,7 @@ impl<'tcx> MutVisitor<'tcx> for TransformVisitor<'tcx> {
         _context: PlaceContext,
         _location: Location,
     ) {
-        // Replace an Local in the remap with a generator struct access
+        // Replace a Local in the remap with a coroutine struct access
         if let Some(&(ty, variant_index, idx)) = self.remap.get(&place.local) {
             replace_base(place, self.make_field(variant_index, idx, ty), self.tcx);
         }
@@ -413,7 +413,7 @@ impl<'tcx> MutVisitor<'tcx> for TransformVisitor<'tcx> {
     }
 }
 
-fn make_generator_state_argument_indirect<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+fn make_coroutine_state_argument_indirect<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
     let gen_ty = body.local_decls.raw[1].ty;
 
     let ref_gen_ty = Ty::new_ref(
@@ -422,14 +422,14 @@ fn make_generator_state_argument_indirect<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Bo
         ty::TypeAndMut { ty: gen_ty, mutbl: Mutability::Mut },
     );
 
-    // Replace the by value generator argument
+    // Replace the by value coroutine argument
     body.local_decls.raw[1].ty = ref_gen_ty;
 
-    // Add a deref to accesses of the generator state
+    // Add a deref to accesses of the coroutine state
     DerefArgVisitor { tcx }.visit_body(body);
 }
 
-fn make_generator_state_argument_pinned<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+fn make_coroutine_state_argument_pinned<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
     let ref_gen_ty = body.local_decls.raw[1].ty;
 
     let pin_did = tcx.require_lang_item(LangItem::Pin, Some(body.span));
@@ -437,10 +437,10 @@ fn make_generator_state_argument_pinned<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body
     let args = tcx.mk_args(&[ref_gen_ty.into()]);
     let pin_ref_gen_ty = Ty::new_adt(tcx, pin_adt_ref, args);
 
-    // Replace the by ref generator argument
+    // Replace the by ref coroutine argument
     body.local_decls.raw[1].ty = pin_ref_gen_ty;
 
-    // Add the Pin field access to accesses of the generator state
+    // Add the Pin field access to accesses of the coroutine state
     PinArgVisitor { ref_gen_ty, tcx }.visit_body(body);
 }
 
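Seen at the surface level, the two rewrites above turn the by-value `self` into `&mut Self` and then `Pin<&mut Self>`. A minimal sketch, assuming a hypothetical `MyCoroutine` state struct (the real pass edits `local_decls` and inserts the deref/field projections directly in MIR):

```rust
use std::pin::Pin;

struct MyCoroutine {
    state: u32,
}

// After make_coroutine_state_argument_indirect the argument is
// `&mut MyCoroutine`; after make_coroutine_state_argument_pinned it is
// `Pin<&mut MyCoroutine>`, and state accesses go through the pin.
fn resume(mut self_: Pin<&mut MyCoroutine>) -> u32 {
    // Equivalent of the Pin field access inserted by PinArgVisitor.
    // Sound here only because this toy struct has no self-references.
    let this: &mut MyCoroutine = unsafe { self_.as_mut().get_unchecked_mut() };
    this.state += 1;
    this.state
}
```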
@@ -465,7 +465,7 @@ fn replace_local<'tcx>(
     new_local
 }
 
-/// Transforms the `body` of the generator applying the following transforms:
+/// Transforms the `body` of the coroutine applying the following transforms:
 ///
 /// - Eliminates all the `get_context` calls that async lowering created.
 /// - Replace all `Local` `ResumeTy` types with `&mut Context<'_>` (`context_mut_ref`).
@@ -485,7 +485,7 @@ fn replace_local<'tcx>(
 ///
 /// The async lowering step and the type / lifetime inference / checking are
 /// still using the `ResumeTy` indirection for the time being, and that indirection
-/// is removed here. After this transform, the generator body only knows about `&mut Context<'_>`.
+/// is removed here. After this transform, the coroutine body only knows about `&mut Context<'_>`.
 fn transform_async_context<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
     let context_mut_ref = Ty::new_task_context(tcx);
 
@@ -565,10 +565,10 @@ fn replace_resume_ty_local<'tcx>(
 
 struct LivenessInfo {
     /// Which locals are live across any suspension point.
-    saved_locals: GeneratorSavedLocals,
+    saved_locals: CoroutineSavedLocals,
 
     /// The set of saved locals live at each suspension point.
-    live_locals_at_suspension_points: Vec<BitSet<GeneratorSavedLocal>>,
+    live_locals_at_suspension_points: Vec<BitSet<CoroutineSavedLocal>>,
 
     /// Parallel vec to the above with SourceInfo for each yield terminator.
     source_info_at_suspension_points: Vec<SourceInfo>,
@@ -576,7 +576,7 @@ struct LivenessInfo {
     /// For every saved local, the set of other saved locals that are
     /// storage-live at the same time as this local. We cannot overlap locals in
     /// the layout which have conflicting storage.
-    storage_conflicts: BitMatrix<GeneratorSavedLocal, GeneratorSavedLocal>,
+    storage_conflicts: BitMatrix<CoroutineSavedLocal, CoroutineSavedLocal>,
 
     /// For every suspending block, the locals which are storage-live across
     /// that suspension point.
@@ -601,7 +601,7 @@ fn locals_live_across_suspend_points<'tcx>(
     // Calculate the MIR locals which have been previously
     // borrowed (even if they are still active).
     let borrowed_locals_results =
-        MaybeBorrowedLocals.into_engine(tcx, body_ref).pass_name("generator").iterate_to_fixpoint();
+        MaybeBorrowedLocals.into_engine(tcx, body_ref).pass_name("coroutine").iterate_to_fixpoint();
 
     let mut borrowed_locals_cursor = borrowed_locals_results.cloned_results_cursor(body_ref);
 
@@ -616,7 +616,7 @@ fn locals_live_across_suspend_points<'tcx>(
     // Calculate the liveness of MIR locals ignoring borrows.
     let mut liveness = MaybeLiveLocals
         .into_engine(tcx, body_ref)
-        .pass_name("generator")
+        .pass_name("coroutine")
         .iterate_to_fixpoint()
         .into_results_cursor(body_ref);
 
@@ -635,8 +635,8 @@ fn locals_live_across_suspend_points<'tcx>(
 
             if !movable {
                 // The `liveness` variable contains the liveness of MIR locals ignoring borrows.
-                // This is correct for movable generators since borrows cannot live across
-                // suspension points. However for immovable generators we need to account for
+                // This is correct for movable coroutines since borrows cannot live across
+                // suspension points. However for immovable coroutines we need to account for
                 // borrows, so we conservatively assume that all borrowed locals are live until
                 // we find a StorageDead statement referencing the locals.
                 // To do this we just union our `liveness` result with `borrowed_locals`, which
@@ -659,7 +659,7 @@ fn locals_live_across_suspend_points<'tcx>(
             requires_storage_cursor.seek_before_primary_effect(loc);
             live_locals.intersect(requires_storage_cursor.get());
 
-            // The generator argument is ignored.
+            // The coroutine argument is ignored.
             live_locals.remove(SELF_ARG);
 
             debug!("loc = {:?}, live_locals = {:?}", loc, live_locals);
@@ -674,7 +674,7 @@ fn locals_live_across_suspend_points<'tcx>(
     }
 
     debug!("live_locals_anywhere = {:?}", live_locals_at_any_suspension_point);
-    let saved_locals = GeneratorSavedLocals(live_locals_at_any_suspension_point);
+    let saved_locals = CoroutineSavedLocals(live_locals_at_any_suspension_point);
 
     // Renumber our liveness_map bitsets to include only the locals we are
     // saving.
@@ -701,21 +701,21 @@ fn locals_live_across_suspend_points<'tcx>(
 
 /// The set of `Local`s that must be saved across yield points.
 ///
-/// `GeneratorSavedLocal` is indexed in terms of the elements in this set;
-/// i.e. `GeneratorSavedLocal::new(1)` corresponds to the second local
+/// `CoroutineSavedLocal` is indexed in terms of the elements in this set;
+/// i.e. `CoroutineSavedLocal::new(1)` corresponds to the second local
 /// included in this set.
-struct GeneratorSavedLocals(BitSet<Local>);
+struct CoroutineSavedLocals(BitSet<Local>);
 
-impl GeneratorSavedLocals {
-    /// Returns an iterator over each `GeneratorSavedLocal` along with the `Local` it corresponds
+impl CoroutineSavedLocals {
+    /// Returns an iterator over each `CoroutineSavedLocal` along with the `Local` it corresponds
     /// to.
-    fn iter_enumerated(&self) -> impl '_ + Iterator<Item = (GeneratorSavedLocal, Local)> {
-        self.iter().enumerate().map(|(i, l)| (GeneratorSavedLocal::from(i), l))
+    fn iter_enumerated(&self) -> impl '_ + Iterator<Item = (CoroutineSavedLocal, Local)> {
+        self.iter().enumerate().map(|(i, l)| (CoroutineSavedLocal::from(i), l))
     }
 
     /// Transforms a `BitSet<Local>` that contains only locals saved across yield points to the
-    /// equivalent `BitSet<GeneratorSavedLocal>`.
-    fn renumber_bitset(&self, input: &BitSet<Local>) -> BitSet<GeneratorSavedLocal> {
+    /// equivalent `BitSet<CoroutineSavedLocal>`.
+    fn renumber_bitset(&self, input: &BitSet<Local>) -> BitSet<CoroutineSavedLocal> {
         assert!(self.superset(&input), "{:?} not a superset of {:?}", self.0, input);
         let mut out = BitSet::new_empty(self.count());
         for (saved_local, local) in self.iter_enumerated() {
@@ -726,17 +726,17 @@ impl GeneratorSavedLocals {
         out
     }
 
-    fn get(&self, local: Local) -> Option<GeneratorSavedLocal> {
+    fn get(&self, local: Local) -> Option<CoroutineSavedLocal> {
         if !self.contains(local) {
             return None;
         }
 
         let idx = self.iter().take_while(|&l| l < local).count();
-        Some(GeneratorSavedLocal::new(idx))
+        Some(CoroutineSavedLocal::new(idx))
     }
 }
 
-impl ops::Deref for GeneratorSavedLocals {
+impl ops::Deref for CoroutineSavedLocals {
     type Target = BitSet<Local>;
 
     fn deref(&self) -> &Self::Target {
@@ -747,13 +747,13 @@ impl ops::Deref for GeneratorSavedLocals {
 /// For every saved local, looks for which locals are StorageLive at the same
 /// time. Generates a bitset for every local of all the other locals that may be
 /// StorageLive simultaneously with that local. This is used in the layout
-/// computation; see `GeneratorLayout` for more.
+/// computation; see `CoroutineLayout` for more.
 fn compute_storage_conflicts<'mir, 'tcx>(
     body: &'mir Body<'tcx>,
-    saved_locals: &GeneratorSavedLocals,
+    saved_locals: &CoroutineSavedLocals,
     always_live_locals: BitSet<Local>,
     mut requires_storage: rustc_mir_dataflow::Results<'tcx, MaybeRequiresStorage<'_, 'mir, 'tcx>>,
-) -> BitMatrix<GeneratorSavedLocal, GeneratorSavedLocal> {
+) -> BitMatrix<CoroutineSavedLocal, CoroutineSavedLocal> {
     assert_eq!(body.local_decls.len(), saved_locals.domain_size());
 
     debug!("compute_storage_conflicts({:?})", body.span);
@@ -775,7 +775,7 @@ fn compute_storage_conflicts<'mir, 'tcx>(
 
     let local_conflicts = visitor.local_conflicts;
 
-    // Compress the matrix using only stored locals (Local -> GeneratorSavedLocal).
+    // Compress the matrix using only stored locals (Local -> CoroutineSavedLocal).
     //
     // NOTE: Today we store a full conflict bitset for every local. Technically
     // this is twice as many bits as we need, since the relation is symmetric.
@@ -801,9 +801,9 @@ fn compute_storage_conflicts<'mir, 'tcx>(
 
 struct StorageConflictVisitor<'mir, 'tcx, 's> {
     body: &'mir Body<'tcx>,
-    saved_locals: &'s GeneratorSavedLocals,
+    saved_locals: &'s CoroutineSavedLocals,
     // FIXME(tmandry): Consider using sparse bitsets here once we have good
-    // benchmarks for generators.
+    // benchmarks for coroutines.
     local_conflicts: BitMatrix<Local, Local>,
 }
 
@@ -858,7 +858,7 @@ fn compute_layout<'tcx>(
     body: &Body<'tcx>,
 ) -> (
     FxHashMap<Local, (Ty<'tcx>, VariantIdx, FieldIdx)>,
-    GeneratorLayout<'tcx>,
+    CoroutineLayout<'tcx>,
     IndexVec<BasicBlock, Option<BitSet<Local>>>,
 ) {
     let LivenessInfo {
@@ -870,10 +870,10 @@ fn compute_layout<'tcx>(
     } = liveness;
 
     // Gather live local types and their indices.
-    let mut locals = IndexVec::<GeneratorSavedLocal, _>::new();
-    let mut tys = IndexVec::<GeneratorSavedLocal, _>::new();
+    let mut locals = IndexVec::<CoroutineSavedLocal, _>::new();
+    let mut tys = IndexVec::<CoroutineSavedLocal, _>::new();
     for (saved_local, local) in saved_locals.iter_enumerated() {
-        debug!("generator saved local {:?} => {:?}", saved_local, local);
+        debug!("coroutine saved local {:?} => {:?}", saved_local, local);
 
         locals.push(local);
         let decl = &body.local_decls[local];
@@ -895,7 +895,7 @@ fn compute_layout<'tcx>(
             _ => false,
         };
         let decl =
-            GeneratorSavedTy { ty: decl.ty, source_info: decl.source_info, ignore_for_traits };
+            CoroutineSavedTy { ty: decl.ty, source_info: decl.source_info, ignore_for_traits };
         debug!(?decl);
 
         tys.push(decl);
@@ -914,9 +914,9 @@ fn compute_layout<'tcx>(
     .copied()
     .collect();
 
-    // Build the generator variant field list.
-    // Create a map from local indices to generator struct indices.
-    let mut variant_fields: IndexVec<VariantIdx, IndexVec<FieldIdx, GeneratorSavedLocal>> =
+    // Build the coroutine variant field list.
+    // Create a map from local indices to coroutine struct indices.
+    let mut variant_fields: IndexVec<VariantIdx, IndexVec<FieldIdx, CoroutineSavedLocal>> =
         iter::repeat(IndexVec::new()).take(RESERVED_VARIANTS).collect();
     let mut remap = FxHashMap::default();
     for (suspension_point_idx, live_locals) in live_locals_at_suspension_points.iter().enumerate() {
@@ -926,7 +926,7 @@ fn compute_layout<'tcx>(
             fields.push(saved_local);
             // Note that if a field is included in multiple variants, we will
             // just use the first one here. That's fine; fields do not move
-            // around inside generators, so it doesn't matter which variant
+            // around inside coroutines, so it doesn't matter which variant
             // index we access them by.
             let idx = FieldIdx::from_usize(idx);
             remap.entry(locals[saved_local]).or_insert((tys[saved_local].ty, variant_index, idx));
@@ -934,8 +934,8 @@ fn compute_layout<'tcx>(
         variant_fields.push(fields);
         variant_source_info.push(source_info_at_suspension_points[suspension_point_idx]);
     }
-    debug!("generator variant_fields = {:?}", variant_fields);
-    debug!("generator storage_conflicts = {:#?}", storage_conflicts);
+    debug!("coroutine variant_fields = {:?}", variant_fields);
+    debug!("coroutine storage_conflicts = {:#?}", storage_conflicts);
 
     let mut field_names = IndexVec::from_elem(None, &tys);
     for var in &body.var_debug_info {
@@ -947,7 +947,7 @@ fn compute_layout<'tcx>(
         field_names.get_or_insert_with(saved_local, || var.name);
     }
 
-    let layout = GeneratorLayout {
+    let layout = CoroutineLayout {
         field_tys: tys,
         field_names,
         variant_fields,
@@ -959,7 +959,7 @@ fn compute_layout<'tcx>(
     (remap, layout, storage_liveness)
 }
 
-/// Replaces the entry point of `body` with a block that switches on the generator discriminant and
+/// Replaces the entry point of `body` with a block that switches on the coroutine discriminant and
 /// dispatches to blocks according to `cases`.
 ///
 /// After this function, the former entry point of the function will be bb1.
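The shape of that dispatch for the resume function, as pseudocode (illustrative; the real code builds a `SwitchInt` terminator over the discriminant):

```rust
// Schematic of the entry-block switch installed by `insert_switch`.
fn dispatch(state: usize) {
    match state {
        0 => { /* UNRESUMED: jump to the original entry block (now bb1) */ }
        1 => { /* RETURNED: jump to the ResumedAfterReturn panic block */ }
        2 => { /* POISONED: jump to the ResumedAfterPanic panic block */ }
        _ => { /* 3..: jump to the resume block of that suspension point */ }
    }
}
```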
@@ -992,14 +992,14 @@ fn insert_switch<'tcx>(
     }
 }
 
-fn elaborate_generator_drops<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+fn elaborate_coroutine_drops<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
     use crate::shim::DropShimElaborator;
     use rustc_middle::mir::patch::MirPatch;
     use rustc_mir_dataflow::elaborate_drops::{elaborate_drop, Unwind};
 
-    // Note that `elaborate_drops` only drops the upvars of a generator, and
+    // Note that `elaborate_drops` only drops the upvars of a coroutine, and
     // this is ok because `open_drop` can only be reached within that own
-    // generator's resume function.
+    // coroutine's resume function.
 
     let def_id = body.source.def_id();
     let param_env = tcx.param_env(def_id);
@@ -1047,7 +1047,7 @@ fn elaborate_generator_drops<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
     elaborator.patch.apply(body);
 }
 
-fn create_generator_drop_shim<'tcx>(
+fn create_coroutine_drop_shim<'tcx>(
     tcx: TyCtxt<'tcx>,
     transform: &TransformVisitor<'tcx>,
     gen_ty: Ty<'tcx>,
@@ -1070,7 +1070,7 @@ fn create_generator_drop_shim<'tcx>(
 
     for block in body.basic_blocks_mut() {
         let kind = &mut block.terminator_mut().kind;
-        if let TerminatorKind::GeneratorDrop = *kind {
+        if let TerminatorKind::CoroutineDrop = *kind {
             *kind = TerminatorKind::Return;
         }
     }
@@ -1078,9 +1078,9 @@ fn create_generator_drop_shim<'tcx>(
     // Replace the return variable
     body.local_decls[RETURN_PLACE] = LocalDecl::with_source_info(Ty::new_unit(tcx), source_info);
 
-    make_generator_state_argument_indirect(tcx, &mut body);
+    make_coroutine_state_argument_indirect(tcx, &mut body);
 
-    // Change the generator argument from &mut to *mut
+    // Change the coroutine argument from &mut to *mut
     body.local_decls[SELF_ARG] = LocalDecl::with_source_info(
         Ty::new_ptr(tcx, ty::TypeAndMut { ty: gen_ty, mutbl: hir::Mutability::Mut }),
         source_info,
@@ -1104,10 +1104,10 @@ fn create_generator_drop_shim<'tcx>(
         None,
     );
 
+    // Temporarily change MirSource to the coroutine's instance so that dump_mir produces a more sensible
+    // Temporary change MirSource to coroutine's instance so that dump_mir produces more sensible
     // filename.
     body.source.instance = gen_instance;
-    dump_mir(tcx, false, "generator_drop", &0, &body, |_, _| Ok(()));
+    dump_mir(tcx, false, "coroutine_drop", &0, &body, |_, _| Ok(()));
     body.source.instance = drop_instance;
 
     body
@@ -1182,7 +1182,7 @@ fn can_unwind<'tcx>(tcx: TyCtxt<'tcx>, body: &Body<'tcx>) -> bool {
             | TerminatorKind::UnwindTerminate(_)
             | TerminatorKind::Return
             | TerminatorKind::Unreachable
-            | TerminatorKind::GeneratorDrop
+            | TerminatorKind::CoroutineDrop
             | TerminatorKind::FalseEdge { .. }
             | TerminatorKind::FalseUnwind { .. } => {}
 
@@ -1191,7 +1191,7 @@ fn can_unwind<'tcx>(tcx: TyCtxt<'tcx>, body: &Body<'tcx>) -> bool {
             TerminatorKind::UnwindResume => {}
 
             TerminatorKind::Yield { .. } => {
-                unreachable!("`can_unwind` called before generator transform")
+                unreachable!("`can_unwind` called before coroutine transform")
             }
 
             // These may unwind.
@@ -1206,7 +1206,7 @@ fn can_unwind<'tcx>(tcx: TyCtxt<'tcx>, body: &Body<'tcx>) -> bool {
     false
 }
 
-fn create_generator_resume_function<'tcx>(
+fn create_coroutine_resume_function<'tcx>(
     tcx: TyCtxt<'tcx>,
     transform: TransformVisitor<'tcx>,
     body: &mut Body<'tcx>,
@@ -1214,7 +1214,7 @@ fn create_generator_resume_function<'tcx>(
 ) {
     let can_unwind = can_unwind(tcx, body);
 
-    // Poison the generator when it unwinds
+    // Poison the coroutine when it unwinds
     if can_unwind {
         let source_info = SourceInfo::outermost(body.span);
         let poison_block = body.basic_blocks_mut().push(BasicBlockData {
@@ -1253,26 +1253,26 @@ fn create_generator_resume_function<'tcx>(
     cases.insert(0, (UNRESUMED, START_BLOCK));
 
     // Panic when resumed on the returned or poisoned state
-    let generator_kind = body.generator_kind().unwrap();
+    let coroutine_kind = body.coroutine_kind().unwrap();
 
     if can_unwind {
         cases.insert(
             1,
-            (POISONED, insert_panic_block(tcx, body, ResumedAfterPanic(generator_kind))),
+            (POISONED, insert_panic_block(tcx, body, ResumedAfterPanic(coroutine_kind))),
         );
     }
 
     if can_return {
         cases.insert(
             1,
-            (RETURNED, insert_panic_block(tcx, body, ResumedAfterReturn(generator_kind))),
+            (RETURNED, insert_panic_block(tcx, body, ResumedAfterReturn(coroutine_kind))),
         );
     }
 
     insert_switch(body, cases, &transform, TerminatorKind::Unreachable);
 
-    make_generator_state_argument_indirect(tcx, body);
-    make_generator_state_argument_pinned(tcx, body);
+    make_coroutine_state_argument_indirect(tcx, body);
+    make_coroutine_state_argument_pinned(tcx, body);
 
     // Make sure we remove dead blocks to remove
     // unrelated code from the drop part of the function
@@ -1280,7 +1280,7 @@ fn create_generator_resume_function<'tcx>(
 
     pm::run_passes_no_validate(tcx, body, &[&abort_unwinding_calls::AbortUnwindingCalls], None);
 
-    dump_mir(tcx, false, "generator_resume", &0, body, |_, _| Ok(()));
+    dump_mir(tcx, false, "coroutine_resume", &0, body, |_, _| Ok(()));
 }
 
 fn insert_clean_drop(body: &mut Body<'_>) -> BasicBlock {
@@ -1294,7 +1294,7 @@ fn insert_clean_drop(body: &mut Body<'_>) -> BasicBlock {
     };
     let source_info = SourceInfo::outermost(body.span);
 
-    // Create a block to destroy an unresumed generators. This can only destroy upvars.
+    // Create a block to destroy an unresumed coroutine. This can only destroy upvars.
     body.basic_blocks_mut().push(BasicBlockData {
         statements: Vec::new(),
         terminator: Some(Terminator { source_info, kind: term }),
@@ -1302,7 +1302,7 @@ fn insert_clean_drop(body: &mut Body<'_>) -> BasicBlock {
     })
 }
 
-/// An operation that can be performed on a generator.
+/// An operation that can be performed on a coroutine.
 #[derive(PartialEq, Copy, Clone)]
 enum Operation {
     Resume,
@@ -1381,64 +1381,64 @@ fn create_cases<'tcx>(
 }
 
 #[instrument(level = "debug", skip(tcx), ret)]
-pub(crate) fn mir_generator_witnesses<'tcx>(
+pub(crate) fn mir_coroutine_witnesses<'tcx>(
     tcx: TyCtxt<'tcx>,
     def_id: LocalDefId,
-) -> Option<GeneratorLayout<'tcx>> {
+) -> Option<CoroutineLayout<'tcx>> {
     let (body, _) = tcx.mir_promoted(def_id);
     let body = body.borrow();
     let body = &*body;
 
-    // The first argument is the generator type passed by value
+    // The first argument is the coroutine type passed by value
     let gen_ty = body.local_decls[ty::CAPTURE_STRUCT_LOCAL].ty;
 
     // Get the interior types and args which typeck computed
     let movable = match *gen_ty.kind() {
-        ty::Generator(_, _, movability) => movability == hir::Movability::Movable,
+        ty::Coroutine(_, _, movability) => movability == hir::Movability::Movable,
         ty::Error(_) => return None,
-        _ => span_bug!(body.span, "unexpected generator type {}", gen_ty),
+        _ => span_bug!(body.span, "unexpected coroutine type {}", gen_ty),
     };
 
-    // When first entering the generator, move the resume argument into its new local.
+    // When first entering the coroutine, move the resume argument into its new local.
     let always_live_locals = always_storage_live_locals(&body);
 
     let liveness_info = locals_live_across_suspend_points(tcx, body, &always_live_locals, movable);
 
     // Extract locals which are live across suspension point into `layout`
-    // `remap` gives a mapping from local indices onto generator struct indices
+    // `remap` gives a mapping from local indices onto coroutine struct indices
     // `storage_liveness` tells us which locals have live storage at suspension points
-    let (_, generator_layout, _) = compute_layout(liveness_info, body);
+    let (_, coroutine_layout, _) = compute_layout(liveness_info, body);
 
-    check_suspend_tys(tcx, &generator_layout, &body);
+    check_suspend_tys(tcx, &coroutine_layout, &body);
 
-    Some(generator_layout)
+    Some(coroutine_layout)
 }
 
 impl<'tcx> MirPass<'tcx> for StateTransform {
     fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
         let Some(yield_ty) = body.yield_ty() else {
-            // This only applies to generators
+            // This only applies to coroutines
             return;
         };
 
-        assert!(body.generator_drop().is_none());
+        assert!(body.coroutine_drop().is_none());
 
-        // The first argument is the generator type passed by value
+        // The first argument is the coroutine type passed by value
         let gen_ty = body.local_decls.raw[1].ty;
 
         // Get the discriminant type and args which typeck computed
         let (discr_ty, movable) = match *gen_ty.kind() {
-            ty::Generator(_, args, movability) => {
-                let args = args.as_generator();
+            ty::Coroutine(_, args, movability) => {
+                let args = args.as_coroutine();
                 (args.discr_ty(tcx), movability == hir::Movability::Movable)
             }
             _ => {
-                tcx.sess.delay_span_bug(body.span, format!("unexpected generator type {gen_ty}"));
+                tcx.sess.delay_span_bug(body.span, format!("unexpected coroutine type {gen_ty}"));
                 return;
             }
         };
 
-        let is_async_kind = matches!(body.generator_kind(), Some(GeneratorKind::Async(_)));
+        let is_async_kind = matches!(body.coroutine_kind(), Some(CoroutineKind::Async(_)));
         let (state_adt_ref, state_args) = if is_async_kind {
             // Compute Poll<return_ty>
             let poll_did = tcx.require_lang_item(LangItem::Poll, None);
@@ -1446,8 +1446,8 @@ impl<'tcx> MirPass<'tcx> for StateTransform {
             let poll_args = tcx.mk_args(&[body.return_ty().into()]);
             (poll_adt_ref, poll_args)
         } else {
-            // Compute GeneratorState<yield_ty, return_ty>
-            let state_did = tcx.require_lang_item(LangItem::GeneratorState, None);
+            // Compute CoroutineState<yield_ty, return_ty>
+            let state_did = tcx.require_lang_item(LangItem::CoroutineState, None);
             let state_adt_ref = tcx.adt_def(state_did);
             let state_args = tcx.mk_args(&[yield_ty.into(), body.return_ty().into()]);
             (state_adt_ref, state_args)
@@ -1465,8 +1465,8 @@ impl<'tcx> MirPass<'tcx> for StateTransform {
 
         // We also replace the resume argument and insert an `Assign`.
         // This is needed because the resume argument `_2` might be live across a `yield`, in which
-        // case there is no `Assign` to it that the transform can turn into a store to the generator
-        // state. After the yield the slot in the generator state would then be uninitialized.
+        // case there is no `Assign` to it that the transform can turn into a store to the coroutine
+        // state. After the yield the slot in the coroutine state would then be uninitialized.
         let resume_local = Local::new(2);
         let resume_ty = if is_async_kind {
             Ty::new_task_context(tcx)
@@ -1475,7 +1475,7 @@ impl<'tcx> MirPass<'tcx> for StateTransform {
         };
         let new_resume_local = replace_local(resume_local, resume_ty, body, tcx);
 
-        // When first entering the generator, move the resume argument into its new local.
+        // When first entering the coroutine, move the resume argument into its new local.
         let source_info = SourceInfo::outermost(body.span);
         let stmts = &mut body.basic_blocks_mut()[START_BLOCK].statements;
         stmts.insert(
@@ -1495,7 +1495,7 @@ impl<'tcx> MirPass<'tcx> for StateTransform {
             locals_live_across_suspend_points(tcx, body, &always_live_locals, movable);
 
         if tcx.sess.opts.unstable_opts.validate_mir {
-            let mut vis = EnsureGeneratorFieldAssignmentsNeverAlias {
+            let mut vis = EnsureCoroutineFieldAssignmentsNeverAlias {
                 assigned_local: None,
                 saved_locals: &liveness_info.saved_locals,
                 storage_conflicts: &liveness_info.storage_conflicts,
@@ -1505,16 +1505,16 @@ impl<'tcx> MirPass<'tcx> for StateTransform {
         }
 
         // Extract locals which are live across suspension point into `layout`
-        // `remap` gives a mapping from local indices onto generator struct indices
+        // `remap` gives a mapping from local indices onto coroutine struct indices
         // `storage_liveness` tells us which locals have live storage at suspension points
         let (remap, layout, storage_liveness) = compute_layout(liveness_info, body);
 
         let can_return = can_return(tcx, body, tcx.param_env(body.source.def_id()));
 
-        // Run the transformation which converts Places from Local to generator struct
+        // Run the transformation which converts Places from Local to coroutine struct
         // accesses for locals in `remap`.
-        // It also rewrites `return x` and `yield y` as writing a new generator state and returning
-        // either GeneratorState::Complete(x) and GeneratorState::Yielded(y),
+        // It also rewrites `return x` and `yield y` as writing a new coroutine state and returning
+        // either CoroutineState::Complete(x) and CoroutineState::Yielded(y),
         // or Poll::Ready(x) and Poll::Pending respectively depending on `is_async_kind`.
         let mut transform = TransformVisitor {
             tcx,
@@ -1541,30 +1541,30 @@ impl<'tcx> MirPass<'tcx> for StateTransform {
             var.argument_index = None;
         }
 
-        body.generator.as_mut().unwrap().yield_ty = None;
-        body.generator.as_mut().unwrap().generator_layout = Some(layout);
+        body.coroutine.as_mut().unwrap().yield_ty = None;
+        body.coroutine.as_mut().unwrap().coroutine_layout = Some(layout);
 
-        // Insert `drop(generator_struct)` which is used to drop upvars for generators in
+        // Insert `drop(coroutine_struct)` which is used to drop upvars for coroutines in
         // the unresumed state.
-        // This is expanded to a drop ladder in `elaborate_generator_drops`.
+        // This is expanded to a drop ladder in `elaborate_coroutine_drops`.
         let drop_clean = insert_clean_drop(body);
 
-        dump_mir(tcx, false, "generator_pre-elab", &0, body, |_, _| Ok(()));
+        dump_mir(tcx, false, "coroutine_pre-elab", &0, body, |_, _| Ok(()));
 
-        // Expand `drop(generator_struct)` to a drop ladder which destroys upvars.
+        // Expand `drop(coroutine_struct)` to a drop ladder which destroys upvars.
         // If any upvars are moved out of, drop elaboration will handle upvar destruction.
         // However we need to also elaborate the code generated by `insert_clean_drop`.
-        elaborate_generator_drops(tcx, body);
+        elaborate_coroutine_drops(tcx, body);
 
-        dump_mir(tcx, false, "generator_post-transform", &0, body, |_, _| Ok(()));
+        dump_mir(tcx, false, "coroutine_post-transform", &0, body, |_, _| Ok(()));
 
-        // Create a copy of our MIR and use it to create the drop shim for the generator
-        let drop_shim = create_generator_drop_shim(tcx, &transform, gen_ty, body, drop_clean);
+        // Create a copy of our MIR and use it to create the drop shim for the coroutine
+        let drop_shim = create_coroutine_drop_shim(tcx, &transform, gen_ty, body, drop_clean);
 
-        body.generator.as_mut().unwrap().generator_drop = Some(drop_shim);
+        body.coroutine.as_mut().unwrap().coroutine_drop = Some(drop_shim);
 
-        // Create the Generator::resume / Future::poll function
-        create_generator_resume_function(tcx, transform, body, can_return);
+        // Create the Coroutine::resume / Future::poll function
+        create_coroutine_resume_function(tcx, transform, body, can_return);
 
         // Run derefer to fix Derefs that are not in the first place
         deref_finder(tcx, body);
@@ -1572,25 +1572,25 @@ impl<'tcx> MirPass<'tcx> for StateTransform {
 }
 
 /// Looks for any assignments between locals (e.g., `_4 = _5`) that will both be converted to fields
-/// in the generator state machine but whose storage is not marked as conflicting
+/// in the coroutine state machine but whose storage is not marked as conflicting
 ///
 /// Validation needs to happen immediately *before* `TransformVisitor` is invoked, not after.
 ///
 /// This condition would arise when the assignment is the last use of `_5` but the initial
 /// definition of `_4` if we weren't extra careful to mark all locals used inside a statement as
-/// conflicting. Non-conflicting generator saved locals may be stored at the same location within
-/// the generator state machine, which would result in ill-formed MIR: the left-hand and right-hand
+/// conflicting. Non-conflicting coroutine saved locals may be stored at the same location within
+/// the coroutine state machine, which would result in ill-formed MIR: the left-hand and right-hand
 /// sides of an assignment may not alias. This caused a miscompilation in [#73137].
 ///
 /// [#73137]: https://github.com/rust-lang/rust/issues/73137
-struct EnsureGeneratorFieldAssignmentsNeverAlias<'a> {
-    saved_locals: &'a GeneratorSavedLocals,
-    storage_conflicts: &'a BitMatrix<GeneratorSavedLocal, GeneratorSavedLocal>,
-    assigned_local: Option<GeneratorSavedLocal>,
+struct EnsureCoroutineFieldAssignmentsNeverAlias<'a> {
+    saved_locals: &'a CoroutineSavedLocals,
+    storage_conflicts: &'a BitMatrix<CoroutineSavedLocal, CoroutineSavedLocal>,
+    assigned_local: Option<CoroutineSavedLocal>,
 }
 
-impl EnsureGeneratorFieldAssignmentsNeverAlias<'_> {
-    fn saved_local_for_direct_place(&self, place: Place<'_>) -> Option<GeneratorSavedLocal> {
+impl EnsureCoroutineFieldAssignmentsNeverAlias<'_> {
+    fn saved_local_for_direct_place(&self, place: Place<'_>) -> Option<CoroutineSavedLocal> {
         if place.is_indirect() {
             return None;
         }
@@ -1609,7 +1609,7 @@ impl EnsureGeneratorFieldAssignmentsNeverAlias<'_> {
     }
 }
 
-impl<'tcx> Visitor<'tcx> for EnsureGeneratorFieldAssignmentsNeverAlias<'_> {
+impl<'tcx> Visitor<'tcx> for EnsureCoroutineFieldAssignmentsNeverAlias<'_> {
     fn visit_place(&mut self, place: &Place<'tcx>, context: PlaceContext, location: Location) {
         let Some(lhs) = self.assigned_local else {
             // This visitor only invokes `visit_place` for the right-hand side of an assignment
@@ -1624,7 +1624,7 @@ impl<'tcx> Visitor<'tcx> for EnsureGeneratorFieldAssignmentsNeverAlias<'_> {
 
         if !self.storage_conflicts.contains(lhs, rhs) {
             bug!(
-                "Assignment between generator saved locals whose storage is not \
+                "Assignment between coroutine saved locals whose storage is not \
                     marked as conflicting: {:?}: {:?} = {:?}",
                 location,
                 lhs,
@@ -1691,14 +1691,14 @@ impl<'tcx> Visitor<'tcx> for EnsureGeneratorFieldAssignmentsNeverAlias<'_> {
             | TerminatorKind::Unreachable
             | TerminatorKind::Drop { .. }
             | TerminatorKind::Assert { .. }
-            | TerminatorKind::GeneratorDrop
+            | TerminatorKind::CoroutineDrop
             | TerminatorKind::FalseEdge { .. }
             | TerminatorKind::FalseUnwind { .. } => {}
         }
     }
 }
 
-fn check_suspend_tys<'tcx>(tcx: TyCtxt<'tcx>, layout: &GeneratorLayout<'tcx>, body: &Body<'tcx>) {
+fn check_suspend_tys<'tcx>(tcx: TyCtxt<'tcx>, layout: &CoroutineLayout<'tcx>, body: &Body<'tcx>) {
     let mut linted_tys = FxHashSet::default();
 
     // We want a user-facing param-env.
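`check_suspend_tys` is the hook for suspend-point type lints (e.g. `must_not_suspend`): it inspects each saved field type that is live across a suspension point. The classic pattern it targets, sketched below (whether this actually fires depends on the type carrying the annotation and the lint being enabled):

```rust
use std::sync::Mutex;

// A guard held live across an await point: exactly the shape that
// suspend-type checks are designed to flag.
async fn hold_guard_across_await(m: &Mutex<i32>) {
    let guard = m.lock().unwrap(); // guard stays live...
    async {}.await;                // ...across this suspension point
    drop(guard);
}
```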
diff --git a/compiler/rustc_mir_transform/src/cost_checker.rs b/compiler/rustc_mir_transform/src/cost_checker.rs
new file mode 100644
index 00000000000..9bb26693cb2
--- /dev/null
+++ b/compiler/rustc_mir_transform/src/cost_checker.rs
@@ -0,0 +1,98 @@
+use rustc_middle::mir::visit::*;
+use rustc_middle::mir::*;
+use rustc_middle::ty::{self, ParamEnv, Ty, TyCtxt};
+
+const INSTR_COST: usize = 5;
+const CALL_PENALTY: usize = 25;
+const LANDINGPAD_PENALTY: usize = 50;
+const RESUME_PENALTY: usize = 45;
+
+/// Estimates the size cost of a callee body, used to guide inlining decisions.
+#[derive(Clone)]
+pub(crate) struct CostChecker<'b, 'tcx> {
+    tcx: TyCtxt<'tcx>,
+    param_env: ParamEnv<'tcx>,
+    cost: usize,
+    callee_body: &'b Body<'tcx>,
+    instance: Option<ty::Instance<'tcx>>,
+}
+
+impl<'b, 'tcx> CostChecker<'b, 'tcx> {
+    pub fn new(
+        tcx: TyCtxt<'tcx>,
+        param_env: ParamEnv<'tcx>,
+        instance: Option<ty::Instance<'tcx>>,
+        callee_body: &'b Body<'tcx>,
+    ) -> CostChecker<'b, 'tcx> {
+        CostChecker { tcx, param_env, callee_body, instance, cost: 0 }
+    }
+
+    pub fn cost(&self) -> usize {
+        self.cost
+    }
+
+    fn instantiate_ty(&self, v: Ty<'tcx>) -> Ty<'tcx> {
+        if let Some(instance) = self.instance {
+            instance.instantiate_mir(self.tcx, ty::EarlyBinder::bind(&v))
+        } else {
+            v
+        }
+    }
+}
+
+impl<'tcx> Visitor<'tcx> for CostChecker<'_, 'tcx> {
+    fn visit_statement(&mut self, statement: &Statement<'tcx>, _: Location) {
+        // Don't count StorageLive/StorageDead in the inlining cost.
+        match statement.kind {
+            StatementKind::StorageLive(_)
+            | StatementKind::StorageDead(_)
+            | StatementKind::Deinit(_)
+            | StatementKind::Nop => {}
+            _ => self.cost += INSTR_COST,
+        }
+    }
+
+    fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, _: Location) {
+        let tcx = self.tcx;
+        match terminator.kind {
+            TerminatorKind::Drop { ref place, unwind, .. } => {
+                // If the place doesn't actually need dropping, treat it like a regular goto.
+                let ty = self.instantiate_ty(place.ty(self.callee_body, tcx).ty);
+                if ty.needs_drop(tcx, self.param_env) {
+                    self.cost += CALL_PENALTY;
+                    if let UnwindAction::Cleanup(_) = unwind {
+                        self.cost += LANDINGPAD_PENALTY;
+                    }
+                } else {
+                    self.cost += INSTR_COST;
+                }
+            }
+            TerminatorKind::Call { func: Operand::Constant(ref f), unwind, .. } => {
+                let fn_ty = self.instantiate_ty(f.const_.ty());
+                self.cost += if let ty::FnDef(def_id, _) = *fn_ty.kind() && tcx.is_intrinsic(def_id) {
+                    // Don't give intrinsics the extra penalty for calls
+                    INSTR_COST
+                } else {
+                    CALL_PENALTY
+                };
+                if let UnwindAction::Cleanup(_) = unwind {
+                    self.cost += LANDINGPAD_PENALTY;
+                }
+            }
+            TerminatorKind::Assert { unwind, .. } => {
+                self.cost += CALL_PENALTY;
+                if let UnwindAction::Cleanup(_) = unwind {
+                    self.cost += LANDINGPAD_PENALTY;
+                }
+            }
+            TerminatorKind::UnwindResume => self.cost += RESUME_PENALTY,
+            TerminatorKind::InlineAsm { unwind, .. } => {
+                self.cost += INSTR_COST;
+                if let UnwindAction::Cleanup(_) = unwind {
+                    self.cost += LANDINGPAD_PENALTY;
+                }
+            }
+            _ => self.cost += INSTR_COST,
+        }
+    }
+}
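The constants above drive a simple additive model. A worked example in plain arithmetic (not rustc API): a callee with four ordinary statements, one non-intrinsic call that has a cleanup edge, and one assert that does not unwind:

```rust
const INSTR_COST: usize = 5;
const CALL_PENALTY: usize = 25;
const LANDINGPAD_PENALTY: usize = 50;

fn main() {
    let cost = 4 * INSTR_COST               // four ordinary statements
        + CALL_PENALTY + LANDINGPAD_PENALTY // the call plus its landing pad
        + CALL_PENALTY;                     // the assert terminator
    assert_eq!(cost, 120);
}
```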
diff --git a/compiler/rustc_mir_transform/src/coverage/counters.rs b/compiler/rustc_mir_transform/src/coverage/counters.rs
index a83ccf8fc3c..d07f59bc72a 100644
--- a/compiler/rustc_mir_transform/src/coverage/counters.rs
+++ b/compiler/rustc_mir_transform/src/coverage/counters.rs
@@ -169,22 +169,22 @@ impl CoverageCounters {
         self.bcb_counters[bcb].as_ref()
     }
 
-    pub(super) fn take_bcb_counter(&mut self, bcb: BasicCoverageBlock) -> Option<BcbCounter> {
-        self.bcb_counters[bcb].take()
-    }
-
-    pub(super) fn drain_bcb_counters(
-        &mut self,
-    ) -> impl Iterator<Item = (BasicCoverageBlock, BcbCounter)> + '_ {
+    pub(super) fn bcb_node_counters(
+        &self,
+    ) -> impl Iterator<Item = (BasicCoverageBlock, &BcbCounter)> {
         self.bcb_counters
-            .iter_enumerated_mut()
-            .filter_map(|(bcb, counter)| Some((bcb, counter.take()?)))
+            .iter_enumerated()
+            .filter_map(|(bcb, counter_kind)| Some((bcb, counter_kind.as_ref()?)))
     }
 
-    pub(super) fn drain_bcb_edge_counters(
-        &mut self,
-    ) -> impl Iterator<Item = ((BasicCoverageBlock, BasicCoverageBlock), BcbCounter)> + '_ {
-        self.bcb_edge_counters.drain()
+    /// For each edge in the BCB graph that has an associated counter, yields
+    /// that edge's *from* and *to* nodes, and its counter.
+    pub(super) fn bcb_edge_counters(
+        &self,
+    ) -> impl Iterator<Item = (BasicCoverageBlock, BasicCoverageBlock, &BcbCounter)> {
+        self.bcb_edge_counters
+            .iter()
+            .map(|(&(from_bcb, to_bcb), counter_kind)| (from_bcb, to_bcb, counter_kind))
     }
 
     pub(super) fn take_expressions(&mut self) -> IndexVec<ExpressionId, Expression> {
diff --git a/compiler/rustc_mir_transform/src/coverage/graph.rs b/compiler/rustc_mir_transform/src/coverage/graph.rs
index 9a7adaada09..6bab62aa854 100644
--- a/compiler/rustc_mir_transform/src/coverage/graph.rs
+++ b/compiler/rustc_mir_transform/src/coverage/graph.rs
@@ -147,7 +147,7 @@ impl CoverageGraph {
                 | TerminatorKind::Unreachable
                 | TerminatorKind::Drop { .. }
                 | TerminatorKind::Call { .. }
-                | TerminatorKind::GeneratorDrop
+                | TerminatorKind::CoroutineDrop
                 | TerminatorKind::Assert { .. }
                 | TerminatorKind::FalseEdge { .. }
                 | TerminatorKind::FalseUnwind { .. }
diff --git a/compiler/rustc_mir_transform/src/coverage/mod.rs b/compiler/rustc_mir_transform/src/coverage/mod.rs
index 6fdaff6b4c0..c9b36ba25ac 100644
--- a/compiler/rustc_mir_transform/src/coverage/mod.rs
+++ b/compiler/rustc_mir_transform/src/coverage/mod.rs
@@ -8,7 +8,7 @@ mod spans;
 mod tests;
 
 use self::counters::{BcbCounter, CoverageCounters};
-use self::graph::{BasicCoverageBlock, BasicCoverageBlockData, CoverageGraph};
+use self::graph::CoverageGraph;
 use self::spans::CoverageSpans;
 
 use crate::MirPass;
@@ -104,7 +104,6 @@ struct Instrumentor<'a, 'tcx> {
     function_source_hash: u64,
     basic_coverage_blocks: CoverageGraph,
     coverage_counters: CoverageCounters,
-    mappings: Vec<Mapping>,
 }
 
 impl<'a, 'tcx> Instrumentor<'a, 'tcx> {
@@ -145,7 +144,6 @@ impl<'a, 'tcx> Instrumentor<'a, 'tcx> {
             function_source_hash,
             basic_coverage_blocks,
             coverage_counters,
-            mappings: Vec::new(),
         }
     }
 
@@ -168,148 +166,99 @@ impl<'a, 'tcx> Instrumentor<'a, 'tcx> {
         // and all `Expression` dependencies (operands) are also generated, for any other
         // `BasicCoverageBlock`s not already associated with a coverage span.
         let bcb_has_coverage_spans = |bcb| coverage_spans.bcb_has_coverage_spans(bcb);
-        let result = self
-            .coverage_counters
-            .make_bcb_counters(&mut self.basic_coverage_blocks, bcb_has_coverage_spans);
-
-        if let Ok(()) = result {
-            ////////////////////////////////////////////////////
-            // Remove the counter or edge counter from of each coverage cpan's associated
-            // `BasicCoverageBlock`, and inject a `Coverage` statement into the MIR.
-            //
-            // `Coverage` statements injected from coverage spans will include the code regions
-            // (source code start and end positions) to be counted by the associated counter.
-            //
-            // These coverage-span-associated counters are removed from their associated
-            // `BasicCoverageBlock`s so that the only remaining counters in the `CoverageGraph`
-            // are indirect counters (to be injected next, without associated code regions).
-            self.inject_coverage_span_counters(&coverage_spans);
-
-            ////////////////////////////////////////////////////
-            // For any remaining `BasicCoverageBlock` counters (that were not associated with
-            // any coverage span), inject `Coverage` statements (_without_ code region spans)
-            // to ensure `BasicCoverageBlock` counters that other `Expression`s may depend on
-            // are in fact counted, even though they don't directly contribute to counting
-            // their own independent code region's coverage.
-            self.inject_indirect_counters();
-        };
+        self.coverage_counters
+            .make_bcb_counters(&mut self.basic_coverage_blocks, bcb_has_coverage_spans)
+            .unwrap_or_else(|e| {
+                bug!("Error processing: {:?}: {:?}", self.mir_body.source.def_id(), e.message)
+            });
 
-        if let Err(e) = result {
-            bug!("Error processing: {:?}: {:?}", self.mir_body.source.def_id(), e.message)
-        };
+        let mappings = self.create_mappings_and_inject_coverage_statements(&coverage_spans);
 
         self.mir_body.function_coverage_info = Some(Box::new(FunctionCoverageInfo {
             function_source_hash: self.function_source_hash,
             num_counters: self.coverage_counters.num_counters(),
             expressions: self.coverage_counters.take_expressions(),
-            mappings: std::mem::take(&mut self.mappings),
+            mappings,
         }));
     }
 
-    /// Injects a single [`StatementKind::Coverage`] for each BCB that has one
-    /// or more coverage spans.
-    fn inject_coverage_span_counters(&mut self, coverage_spans: &CoverageSpans) {
-        let tcx = self.tcx;
-        let source_map = tcx.sess.source_map();
+    /// For each [`BcbCounter`] associated with a BCB node or BCB edge, create
+    /// any corresponding mappings (for BCB nodes only), and inject any necessary
+    /// coverage statements into MIR.
+    fn create_mappings_and_inject_coverage_statements(
+        &mut self,
+        coverage_spans: &CoverageSpans,
+    ) -> Vec<Mapping> {
+        let source_map = self.tcx.sess.source_map();
         let body_span = self.body_span;
 
         use rustc_session::RemapFileNameExt;
         let file_name =
             Symbol::intern(&self.source_file.name.for_codegen(self.tcx.sess).to_string_lossy());
 
-        for (bcb, spans) in coverage_spans.bcbs_with_coverage_spans() {
-            let counter_kind = self.coverage_counters.take_bcb_counter(bcb).unwrap_or_else(|| {
-                bug!("Every BasicCoverageBlock should have a Counter or Expression");
-            });
-
-            let term = counter_kind.as_term();
-            self.mappings.extend(spans.iter().map(|&span| {
-                let code_region = make_code_region(source_map, file_name, span, body_span);
-                Mapping { code_region, term }
-            }));
-
-            inject_statement(
-                self.mir_body,
-                self.make_mir_coverage_kind(&counter_kind),
-                self.bcb_leader_bb(bcb),
-            );
-        }
-    }
+        let mut mappings = Vec::new();
+
+        // Process the counters and spans associated with BCB nodes.
+        for (bcb, counter_kind) in self.coverage_counters.bcb_node_counters() {
+            let spans = coverage_spans.spans_for_bcb(bcb);
+            let has_mappings = !spans.is_empty();
+
+            // If this BCB has any coverage spans, add corresponding mappings to
+            // the mappings table.
+            if has_mappings {
+                let term = counter_kind.as_term();
+                mappings.extend(spans.iter().map(|&span| {
+                    let code_region = make_code_region(source_map, file_name, span, body_span);
+                    Mapping { code_region, term }
+                }));
+            }
 
-    /// At this point, any BCB with coverage counters has already had its counter injected
-    /// into MIR, and had its counter removed from `coverage_counters` (via `take_counter()`).
-    ///
-    /// Any other counter associated with a `BasicCoverageBlock`, or its incoming edge, but not
-    /// associated with a coverage span, should only exist if the counter is an `Expression`
-    /// dependency (one of the expression operands). Collect them, and inject the additional
-    /// counters into the MIR, without a reportable coverage span.
-    fn inject_indirect_counters(&mut self) {
-        let mut bcb_counters_without_direct_coverage_spans = Vec::new();
-        for (target_bcb, counter_kind) in self.coverage_counters.drain_bcb_counters() {
-            bcb_counters_without_direct_coverage_spans.push((None, target_bcb, counter_kind));
-        }
-        for ((from_bcb, target_bcb), counter_kind) in
-            self.coverage_counters.drain_bcb_edge_counters()
-        {
-            bcb_counters_without_direct_coverage_spans.push((
-                Some(from_bcb),
-                target_bcb,
-                counter_kind,
-            ));
+            let do_inject = match counter_kind {
+                // Counter-increment statements always need to be injected.
+                BcbCounter::Counter { .. } => true,
+                // The only purpose of expression-used statements is to detect
+                // when a mapping is unreachable, so we only inject them for
+                // expressions with one or more mappings.
+                BcbCounter::Expression { .. } => has_mappings,
+            };
+            if do_inject {
+                inject_statement(
+                    self.mir_body,
+                    self.make_mir_coverage_kind(counter_kind),
+                    self.basic_coverage_blocks[bcb].leader_bb(),
+                );
+            }
         }
 
-        for (edge_from_bcb, target_bcb, counter_kind) in bcb_counters_without_direct_coverage_spans
-        {
-            match counter_kind {
-                BcbCounter::Counter { .. } => {
-                    let inject_to_bb = if let Some(from_bcb) = edge_from_bcb {
-                        // The MIR edge starts `from_bb` (the outgoing / last BasicBlock in
-                        // `from_bcb`) and ends at `to_bb` (the incoming / first BasicBlock in the
-                        // `target_bcb`; also called the `leader_bb`).
-                        let from_bb = self.bcb_last_bb(from_bcb);
-                        let to_bb = self.bcb_leader_bb(target_bcb);
-
-                        let new_bb = inject_edge_counter_basic_block(self.mir_body, from_bb, to_bb);
-                        debug!(
-                            "Edge {:?} (last {:?}) -> {:?} (leader {:?}) requires a new MIR \
-                            BasicBlock {:?}, for unclaimed edge counter {:?}",
-                            edge_from_bcb, from_bb, target_bcb, to_bb, new_bb, counter_kind,
-                        );
-                        new_bb
-                    } else {
-                        let target_bb = self.bcb_last_bb(target_bcb);
-                        debug!(
-                            "{:?} ({:?}) gets a new Coverage statement for unclaimed counter {:?}",
-                            target_bcb, target_bb, counter_kind,
-                        );
-                        target_bb
-                    };
-
-                    inject_statement(
-                        self.mir_body,
-                        self.make_mir_coverage_kind(&counter_kind),
-                        inject_to_bb,
-                    );
-                }
-                // Experessions with no associated spans don't need to inject a statement.
-                BcbCounter::Expression { .. } => {}
+        // Process the counters associated with BCB edges.
+        for (from_bcb, to_bcb, counter_kind) in self.coverage_counters.bcb_edge_counters() {
+            let do_inject = match counter_kind {
+                // Counter-increment statements always need to be injected.
+                BcbCounter::Counter { .. } => true,
+                // BCB-edge expressions never have mappings, so they never need
+                // a corresponding statement.
+                BcbCounter::Expression { .. } => false,
+            };
+            if !do_inject {
+                continue;
             }
-        }
-    }
 
-    #[inline]
-    fn bcb_leader_bb(&self, bcb: BasicCoverageBlock) -> BasicBlock {
-        self.bcb_data(bcb).leader_bb()
-    }
+            // We need to inject a coverage statement into a new BB between the
+            // last BB of `from_bcb` and the first BB of `to_bcb`.
+            let from_bb = self.basic_coverage_blocks[from_bcb].last_bb();
+            let to_bb = self.basic_coverage_blocks[to_bcb].leader_bb();
 
-    #[inline]
-    fn bcb_last_bb(&self, bcb: BasicCoverageBlock) -> BasicBlock {
-        self.bcb_data(bcb).last_bb()
-    }
+            let new_bb = inject_edge_counter_basic_block(self.mir_body, from_bb, to_bb);
+            debug!(
+                "Edge {from_bcb:?} (last {from_bb:?}) -> {to_bcb:?} (leader {to_bb:?}) \
+                requires a new MIR BasicBlock {new_bb:?} for edge counter {counter_kind:?}",
+            );
+
+            // Inject a counter into the newly-created BB.
+            inject_statement(self.mir_body, self.make_mir_coverage_kind(&counter_kind), new_bb);
+        }
 
-    #[inline]
-    fn bcb_data(&self, bcb: BasicCoverageBlock) -> &BasicCoverageBlockData {
-        &self.basic_coverage_blocks[bcb]
+        mappings
     }
 
     fn make_mir_coverage_kind(&self, counter_kind: &BcbCounter) -> CoverageKind {
diff --git a/compiler/rustc_mir_transform/src/coverage/spans.rs b/compiler/rustc_mir_transform/src/coverage/spans.rs
index f1a0f762041..704eea413e1 100644
--- a/compiler/rustc_mir_transform/src/coverage/spans.rs
+++ b/compiler/rustc_mir_transform/src/coverage/spans.rs
@@ -2,7 +2,7 @@ use std::cell::OnceCell;
 
 use rustc_data_structures::graph::WithNumNodes;
 use rustc_index::IndexVec;
-use rustc_middle::mir::{self, AggregateKind, Rvalue, Statement, StatementKind};
+use rustc_middle::mir;
 use rustc_span::{BytePos, ExpnKind, MacroKind, Span, Symbol, DUMMY_SP};
 
 use super::graph::{BasicCoverageBlock, CoverageGraph, START_BCB};
@@ -41,13 +41,8 @@ impl CoverageSpans {
         !self.bcb_to_spans[bcb].is_empty()
     }
 
-    pub(super) fn bcbs_with_coverage_spans(
-        &self,
-    ) -> impl Iterator<Item = (BasicCoverageBlock, &[Span])> {
-        self.bcb_to_spans.iter_enumerated().filter_map(|(bcb, spans)| {
-            // Only yield BCBs that have at least one coverage span.
-            (!spans.is_empty()).then_some((bcb, spans.as_slice()))
-        })
+    pub(super) fn spans_for_bcb(&self, bcb: BasicCoverageBlock) -> &[Span] {
+        &self.bcb_to_spans[bcb]
     }
 }
 
@@ -75,29 +70,15 @@ struct CoverageSpan {
 
 impl CoverageSpan {
     pub fn for_fn_sig(fn_sig_span: Span) -> Self {
-        Self {
-            span: fn_sig_span,
-            expn_span: fn_sig_span,
-            current_macro_or_none: Default::default(),
-            bcb: START_BCB,
-            merged_spans: vec![],
-            is_closure: false,
-        }
+        Self::new(fn_sig_span, fn_sig_span, START_BCB, false)
     }
 
-    pub fn for_statement(
-        statement: &Statement<'_>,
+    pub(super) fn new(
         span: Span,
         expn_span: Span,
         bcb: BasicCoverageBlock,
+        is_closure: bool,
     ) -> Self {
-        let is_closure = match statement.kind {
-            StatementKind::Assign(box (_, Rvalue::Aggregate(box ref kind, _))) => {
-                matches!(kind, AggregateKind::Closure(_, _) | AggregateKind::Generator(_, _, _))
-            }
-            _ => false,
-        };
-
         Self {
             span,
             expn_span,
@@ -108,17 +89,6 @@ impl CoverageSpan {
         }
     }
 
-    pub fn for_terminator(span: Span, expn_span: Span, bcb: BasicCoverageBlock) -> Self {
-        Self {
-            span,
-            expn_span,
-            current_macro_or_none: Default::default(),
-            bcb,
-            merged_spans: vec![span],
-            is_closure: false,
-        }
-    }
-
     pub fn merge_from(&mut self, mut other: CoverageSpan) {
         debug_assert!(self.is_mergeable(&other));
         self.span = self.span.to(other.span);
diff --git a/compiler/rustc_mir_transform/src/coverage/spans/from_mir.rs b/compiler/rustc_mir_transform/src/coverage/spans/from_mir.rs
index 4c20997e633..6189e5379ea 100644
--- a/compiler/rustc_mir_transform/src/coverage/spans/from_mir.rs
+++ b/compiler/rustc_mir_transform/src/coverage/spans/from_mir.rs
@@ -1,5 +1,7 @@
+use rustc_data_structures::captures::Captures;
 use rustc_middle::mir::{
-    self, FakeReadCause, Statement, StatementKind, Terminator, TerminatorKind,
+    self, AggregateKind, FakeReadCause, Rvalue, Statement, StatementKind, Terminator,
+    TerminatorKind,
 };
 use rustc_span::Span;
 
@@ -12,7 +14,7 @@ pub(super) fn mir_to_initial_sorted_coverage_spans(
     body_span: Span,
     basic_coverage_blocks: &CoverageGraph,
 ) -> Vec<CoverageSpan> {
-    let mut initial_spans = Vec::<CoverageSpan>::with_capacity(mir_body.basic_blocks.len() * 2);
+    let mut initial_spans = Vec::with_capacity(mir_body.basic_blocks.len() * 2);
     for (bcb, bcb_data) in basic_coverage_blocks.iter_enumerated() {
         initial_spans.extend(bcb_to_initial_coverage_spans(mir_body, body_span, bcb, bcb_data));
     }
@@ -50,34 +52,41 @@ pub(super) fn mir_to_initial_sorted_coverage_spans(
 // for each `Statement` and `Terminator`. (Note that subsequent stages of coverage analysis will
 // merge some `CoverageSpan`s, at which point a `CoverageSpan` may represent multiple
 // `Statement`s and/or `Terminator`s.)
-fn bcb_to_initial_coverage_spans(
-    mir_body: &mir::Body<'_>,
+fn bcb_to_initial_coverage_spans<'a, 'tcx>(
+    mir_body: &'a mir::Body<'tcx>,
     body_span: Span,
     bcb: BasicCoverageBlock,
-    bcb_data: &BasicCoverageBlockData,
-) -> Vec<CoverageSpan> {
-    bcb_data
-        .basic_blocks
-        .iter()
-        .flat_map(|&bb| {
-            let data = &mir_body[bb];
-            data.statements
-                .iter()
-                .filter_map(move |statement| {
-                    filtered_statement_span(statement).map(|span| {
-                        CoverageSpan::for_statement(
-                            statement,
-                            function_source_span(span, body_span),
-                            span,
-                            bcb,
-                        )
-                    })
-                })
-                .chain(filtered_terminator_span(data.terminator()).map(|span| {
-                    CoverageSpan::for_terminator(function_source_span(span, body_span), span, bcb)
-                }))
-        })
-        .collect()
+    bcb_data: &'a BasicCoverageBlockData,
+) -> impl Iterator<Item = CoverageSpan> + Captures<'a> + Captures<'tcx> {
+    bcb_data.basic_blocks.iter().flat_map(move |&bb| {
+        let data = &mir_body[bb];
+
+        let statement_spans = data.statements.iter().filter_map(move |statement| {
+            let expn_span = filtered_statement_span(statement)?;
+            let span = function_source_span(expn_span, body_span);
+
+            Some(CoverageSpan::new(span, expn_span, bcb, is_closure(statement)))
+        });
+
+        let terminator_span = Some(data.terminator()).into_iter().filter_map(move |terminator| {
+            let expn_span = filtered_terminator_span(terminator)?;
+            let span = function_source_span(expn_span, body_span);
+
+            Some(CoverageSpan::new(span, expn_span, bcb, false))
+        });
+
+        statement_spans.chain(terminator_span)
+    })
+}
+
+fn is_closure(statement: &Statement<'_>) -> bool {
+    match statement.kind {
+        StatementKind::Assign(box (_, Rvalue::Aggregate(box ref agg_kind, _))) => match agg_kind {
+            AggregateKind::Closure(_, _) | AggregateKind::Coroutine(_, _, _) => true,
+            _ => false,
+        },
+        _ => false,
+    }
 }
 
 /// If the MIR `Statement` has a span contributive to computing coverage spans,
@@ -160,7 +169,7 @@ fn filtered_terminator_span(terminator: &Terminator<'_>) -> Option<Span> {
         | TerminatorKind::UnwindTerminate(_)
         | TerminatorKind::Return
         | TerminatorKind::Yield { .. }
-        | TerminatorKind::GeneratorDrop
+        | TerminatorKind::CoroutineDrop
         | TerminatorKind::FalseUnwind { .. }
         | TerminatorKind::InlineAsm { .. } => {
             Some(terminator.source_info.span)
diff --git a/compiler/rustc_mir_transform/src/dest_prop.rs b/compiler/rustc_mir_transform/src/dest_prop.rs
index 99b070c018e..15502adfb5a 100644
--- a/compiler/rustc_mir_transform/src/dest_prop.rs
+++ b/compiler/rustc_mir_transform/src/dest_prop.rs
@@ -114,7 +114,7 @@
 //! approach that only works for some classes of CFGs:
 //! - rustc now has a powerful dataflow analysis framework that can handle forwards and backwards
 //!   analyses efficiently.
-//! - Layout optimizations for generators have been added to improve code generation for
+//! - Layout optimizations for coroutines have been added to improve code generation for
 //!   async/await, which are very similar in spirit to what this optimization does.
 //!
 //! Also, rustc now has a simple NRVO pass (see `nrvo.rs`), which handles a subset of the cases that
@@ -655,7 +655,7 @@ impl WriteInfo {
                 // `Drop`s create a `&mut` and so are not considered
             }
             TerminatorKind::Yield { .. }
-            | TerminatorKind::GeneratorDrop
+            | TerminatorKind::CoroutineDrop
             | TerminatorKind::FalseEdge { .. }
             | TerminatorKind::FalseUnwind { .. } => {
                 bug!("{:?} not found in this MIR phase", terminator)
diff --git a/compiler/rustc_mir_transform/src/elaborate_drops.rs b/compiler/rustc_mir_transform/src/elaborate_drops.rs
index d18fdaaf22f..59156b2427c 100644
--- a/compiler/rustc_mir_transform/src/elaborate_drops.rs
+++ b/compiler/rustc_mir_transform/src/elaborate_drops.rs
@@ -9,9 +9,9 @@ use rustc_mir_dataflow::elaborate_drops::{elaborate_drop, DropFlagState, Unwind}
 use rustc_mir_dataflow::elaborate_drops::{DropElaborator, DropFlagMode, DropStyle};
 use rustc_mir_dataflow::impls::{MaybeInitializedPlaces, MaybeUninitializedPlaces};
 use rustc_mir_dataflow::move_paths::{LookupResult, MoveData, MovePathIndex};
+use rustc_mir_dataflow::on_all_children_bits;
 use rustc_mir_dataflow::on_lookup_result_bits;
 use rustc_mir_dataflow::MoveDataParamEnv;
-use rustc_mir_dataflow::{on_all_children_bits, on_all_drop_children_bits};
 use rustc_mir_dataflow::{Analysis, ResultsCursor};
 use rustc_span::Span;
 use rustc_target::abi::{FieldIdx, VariantIdx};
@@ -54,16 +54,10 @@ impl<'tcx> MirPass<'tcx> for ElaborateDrops {
 
         let def_id = body.source.def_id();
         let param_env = tcx.param_env_reveal_all_normalized(def_id);
-        let move_data = match MoveData::gather_moves(body, tcx, param_env) {
-            Ok(move_data) => move_data,
-            Err((move_data, _)) => {
-                tcx.sess.delay_span_bug(
-                    body.span,
-                    "No `move_errors` should be allowed in MIR borrowck",
-                );
-                move_data
-            }
-        };
+        // For types that do not need dropping, the behaviour is trivial, so we only
+        // need to track init/uninit state for types that do need dropping.
+        let move_data =
+            MoveData::gather_moves(&body, tcx, param_env, |ty| ty.needs_drop(tcx, param_env));
         let elaborate_patch = {
             let env = MoveDataParamEnv { move_data, param_env };
 
@@ -178,13 +172,19 @@ impl<'a, 'tcx> DropElaborator<'a, 'tcx> for Elaborator<'a, '_, 'tcx> {
                 let mut some_live = false;
                 let mut some_dead = false;
                 let mut children_count = 0;
-                on_all_drop_children_bits(self.tcx(), self.body(), self.ctxt.env, path, |child| {
-                    let (live, dead) = self.ctxt.init_data.maybe_live_dead(child);
-                    debug!("elaborate_drop: state({:?}) = {:?}", child, (live, dead));
-                    some_live |= live;
-                    some_dead |= dead;
-                    children_count += 1;
-                });
+                on_all_children_bits(
+                    self.tcx(),
+                    self.body(),
+                    self.ctxt.move_data(),
+                    path,
+                    |child| {
+                        let (live, dead) = self.ctxt.init_data.maybe_live_dead(child);
+                        debug!("elaborate_drop: state({:?}) = {:?}", child, (live, dead));
+                        some_live |= live;
+                        some_dead |= dead;
+                        children_count += 1;
+                    },
+                );
                 ((some_live, some_dead), children_count != 1)
             }
         };
@@ -296,26 +296,36 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> {
     fn collect_drop_flags(&mut self) {
         for (bb, data) in self.body.basic_blocks.iter_enumerated() {
             let terminator = data.terminator();
-            let place = match terminator.kind {
-                TerminatorKind::Drop { ref place, .. } => place,
-                _ => continue,
-            };
-
-            self.init_data.seek_before(self.body.terminator_loc(bb));
+            let TerminatorKind::Drop { ref place, .. } = terminator.kind else { continue };
 
             let path = self.move_data().rev_lookup.find(place.as_ref());
             debug!("collect_drop_flags: {:?}, place {:?} ({:?})", bb, place, path);
 
-            let path = match path {
-                LookupResult::Exact(e) => e,
-                LookupResult::Parent(None) => continue,
+            match path {
+                LookupResult::Exact(path) => {
+                    self.init_data.seek_before(self.body.terminator_loc(bb));
+                    on_all_children_bits(self.tcx, self.body, self.move_data(), path, |child| {
+                        let (maybe_live, maybe_dead) = self.init_data.maybe_live_dead(child);
+                        debug!(
+                            "collect_drop_flags: collecting {:?} from {:?}@{:?} - {:?}",
+                            child,
+                            place,
+                            path,
+                            (maybe_live, maybe_dead)
+                        );
+                        if maybe_live && maybe_dead {
+                            self.create_drop_flag(child, terminator.source_info.span)
+                        }
+                    });
+                }
+                LookupResult::Parent(None) => {}
                 LookupResult::Parent(Some(parent)) => {
-                    let (_maybe_live, maybe_dead) = self.init_data.maybe_live_dead(parent);
-
                     if self.body.local_decls[place.local].is_deref_temp() {
                         continue;
                     }
 
+                    self.init_data.seek_before(self.body.terminator_loc(bb));
+                    let (_maybe_live, maybe_dead) = self.init_data.maybe_live_dead(parent);
                     if maybe_dead {
                         self.tcx.sess.delay_span_bug(
                             terminator.source_info.span,
@@ -324,80 +334,74 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> {
                             ),
                         );
                     }
-                    continue;
                 }
             };
-
-            on_all_drop_children_bits(self.tcx, self.body, self.env, path, |child| {
-                let (maybe_live, maybe_dead) = self.init_data.maybe_live_dead(child);
-                debug!(
-                    "collect_drop_flags: collecting {:?} from {:?}@{:?} - {:?}",
-                    child,
-                    place,
-                    path,
-                    (maybe_live, maybe_dead)
-                );
-                if maybe_live && maybe_dead {
-                    self.create_drop_flag(child, terminator.source_info.span)
-                }
-            });
         }
     }
 
     fn elaborate_drops(&mut self) {
+        // This function should mirror what `collect_drop_flags` does.
         for (bb, data) in self.body.basic_blocks.iter_enumerated() {
-            let loc = Location { block: bb, statement_index: data.statements.len() };
             let terminator = data.terminator();
+            let TerminatorKind::Drop { place, target, unwind, replace } = terminator.kind else {
+                continue;
+            };
 
-            match terminator.kind {
-                TerminatorKind::Drop { place, target, unwind, replace } => {
-                    self.init_data.seek_before(loc);
-                    match self.move_data().rev_lookup.find(place.as_ref()) {
-                        LookupResult::Exact(path) => {
-                            let unwind = if data.is_cleanup {
-                                Unwind::InCleanup
-                            } else {
-                                match unwind {
-                                    UnwindAction::Cleanup(cleanup) => Unwind::To(cleanup),
-                                    UnwindAction::Continue => Unwind::To(self.patch.resume_block()),
-                                    UnwindAction::Unreachable => {
-                                        Unwind::To(self.patch.unreachable_cleanup_block())
-                                    }
-                                    UnwindAction::Terminate(reason) => {
-                                        debug_assert_ne!(
-                                            reason,
-                                            UnwindTerminateReason::InCleanup,
-                                            "we are not in a cleanup block, InCleanup reason should be impossible"
-                                        );
-                                        Unwind::To(self.patch.terminate_block(reason))
-                                    }
-                                }
-                            };
-                            elaborate_drop(
-                                &mut Elaborator { ctxt: self },
-                                terminator.source_info,
-                                place,
-                                path,
-                                target,
-                                unwind,
-                                bb,
-                            )
+            // This place does not need dropping. It does not have an associated move-path, so the
+            // match below will conservatively keep an unconditional drop. As that drop is useless,
+            // just remove it here and now.
+            if !place
+                .ty(&self.body.local_decls, self.tcx)
+                .ty
+                .needs_drop(self.tcx, self.env.param_env)
+            {
+                self.patch.patch_terminator(bb, TerminatorKind::Goto { target });
+                continue;
+            }
+
+            let path = self.move_data().rev_lookup.find(place.as_ref());
+            match path {
+                LookupResult::Exact(path) => {
+                    let unwind = match unwind {
+                        _ if data.is_cleanup => Unwind::InCleanup,
+                        UnwindAction::Cleanup(cleanup) => Unwind::To(cleanup),
+                        UnwindAction::Continue => Unwind::To(self.patch.resume_block()),
+                        UnwindAction::Unreachable => {
+                            Unwind::To(self.patch.unreachable_cleanup_block())
                         }
-                        LookupResult::Parent(..) => {
-                            if !replace {
-                                self.tcx.sess.delay_span_bug(
-                                    terminator.source_info.span,
-                                    format!("drop of untracked value {bb:?}"),
-                                );
-                            }
-                            // A drop and replace behind a pointer/array/whatever.
-                            // The borrow checker requires that these locations are initialized before the assignment,
-                            // so we just leave an unconditional drop.
-                            assert!(!data.is_cleanup);
+                        UnwindAction::Terminate(reason) => {
+                            debug_assert_ne!(
+                                reason,
+                                UnwindTerminateReason::InCleanup,
+                                "we are not in a cleanup block, InCleanup reason should be impossible"
+                            );
+                            Unwind::To(self.patch.terminate_block(reason))
                         }
+                    };
+                    self.init_data.seek_before(self.body.terminator_loc(bb));
+                    elaborate_drop(
+                        &mut Elaborator { ctxt: self },
+                        terminator.source_info,
+                        place,
+                        path,
+                        target,
+                        unwind,
+                        bb,
+                    )
+                }
+                LookupResult::Parent(None) => {}
+                LookupResult::Parent(Some(_)) => {
+                    if !replace {
+                        self.tcx.sess.delay_span_bug(
+                            terminator.source_info.span,
+                            format!("drop of untracked value {bb:?}"),
+                        );
                     }
+                    // A drop and replace behind a pointer/array/whatever.
+                    // The borrow checker requires that these locations are initialized before the assignment,
+                    // so we just leave an unconditional drop.
+                    assert!(!data.is_cleanup);
                 }
-                _ => continue,
             }
         }
     }
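To make the drop-flag criterion concrete, an illustrative example (not taken from this commit): a drop flag is created exactly for a place that is maybe-live and maybe-dead at its `Drop`, and with the `needs_drop` filter above, a place whose type never needs dropping is no longer tracked at all:

    fn example(cond: bool) {
        let s = String::new(); // needs drop: gets a move path
        if cond {
            drop(s); // `s` is moved out on this path only...
        }
        // ...so at the implicit `Drop(s)` here, `s` is maybe-live and
        // maybe-dead: elaboration emits a drop flag and a conditional drop.

        let n = 0u32; // `u32` never needs drop: under the new filter it is
        let _ = n; //    not even given a move path.
    }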
diff --git a/compiler/rustc_mir_transform/src/ffi_unwind_calls.rs b/compiler/rustc_mir_transform/src/ffi_unwind_calls.rs
index d202860840c..26fcfad8287 100644
--- a/compiler/rustc_mir_transform/src/ffi_unwind_calls.rs
+++ b/compiler/rustc_mir_transform/src/ffi_unwind_calls.rs
@@ -58,7 +58,7 @@ fn has_ffi_unwind_calls(tcx: TyCtxt<'_>, local_def_id: LocalDefId) -> bool {
     let body_abi = match body_ty.kind() {
         ty::FnDef(..) => body_ty.fn_sig(tcx).abi(),
         ty::Closure(..) => Abi::RustCall,
-        ty::Generator(..) => Abi::Rust,
+        ty::Coroutine(..) => Abi::Rust,
         _ => span_bug!(body.span, "unexpected body ty: {:?}", body_ty),
     };
     let body_can_unwind = layout::fn_can_unwind(tcx, Some(def_id), body_abi);
diff --git a/compiler/rustc_mir_transform/src/gvn.rs b/compiler/rustc_mir_transform/src/gvn.rs
index c710e460dcb..eece7c3e834 100644
--- a/compiler/rustc_mir_transform/src/gvn.rs
+++ b/compiler/rustc_mir_transform/src/gvn.rs
@@ -383,7 +383,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
                     AggregateKind::Array(..)
                     | AggregateKind::Tuple
                     | AggregateKind::Closure(..)
-                    | AggregateKind::Generator(..) => FIRST_VARIANT,
+                    | AggregateKind::Coroutine(..) => FIRST_VARIANT,
                     AggregateKind::Adt(_, variant_index, _, _, None) => variant_index,
                     // Do not track unions.
                     AggregateKind::Adt(_, _, _, _, Some(_)) => return None,
diff --git a/compiler/rustc_mir_transform/src/inline.rs b/compiler/rustc_mir_transform/src/inline.rs
index 8f578b69694..8b33e00c63c 100644
--- a/compiler/rustc_mir_transform/src/inline.rs
+++ b/compiler/rustc_mir_transform/src/inline.rs
@@ -14,6 +14,7 @@ use rustc_session::config::OptLevel;
 use rustc_target::abi::FieldIdx;
 use rustc_target::spec::abi::Abi;
 
+use crate::cost_checker::CostChecker;
 use crate::simplify::{remove_dead_blocks, CfgSimplifier};
 use crate::util;
 use crate::MirPass;
@@ -22,11 +23,6 @@ use std::ops::{Range, RangeFrom};
 
 pub(crate) mod cycle;
 
-const INSTR_COST: usize = 5;
-const CALL_PENALTY: usize = 25;
-const LANDINGPAD_PENALTY: usize = 50;
-const RESUME_PENALTY: usize = 45;
-
 const TOP_DOWN_DEPTH_LIMIT: usize = 5;
 
 pub struct Inline;
@@ -79,10 +75,10 @@ fn inline<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) -> bool {
     if body.source.promoted.is_some() {
         return false;
     }
-    // Avoid inlining into generators, since their `optimized_mir` is used for layout computation,
+    // Avoid inlining into coroutines, since their `optimized_mir` is used for layout computation,
     // which can create a cycle, even when no attempt is made to inline the function in the other
     // direction.
-    if body.generator.is_some() {
+    if body.coroutine.is_some() {
         return false;
     }
 
@@ -479,13 +475,8 @@ impl<'tcx> Inliner<'tcx> {
 
         // FIXME: Give a bonus to functions with only a single caller
 
-        let mut checker = CostChecker {
-            tcx: self.tcx,
-            param_env: self.param_env,
-            instance: callsite.callee,
-            callee_body,
-            cost: 0,
-        };
+        let mut checker =
+            CostChecker::new(self.tcx, self.param_env, Some(callsite.callee), callee_body);
 
         // Traverse the MIR manually so we can account for the effects of inlining on the CFG.
         let mut work_list = vec![START_BLOCK];
@@ -530,7 +521,7 @@ impl<'tcx> Inliner<'tcx> {
         // That attribute is often applied to very large functions that exceed LLVM's (very
         // generous) inlining threshold. Such functions are very poor MIR inlining candidates.
         // Always inlining #[inline(always)] functions in MIR, on net, slows down the compiler.
-        let cost = checker.cost;
+        let cost = checker.cost();
         if cost <= threshold {
             debug!("INLINING {:?} [cost={} <= threshold={}]", callsite, cost, threshold);
             Ok(())
@@ -803,81 +794,6 @@ impl<'tcx> Inliner<'tcx> {
     }
 }
 
-/// Verify that the callee body is compatible with the caller.
-///
-/// This visitor mostly computes the inlining cost,
-/// but also needs to verify that types match because of normalization failure.
-struct CostChecker<'b, 'tcx> {
-    tcx: TyCtxt<'tcx>,
-    param_env: ParamEnv<'tcx>,
-    cost: usize,
-    callee_body: &'b Body<'tcx>,
-    instance: ty::Instance<'tcx>,
-}
-
-impl<'tcx> Visitor<'tcx> for CostChecker<'_, 'tcx> {
-    fn visit_statement(&mut self, statement: &Statement<'tcx>, _: Location) {
-        // Don't count StorageLive/StorageDead in the inlining cost.
-        match statement.kind {
-            StatementKind::StorageLive(_)
-            | StatementKind::StorageDead(_)
-            | StatementKind::Deinit(_)
-            | StatementKind::Nop => {}
-            _ => self.cost += INSTR_COST,
-        }
-    }
-
-    fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, _: Location) {
-        let tcx = self.tcx;
-        match terminator.kind {
-            TerminatorKind::Drop { ref place, unwind, .. } => {
-                // If the place doesn't actually need dropping, treat it like a regular goto.
-                let ty = self.instance.instantiate_mir(
-                    tcx,
-                    ty::EarlyBinder::bind(&place.ty(self.callee_body, tcx).ty),
-                );
-                if ty.needs_drop(tcx, self.param_env) {
-                    self.cost += CALL_PENALTY;
-                    if let UnwindAction::Cleanup(_) = unwind {
-                        self.cost += LANDINGPAD_PENALTY;
-                    }
-                } else {
-                    self.cost += INSTR_COST;
-                }
-            }
-            TerminatorKind::Call { func: Operand::Constant(ref f), unwind, .. } => {
-                let fn_ty =
-                    self.instance.instantiate_mir(tcx, ty::EarlyBinder::bind(&f.const_.ty()));
-                self.cost += if let ty::FnDef(def_id, _) = *fn_ty.kind()
-                    && tcx.is_intrinsic(def_id)
-                {
-                    // Don't give intrinsics the extra penalty for calls
-                    INSTR_COST
-                } else {
-                    CALL_PENALTY
-                };
-                if let UnwindAction::Cleanup(_) = unwind {
-                    self.cost += LANDINGPAD_PENALTY;
-                }
-            }
-            TerminatorKind::Assert { unwind, .. } => {
-                self.cost += CALL_PENALTY;
-                if let UnwindAction::Cleanup(_) = unwind {
-                    self.cost += LANDINGPAD_PENALTY;
-                }
-            }
-            TerminatorKind::UnwindResume => self.cost += RESUME_PENALTY,
-            TerminatorKind::InlineAsm { unwind, .. } => {
-                self.cost += INSTR_COST;
-                if let UnwindAction::Cleanup(_) = unwind {
-                    self.cost += LANDINGPAD_PENALTY;
-                }
-            }
-            _ => self.cost += INSTR_COST,
-        }
-    }
-}
-
 /**
  * Integrator.
  *
@@ -1014,7 +930,7 @@ impl<'tcx> MutVisitor<'tcx> for Integrator<'_, 'tcx> {
         }
 
         match terminator.kind {
-            TerminatorKind::GeneratorDrop | TerminatorKind::Yield { .. } => bug!(),
+            TerminatorKind::CoroutineDrop | TerminatorKind::Yield { .. } => bug!(),
             TerminatorKind::Goto { ref mut target } => {
                 *target = self.map_block(*target);
             }
diff --git a/compiler/rustc_mir_transform/src/jump_threading.rs b/compiler/rustc_mir_transform/src/jump_threading.rs
new file mode 100644
index 00000000000..7b918be4474
--- /dev/null
+++ b/compiler/rustc_mir_transform/src/jump_threading.rs
@@ -0,0 +1,759 @@
+//! A jump threading optimization.
+//!
+//! This optimization seeks to replace join-then-switch control flow patterns with straight jumps:
+//!    X = 0                                      X = 0
+//! ------------\      /--------              ------------
+//!    X = 1     X----X SwitchInt(X)     =>       X = 1
+//! ------------/      \--------              ------------
+//!
+//!
+//! We proceed by walking the CFG backwards, starting from each `SwitchInt` terminator,
+//! looking for assignments that will turn the `SwitchInt` into a simple `Goto`.
+//!
+//! The algorithm maintains a set of replacement conditions:
+//! - `conditions[place]` contains `Condition { value, polarity: Eq, target }`
+//!   if assigning `value` to `place` turns the `SwitchInt` into `Goto { target }`.
+//! - `conditions[place]` contains `Condition { value, polarity: Ne, target }`
+//!   if assigning anything different from `value` to `place` turns the `SwitchInt`
+//!   into `Goto { target }`.
+//!
+//! In this file, we denote as `place ?= value` the existence of a replacement condition
+//! on `place` with the given `value`, irrespective of the polarity and target of that
+//! replacement condition.
+//!
+//! We then walk the CFG backwards transforming the set of conditions.
+//! When we find a fulfilling assignment, we record a `ThreadingOpportunity`.
+//! All `ThreadingOpportunity`s are applied to the body, by duplicating blocks if required.
+//!
+//! The optimization search can be very heavy, as it performs a DFS on MIR starting from
+//! each `SwitchInt` terminator. To manage the complexity, we:
+//! - bound the maximum depth by a constant `MAX_BACKTRACK`;
+//! - only traverse `Goto` terminators.
+//!
+//! We try to avoid creating irreducible control-flow by not threading through a loop header.
+//!
+//! Likewise, applying the optimization can create a lot of new MIR, so we bound the instruction
+//! cost by `MAX_COST`.
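+//!
+//! For example, in source terms:
+//!
+//! ```rust,ignore (illustrative)
+//! fn f(cond: bool) -> u32 {
+//!     // Each predecessor of the join block assigns a known constant to `x`,
+//!     // so the `SwitchInt(x)` in MIR can be replaced by a direct `Goto`
+//!     // from each predecessor, duplicating the relevant arm.
+//!     let x = if cond { 0 } else { 1 };
+//!     match x {
+//!         0 => 10,
+//!         _ => 20,
+//!     }
+//! }
+//! ```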
+
+use rustc_arena::DroplessArena;
+use rustc_data_structures::fx::FxHashSet;
+use rustc_index::bit_set::BitSet;
+use rustc_index::IndexVec;
+use rustc_middle::mir::visit::Visitor;
+use rustc_middle::mir::*;
+use rustc_middle::ty::{self, ScalarInt, Ty, TyCtxt};
+use rustc_mir_dataflow::value_analysis::{Map, PlaceIndex, State, TrackElem};
+
+use crate::cost_checker::CostChecker;
+use crate::MirPass;
+
+pub struct JumpThreading;
+
+const MAX_BACKTRACK: usize = 5;
+const MAX_COST: usize = 100;
+const MAX_PLACES: usize = 100;
+
+impl<'tcx> MirPass<'tcx> for JumpThreading {
+    fn is_enabled(&self, sess: &rustc_session::Session) -> bool {
+        sess.mir_opt_level() >= 4
+    }
+
+    #[instrument(skip_all, level = "debug")]
+    fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+        let def_id = body.source.def_id();
+        debug!(?def_id);
+
+        let param_env = tcx.param_env_reveal_all_normalized(def_id);
+        let map = Map::new(tcx, body, Some(MAX_PLACES));
+        let loop_headers = loop_headers(body);
+
+        let arena = DroplessArena::default();
+        let mut finder = TOFinder {
+            tcx,
+            param_env,
+            body,
+            arena: &arena,
+            map: &map,
+            loop_headers: &loop_headers,
+            opportunities: Vec::new(),
+        };
+
+        for (bb, bbdata) in body.basic_blocks.iter_enumerated() {
+            debug!(?bb, term = ?bbdata.terminator());
+            if bbdata.is_cleanup || loop_headers.contains(bb) {
+                continue;
+            }
+            let Some((discr, targets)) = bbdata.terminator().kind.as_switch() else { continue };
+            let Some(discr) = discr.place() else { continue };
+            debug!(?discr, ?bb);
+
+            let discr_ty = discr.ty(body, tcx).ty;
+            let Ok(discr_layout) = tcx.layout_of(param_env.and(discr_ty)) else { continue };
+
+            let Some(discr) = finder.map.find(discr.as_ref()) else { continue };
+            debug!(?discr);
+
+            let cost = CostChecker::new(tcx, param_env, None, body);
+
+            let mut state = State::new(ConditionSet::default(), &finder.map);
+
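+            // A two-way `SwitchInt` (an `if`) yields both an `Eq` condition (jump to
+            // `then` if the discriminant equals `value`) and an `Ne` condition (jump
+            // to `else_` otherwise); a general switch yields one `Eq` condition per
+            // listed value.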
+            let conds = if let Some((value, then, else_)) = targets.as_static_if() {
+                let Some(value) = ScalarInt::try_from_uint(value, discr_layout.size) else {
+                    continue;
+                };
+                arena.alloc_from_iter([
+                    Condition { value, polarity: Polarity::Eq, target: then },
+                    Condition { value, polarity: Polarity::Ne, target: else_ },
+                ])
+            } else {
+                arena.alloc_from_iter(targets.iter().filter_map(|(value, target)| {
+                    let value = ScalarInt::try_from_uint(value, discr_layout.size)?;
+                    Some(Condition { value, polarity: Polarity::Eq, target })
+                }))
+            };
+            let conds = ConditionSet(conds);
+            state.insert_value_idx(discr, conds, &finder.map);
+
+            finder.find_opportunity(bb, state, cost, 0);
+        }
+
+        let opportunities = finder.opportunities;
+        debug!(?opportunities);
+        if opportunities.is_empty() {
+            return;
+        }
+
+        // Verify that we do not thread through a loop header.
+        for to in opportunities.iter() {
+            assert!(to.chain.iter().all(|&block| !loop_headers.contains(block)));
+        }
+        OpportunitySet::new(body, opportunities).apply(body);
+    }
+}
+
+#[derive(Debug)]
+struct ThreadingOpportunity {
+    /// The list of `BasicBlock`s from the one that found the opportunity to the `SwitchInt`.
+    chain: Vec<BasicBlock>,
+    /// The `SwitchInt` will be replaced by `Goto { target }`.
+    target: BasicBlock,
+}
+
+struct TOFinder<'tcx, 'a> {
+    tcx: TyCtxt<'tcx>,
+    param_env: ty::ParamEnv<'tcx>,
+    body: &'a Body<'tcx>,
+    map: &'a Map,
+    loop_headers: &'a BitSet<BasicBlock>,
+    /// We use an arena to avoid cloning the slices when cloning `state`.
+    arena: &'a DroplessArena,
+    opportunities: Vec<ThreadingOpportunity>,
+}
+
+/// Represents the following statement: if we can prove that the current local is
+/// equal/not equal to `value`, jump to `target`.
+#[derive(Copy, Clone, Debug)]
+struct Condition {
+    value: ScalarInt,
+    polarity: Polarity,
+    target: BasicBlock,
+}
+
+#[derive(Copy, Clone, Debug, Eq, PartialEq)]
+enum Polarity {
+    Ne,
+    Eq,
+}
+
+impl Condition {
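+    /// Whether assigning `value` to the tracked place fulfills this condition:
+    /// an `Eq` condition matches exactly `self.value`, while an `Ne` condition
+    /// matches any other value.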
+    fn matches(&self, value: ScalarInt) -> bool {
+        (self.value == value) == (self.polarity == Polarity::Eq)
+    }
+
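+    /// Flips the polarity (`Eq` <-> `Ne`), e.g. when a condition is transferred
+    /// across a boolean negation.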
+    fn inv(mut self) -> Self {
+        self.polarity = match self.polarity {
+            Polarity::Eq => Polarity::Ne,
+            Polarity::Ne => Polarity::Eq,
+        };
+        self
+    }
+}
+
+#[derive(Copy, Clone, Debug, Default)]
+struct ConditionSet<'a>(&'a [Condition]);
+
+impl<'a> ConditionSet<'a> {
+    fn iter(self) -> impl Iterator<Item = Condition> + 'a {
+        self.0.iter().copied()
+    }
+
+    fn iter_matches(self, value: ScalarInt) -> impl Iterator<Item = Condition> + 'a {
+        self.iter().filter(move |c| c.matches(value))
+    }
+
+    fn map(self, arena: &'a DroplessArena, f: impl Fn(Condition) -> Condition) -> ConditionSet<'a> {
+        ConditionSet(arena.alloc_from_iter(self.iter().map(f)))
+    }
+}
+
+impl<'tcx, 'a> TOFinder<'tcx, 'a> {
+    fn is_empty(&self, state: &State<ConditionSet<'a>>) -> bool {
+        state.all(|cs| cs.0.is_empty())
+    }
+
+    /// Recursion entry point to find threading opportunities.
+    #[instrument(level = "trace", skip(self, cost), ret)]
+    fn find_opportunity(
+        &mut self,
+        bb: BasicBlock,
+        mut state: State<ConditionSet<'a>>,
+        mut cost: CostChecker<'_, 'tcx>,
+        depth: usize,
+    ) {
+        // Do not thread through loop headers.
+        if self.loop_headers.contains(bb) {
+            return;
+        }
+
+        debug!(cost = ?cost.cost());
+        for (statement_index, stmt) in
+            self.body.basic_blocks[bb].statements.iter().enumerate().rev()
+        {
+            if self.is_empty(&state) {
+                return;
+            }
+
+            cost.visit_statement(stmt, Location { block: bb, statement_index });
+            if cost.cost() > MAX_COST {
+                return;
+            }
+
+            // Attempt to turn the `current_condition` on `lhs` into a condition on another place.
+            self.process_statement(bb, stmt, &mut state);
+
+            // When a statement mutates a place, assignments to that place that happen
+            // above the mutation cannot fulfill a condition.
+            //   _1 = 5 // Whatever happens here, it won't change the result of a `SwitchInt`.
+            //   _1 = 6
+            if let Some((lhs, tail)) = self.mutated_statement(stmt) {
+                state.flood_with_tail_elem(lhs.as_ref(), tail, self.map, ConditionSet::default());
+            }
+        }
+
+        if self.is_empty(&state) || depth >= MAX_BACKTRACK {
+            return;
+        }
+
+        let last_non_rec = self.opportunities.len();
+
+        let predecessors = &self.body.basic_blocks.predecessors()[bb];
+        if let &[pred] = &predecessors[..] && bb != START_BLOCK {
+            let term = self.body.basic_blocks[pred].terminator();
+            match term.kind {
+                TerminatorKind::SwitchInt { ref discr, ref targets } => {
+                    self.process_switch_int(discr, targets, bb, &mut state);
+                    self.find_opportunity(pred, state, cost, depth + 1);
+                }
+                _ => self.recurse_through_terminator(pred, &state, &cost, depth),
+            }
+        } else {
+            for &pred in predecessors {
+                self.recurse_through_terminator(pred, &state, &cost, depth);
+            }
+        }
+
+        let new_tos = &mut self.opportunities[last_non_rec..];
+        debug!(?new_tos);
+
+        // Try to deduplicate threading opportunities.
+        if new_tos.len() > 1
+            && new_tos.len() == predecessors.len()
+            && predecessors
+                .iter()
+                .zip(new_tos.iter())
+                .all(|(&pred, to)| to.chain == &[pred] && to.target == new_tos[0].target)
+        {
+            // All predecessors have a threading opportunity, and they all point to the same block.
+            debug!(?new_tos, "dedup");
+            let first = &mut new_tos[0];
+            *first = ThreadingOpportunity { chain: vec![bb], target: first.target };
+            self.opportunities.truncate(last_non_rec + 1);
+            return;
+        }
+
+        for op in self.opportunities[last_non_rec..].iter_mut() {
+            op.chain.push(bb);
+        }
+    }
+
+    /// Extract the mutated place from a statement.
+    ///
+    /// This method returns the `Place` so we can flood the state in case of a partial assignment.
+    ///     (_1 as Ok).0 = _5;
+    ///     (_1 as Err).0 = _6;
+    /// We want to ensure that a `SwitchInt((_1 as Ok).0)` does not see the first assignment, as
+    /// the value may have been mangled by the second assignment.
+    ///
+    /// In case we assign to a discriminant, we return `Some(TrackElem::Discriminant)`, so we can
+    /// stop at flooding the discriminant, and preserve the variant fields.
+    ///     (_1 as Some).0 = _6;
+    ///     SetDiscriminant(_1, 1);
+    ///     switchInt((_1 as Some).0)
+    #[instrument(level = "trace", skip(self), ret)]
+    fn mutated_statement(
+        &self,
+        stmt: &Statement<'tcx>,
+    ) -> Option<(Place<'tcx>, Option<TrackElem>)> {
+        match stmt.kind {
+            StatementKind::Assign(box (place, _))
+            | StatementKind::Deinit(box place) => Some((place, None)),
+            StatementKind::SetDiscriminant { box place, variant_index: _ } => {
+                Some((place, Some(TrackElem::Discriminant)))
+            }
+            StatementKind::StorageLive(local) | StatementKind::StorageDead(local) => {
+                Some((Place::from(local), None))
+            }
+            StatementKind::Retag(..)
+            | StatementKind::Intrinsic(box NonDivergingIntrinsic::Assume(..))
+            // copy_nonoverlapping takes pointers and mutates the pointed-to value.
+            | StatementKind::Intrinsic(box NonDivergingIntrinsic::CopyNonOverlapping(..))
+            | StatementKind::AscribeUserType(..)
+            | StatementKind::Coverage(..)
+            | StatementKind::FakeRead(..)
+            | StatementKind::ConstEvalCounter
+            | StatementKind::PlaceMention(..)
+            | StatementKind::Nop => None,
+        }
+    }
+
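+    /// The `Option<!>` return type means this can never return `Some`; it only
+    /// exists so the body can use `?` for early exit.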
+    #[instrument(level = "trace", skip(self))]
+    fn process_operand(
+        &mut self,
+        bb: BasicBlock,
+        lhs: PlaceIndex,
+        rhs: &Operand<'tcx>,
+        state: &mut State<ConditionSet<'a>>,
+    ) -> Option<!> {
+        let register_opportunity = |c: Condition| {
+            debug!(?bb, ?c.target, "register");
+            self.opportunities.push(ThreadingOpportunity { chain: vec![bb], target: c.target })
+        };
+
+        match rhs {
+            // If we expect `lhs ?= A`, we have an opportunity if we assume `constant == A`.
+            Operand::Constant(constant) => {
+                let conditions = state.try_get_idx(lhs, self.map)?;
+                let constant =
+                    constant.const_.normalize(self.tcx, self.param_env).try_to_scalar_int()?;
+                conditions.iter_matches(constant).for_each(register_opportunity);
+            }
+            // Transfer the conditions on the copied rhs.
+            Operand::Move(rhs) | Operand::Copy(rhs) => {
+                let rhs = self.map.find(rhs.as_ref())?;
+                state.insert_place_idx(rhs, lhs, self.map);
+            }
+        }
+
+        None
+    }
+
+    #[instrument(level = "trace", skip(self))]
+    fn process_statement(
+        &mut self,
+        bb: BasicBlock,
+        stmt: &Statement<'tcx>,
+        state: &mut State<ConditionSet<'a>>,
+    ) -> Option<!> {
+        let register_opportunity = |c: Condition| {
+            debug!(?bb, ?c.target, "register");
+            self.opportunities.push(ThreadingOpportunity { chain: vec![bb], target: c.target })
+        };
+
+        // Below, `lhs` is the return value of `mutated_statement`,
+        // the place to which `conditions` apply.
+
+        let discriminant_for_variant = |enum_ty: Ty<'tcx>, variant_index| {
+            let discr = enum_ty.discriminant_for_variant(self.tcx, variant_index)?;
+            let discr_layout = self.tcx.layout_of(self.param_env.and(discr.ty)).ok()?;
+            let scalar = ScalarInt::try_from_uint(discr.val, discr_layout.size)?;
+            Some(Operand::const_from_scalar(
+                self.tcx,
+                discr.ty,
+                scalar.into(),
+                rustc_span::DUMMY_SP,
+            ))
+        };
+
+        match &stmt.kind {
+            // If we expect `discriminant(place) ?= A`,
+            // we have an opportunity if `variant_index ?= A`.
+            StatementKind::SetDiscriminant { box place, variant_index } => {
+                let discr_target = self.map.find_discr(place.as_ref())?;
+                let enum_ty = place.ty(self.body, self.tcx).ty;
+                let discr = discriminant_for_variant(enum_ty, *variant_index)?;
+                self.process_operand(bb, discr_target, &discr, state)?;
+            }
+            // If we expect `lhs ?= true`, we have an opportunity if we assume `lhs == true`.
+            StatementKind::Intrinsic(box NonDivergingIntrinsic::Assume(
+                Operand::Copy(place) | Operand::Move(place),
+            )) => {
+                let conditions = state.try_get(place.as_ref(), self.map)?;
+                conditions.iter_matches(ScalarInt::TRUE).for_each(register_opportunity);
+            }
+            StatementKind::Assign(box (lhs_place, rhs)) => {
+                if let Some(lhs) = self.map.find(lhs_place.as_ref()) {
+                    match rhs {
+                        Rvalue::Use(operand) => self.process_operand(bb, lhs, operand, state)?,
+                        // Transfer the conditions on the copy rhs.
+                        Rvalue::CopyForDeref(rhs) => {
+                            self.process_operand(bb, lhs, &Operand::Copy(*rhs), state)?
+                        }
+                        Rvalue::Discriminant(rhs) => {
+                            let rhs = self.map.find_discr(rhs.as_ref())?;
+                            state.insert_place_idx(rhs, lhs, self.map);
+                        }
+                        // Transfer the conditions on each field of the aggregate to the
+                        // corresponding operand; for enums, also handle the discriminant
+                        // implied by the variant index.
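+                        // E.g. (a sketch): for `x = Some(y)`, a condition on
+                        // `(x as Some).0` becomes a condition on `y`, and a pending
+                        // `discriminant(x) ?= A` is checked against `Some`'s discriminant.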
+                        Rvalue::Aggregate(box ref kind, ref operands) => {
+                            let agg_ty = lhs_place.ty(self.body, self.tcx).ty;
+                            let lhs = match kind {
+                                // Do not support unions.
+                                AggregateKind::Adt(.., Some(_)) => return None,
+                                AggregateKind::Adt(_, variant_index, ..) if agg_ty.is_enum() => {
+                                    if let Some(discr_target) = self.map.apply(lhs, TrackElem::Discriminant)
+                                        && let Some(discr_value) = discriminant_for_variant(agg_ty, *variant_index)
+                                    {
+                                        self.process_operand(bb, discr_target, &discr_value, state);
+                                    }
+                                    self.map.apply(lhs, TrackElem::Variant(*variant_index))?
+                                }
+                                _ => lhs,
+                            };
+                            for (field_index, operand) in operands.iter_enumerated() {
+                                if let Some(field) =
+                                    self.map.apply(lhs, TrackElem::Field(field_index))
+                                {
+                                    self.process_operand(bb, field, operand, state);
+                                }
+                            }
+                        }
+                        // Transfer the conditions on the copy rhs, after inverting the polarity.
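+                        // E.g. (a sketch): a condition `lhs == true => bbN` on the
+                        // result of `lhs = Not(place)` becomes `place != true => bbN`.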
+                        Rvalue::UnaryOp(UnOp::Not, Operand::Move(place) | Operand::Copy(place)) => {
+                            let conditions = state.try_get_idx(lhs, self.map)?;
+                            let place = self.map.find(place.as_ref())?;
+                            let conds = conditions.map(self.arena, Condition::inv);
+                            state.insert_value_idx(place, conds, self.map);
+                        }
+                        // We expect `lhs ?= A`. We found `lhs = Eq(rhs, B)`.
+                        // Create a condition on `rhs ?= B`.
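+                        // E.g. (a sketch): from `lhs = Eq(rhs, 3)` and a pending
+                        // condition `lhs == true => bbN` we derive `rhs == 3 => bbN`;
+                        // with `lhs == false => bbN` the polarity flips to `rhs != 3 => bbN`.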
+                        Rvalue::BinaryOp(
+                            op,
+                            box (
+                                Operand::Move(place) | Operand::Copy(place),
+                                Operand::Constant(value),
+                            )
+                            | box (
+                                Operand::Constant(value),
+                                Operand::Move(place) | Operand::Copy(place),
+                            ),
+                        ) => {
+                            let conditions = state.try_get_idx(lhs, self.map)?;
+                            let place = self.map.find(place.as_ref())?;
+                            let equals = match op {
+                                BinOp::Eq => ScalarInt::TRUE,
+                                BinOp::Ne => ScalarInt::FALSE,
+                                _ => return None,
+                            };
+                            let value = value
+                                .const_
+                                .normalize(self.tcx, self.param_env)
+                                .try_to_scalar_int()?;
+                            let conds = conditions.map(self.arena, |c| Condition {
+                                value,
+                                polarity: if c.matches(equals) {
+                                    Polarity::Eq
+                                } else {
+                                    Polarity::Ne
+                                },
+                                ..c
+                            });
+                            state.insert_value_idx(place, conds, self.map);
+                        }
+
+                        _ => {}
+                    }
+                }
+            }
+            _ => {}
+        }
+
+        None
+    }
+
+    #[instrument(level = "trace", skip(self, cost))]
+    fn recurse_through_terminator(
+        &mut self,
+        bb: BasicBlock,
+        state: &State<ConditionSet<'a>>,
+        cost: &CostChecker<'_, 'tcx>,
+        depth: usize,
+    ) {
+        let register_opportunity = |c: Condition| {
+            debug!(?bb, ?c.target, "register");
+            self.opportunities.push(ThreadingOpportunity { chain: vec![bb], target: c.target })
+        };
+
+        let term = self.body.basic_blocks[bb].terminator();
+        let place_to_flood = match term.kind {
+            // We come from a target, so those are not possible.
+            TerminatorKind::UnwindResume
+            | TerminatorKind::UnwindTerminate(_)
+            | TerminatorKind::Return
+            | TerminatorKind::Unreachable
+            | TerminatorKind::CoroutineDrop => bug!("{term:?} has no successors"),
+            // Disallowed during optimizations.
+            TerminatorKind::FalseEdge { .. }
+            | TerminatorKind::FalseUnwind { .. }
+            | TerminatorKind::Yield { .. } => bug!("{term:?} invalid"),
+            // Cannot reason about inline asm.
+            TerminatorKind::InlineAsm { .. } => return,
+            // `SwitchInt` is handled specially.
+            TerminatorKind::SwitchInt { .. } => return,
+            // We can recurse; nothing in particular to do.
+            TerminatorKind::Goto { .. } => None,
+            // Flood the overwritten place, and progress through.
+            TerminatorKind::Drop { place: destination, .. }
+            | TerminatorKind::Call { destination, .. } => Some(destination),
+            // Treat as an `assume(cond == expected)`.
+            TerminatorKind::Assert { ref cond, expected, .. } => {
+                if let Some(place) = cond.place()
+                    && let Some(conditions) = state.try_get(place.as_ref(), self.map)
+                {
+                    let expected = if expected { ScalarInt::TRUE } else { ScalarInt::FALSE };
+                    conditions.iter_matches(expected).for_each(register_opportunity);
+                }
+                None
+            }
+        };
+
+        // We can recurse through this terminator.
+        let mut state = state.clone();
+        if let Some(place_to_flood) = place_to_flood {
+            state.flood_with(place_to_flood.as_ref(), self.map, ConditionSet::default());
+        }
+        self.find_opportunity(bb, state, cost.clone(), depth + 1);
+    }
+
+    #[instrument(level = "trace", skip(self))]
+    fn process_switch_int(
+        &mut self,
+        discr: &Operand<'tcx>,
+        targets: &SwitchTargets,
+        target_bb: BasicBlock,
+        state: &mut State<ConditionSet<'a>>,
+    ) -> Option<!> {
+        debug_assert_ne!(target_bb, START_BLOCK);
+        debug_assert_eq!(self.body.basic_blocks.predecessors()[target_bb].len(), 1);
+
+        let discr = discr.place()?;
+        let discr_ty = discr.ty(self.body, self.tcx).ty;
+        let discr_layout = self.tcx.layout_of(self.param_env.and(discr_ty)).ok()?;
+        let conditions = state.try_get(discr.as_ref(), self.map)?;
+
+        if let Some((value, _)) = targets.iter().find(|&(_, target)| target == target_bb) {
+            let value = ScalarInt::try_from_uint(value, discr_layout.size)?;
+            debug_assert_eq!(targets.iter().filter(|&(_, target)| target == target_bb).count(), 1);
+
+            // We are inside `target_bb`. Since we have a single predecessor, we know we passed
+            // through the `SwitchInt` before arriving here. Therefore, we know that
+            // `discr == value`. If one condition can be fulfilled by `discr == value`,
+            // that's an opportunity.
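+            // E.g. (a sketch): for `switchInt(d) -> [3: bb5, otherwise: bb6]` with
+            // `target_bb == bb5`, every pending condition matched by `d == 3` becomes
+            // an opportunity with an empty chain, starting right here.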
+            for c in conditions.iter_matches(value) {
+                debug!(?target_bb, ?c.target, "register");
+                self.opportunities.push(ThreadingOpportunity { chain: vec![], target: c.target });
+            }
+        } else if let Some((value, _, else_bb)) = targets.as_static_if()
+            && target_bb == else_bb
+        {
+            let value = ScalarInt::try_from_uint(value, discr_layout.size)?;
+
+            // We only know that `discr != value`. That's much weaker information than
+            // the equality we had in the previous arm. All we can conclude is that
+            // the replacement condition `discr != value` can be threaded, and nothing else.
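+            // E.g. (a sketch): in `bb6`, the otherwise target of
+            // `switchInt(d) -> [3: bb5, otherwise: bb6]`, only a pending
+            // `d != 3 => bbN` condition can be threaded.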
+            for c in conditions.iter() {
+                if c.value == value && c.polarity == Polarity::Ne {
+                    debug!(?target_bb, ?c.target, "register");
+                    self.opportunities
+                        .push(ThreadingOpportunity { chain: vec![], target: c.target });
+                }
+            }
+        }
+
+        None
+    }
+}
+
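+/// Bookkeeping used to apply the collected `ThreadingOpportunity`s (TOs) one by one,
+/// while keeping the remaining TOs consistent with the edges that each application
+/// duplicates or removes.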
+struct OpportunitySet {
+    opportunities: Vec<ThreadingOpportunity>,
+    /// For each bb, give the TOs in which it appears. The pair corresponds to the index
+    /// in `opportunities` and the index in `ThreadingOpportunity::chain`.
+    involving_tos: IndexVec<BasicBlock, Vec<(usize, usize)>>,
+    /// Cache the number of predecessors for each block, as mutating the body
+    /// invalidates the basic-block predecessor cache.
+    predecessors: IndexVec<BasicBlock, usize>,
+}
+
+impl OpportunitySet {
+    fn new(body: &Body<'_>, opportunities: Vec<ThreadingOpportunity>) -> OpportunitySet {
+        let mut involving_tos = IndexVec::from_elem(Vec::new(), &body.basic_blocks);
+        for (index, to) in opportunities.iter().enumerate() {
+            for (ibb, &bb) in to.chain.iter().enumerate() {
+                involving_tos[bb].push((index, ibb));
+            }
+            involving_tos[to.target].push((index, to.chain.len()));
+        }
+        let predecessors = predecessor_count(body);
+        OpportunitySet { opportunities, involving_tos, predecessors }
+    }
+
+    /// Apply the opportunities on the graph.
+    fn apply(&mut self, body: &mut Body<'_>) {
+        for i in 0..self.opportunities.len() {
+            self.apply_once(i, body);
+        }
+    }
+
+    #[instrument(level = "trace", skip(self, body))]
+    fn apply_once(&mut self, index: usize, body: &mut Body<'_>) {
+        debug!(?self.predecessors);
+        debug!(?self.involving_tos);
+
+        // Check that `predecessors` satisfies its invariant.
+        debug_assert_eq!(self.predecessors, predecessor_count(body));
+
+        // Remove the TO from the vector to allow modifying the other ones later.
+        let op = &mut self.opportunities[index];
+        debug!(?op);
+        let op_chain = std::mem::take(&mut op.chain);
+        let op_target = op.target;
+        debug_assert_eq!(op_chain.len(), op_chain.iter().collect::<FxHashSet<_>>().len());
+
+        let Some((current, chain)) = op_chain.split_first() else { return };
+        let basic_blocks = body.basic_blocks.as_mut();
+
+        // Invariant: the control-flow is well-formed at the end of each iteration.
+        let mut current = *current;
+        for &succ in chain {
+            debug!(?current, ?succ);
+
+            // `succ` must be a successor of `current`. If it is not, a previously
+            // applied TO erased this edge, so this TO is no longer satisfiable and we
+            // bail out.
+            if basic_blocks[current].terminator().successors().find(|s| *s == succ).is_none() {
+                debug!("impossible");
+                return;
+            }
+
+            // Fast path: `succ` is only used once, so we can reuse it directly.
+            if self.predecessors[succ] == 1 {
+                debug!("single");
+                current = succ;
+                continue;
+            }
+
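+            // Slow path: `succ` has other predecessors, so redirecting the edge in
+            // place would also reroute them. Instead, duplicate `succ` and retarget
+            // only our edge. E.g. (a sketch):
+            //     before: bb1 -> bb3 <- bb2      after: bb1 -> bb3', bb2 -> bb3
+            // where `bb3'` is a fresh copy of `bb3`.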
+            let new_succ = basic_blocks.push(basic_blocks[succ].clone());
+            debug!(?new_succ);
+
+            // Replace `succ` by `new_succ` where it appears.
+            let mut num_edges = 0;
+            for s in basic_blocks[current].terminator_mut().successors_mut() {
+                if *s == succ {
+                    *s = new_succ;
+                    num_edges += 1;
+                }
+            }
+
+            // Update predecessors with the new block.
+            let _new_succ = self.predecessors.push(num_edges);
+            debug_assert_eq!(new_succ, _new_succ);
+            self.predecessors[succ] -= num_edges;
+            self.update_predecessor_count(basic_blocks[new_succ].terminator(), Update::Incr);
+
+            // Replace the `current -> succ` edge by `current -> new_succ` in all the following
+            // TOs. This is necessary to avoid trying to thread through a non-existing edge. We
+            // use `involving_tos` here to avoid traversing the full set of TOs on each iteration.
+            let mut new_involved = Vec::new();
+            for &(to_index, in_to_index) in &self.involving_tos[current] {
+                // That TO has already been applied, do nothing.
+                if to_index <= index {
+                    continue;
+                }
+
+                let other_to = &mut self.opportunities[to_index];
+                if other_to.chain.get(in_to_index) != Some(&current) {
+                    continue;
+                }
+                let s = other_to.chain.get_mut(in_to_index + 1).unwrap_or(&mut other_to.target);
+                if *s == succ {
+                    // `other_to` references the `current -> succ` edge, so replace `succ`.
+                    *s = new_succ;
+                    new_involved.push((to_index, in_to_index + 1));
+                }
+            }
+
+            // The TOs that we just updated now reference `new_succ`. Update `involving_tos`
+            // in case we need to duplicate an edge starting at `new_succ` later.
+            let _new_succ = self.involving_tos.push(new_involved);
+            debug_assert_eq!(new_succ, _new_succ);
+
+            current = new_succ;
+        }
+
+        let current = &mut basic_blocks[current];
+        self.update_predecessor_count(current.terminator(), Update::Decr);
+        current.terminator_mut().kind = TerminatorKind::Goto { target: op_target };
+        self.predecessors[op_target] += 1;
+    }
+
+    fn update_predecessor_count(&mut self, terminator: &Terminator<'_>, incr: Update) {
+        match incr {
+            Update::Incr => {
+                for s in terminator.successors() {
+                    self.predecessors[s] += 1;
+                }
+            }
+            Update::Decr => {
+                for s in terminator.successors() {
+                    self.predecessors[s] -= 1;
+                }
+            }
+        }
+    }
+}
+
+fn predecessor_count(body: &Body<'_>) -> IndexVec<BasicBlock, usize> {
+    let mut predecessors: IndexVec<_, _> =
+        body.basic_blocks.predecessors().iter().map(|ps| ps.len()).collect();
+    predecessors[START_BLOCK] += 1; // Account for the implicit entry edge.
+    predecessors
+}
+
+enum Update {
+    Incr,
+    Decr,
+}
+
+/// Compute the set of loop headers in the given body. We define a loop header as a block
+/// which has at least one predecessor that it dominates. This definition is only correct
+/// for reducible CFGs. But if the CFG is already irreducible, there is no point in trying
+/// much harder.
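+/// E.g. (a sketch): in the CFG `bb0 -> bb1 -> bb2 -> bb1`, `bb1` dominates its
+/// predecessor `bb2`, so `bb1` is a loop header.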
+fn loop_headers(body: &Body<'_>) -> BitSet<BasicBlock> {
+    let mut loop_headers = BitSet::new_empty(body.basic_blocks.len());
+    let dominators = body.basic_blocks.dominators();
+    // Only visit reachable blocks.
+    for (bb, bbdata) in traversal::preorder(body) {
+        for succ in bbdata.terminator().successors() {
+            if dominators.dominates(succ, bb) {
+                loop_headers.insert(succ);
+            }
+        }
+    }
+    loop_headers
+}
diff --git a/compiler/rustc_mir_transform/src/lib.rs b/compiler/rustc_mir_transform/src/lib.rs
index d579420ecb8..9aaa54110bd 100644
--- a/compiler/rustc_mir_transform/src/lib.rs
+++ b/compiler/rustc_mir_transform/src/lib.rs
@@ -61,6 +61,8 @@ mod const_goto;
 mod const_prop;
 mod const_prop_lint;
 mod copy_prop;
+mod coroutine;
+mod cost_checker;
 mod coverage;
 mod cross_crate_inline;
 mod ctfe_limit;
@@ -77,10 +79,10 @@ mod elaborate_drops;
 mod errors;
 mod ffi_unwind_calls;
 mod function_item_references;
-mod generator;
 mod gvn;
 pub mod inline;
 mod instsimplify;
+mod jump_threading;
 mod large_enums;
 mod lower_intrinsics;
 mod lower_slice_len;
@@ -132,7 +134,7 @@ pub fn provide(providers: &mut Providers) {
         mir_promoted,
         mir_drops_elaborated_and_const_checked,
         mir_for_ctfe,
-        mir_generator_witnesses: generator::mir_generator_witnesses,
+        mir_coroutine_witnesses: coroutine::mir_coroutine_witnesses,
         optimized_mir,
         is_mir_available,
         is_ctfe_mir_available: |tcx, did| is_mir_available(tcx, did),
@@ -375,8 +377,8 @@ fn inner_mir_for_ctfe(tcx: TyCtxt<'_>, def: LocalDefId) -> Body<'_> {
 /// mir borrowck *before* doing so in order to ensure that borrowck can be run and doesn't
 /// end up missing the source MIR because it was stolen.
 fn mir_drops_elaborated_and_const_checked(tcx: TyCtxt<'_>, def: LocalDefId) -> &Steal<Body<'_>> {
-    if let DefKind::Generator = tcx.def_kind(def) {
-        tcx.ensure_with_value().mir_generator_witnesses(def);
+    if let DefKind::Coroutine = tcx.def_kind(def) {
+        tcx.ensure_with_value().mir_coroutine_witnesses(def);
     }
     let mir_borrowck = tcx.mir_borrowck(def);
 
@@ -509,7 +511,7 @@ fn run_runtime_lowering_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
         // `AddRetag` needs to run after `ElaborateDrops`. Otherwise it should run fairly late,
         // but before optimizations begin.
         &elaborate_box_derefs::ElaborateBoxDerefs,
-        &generator::StateTransform,
+        &coroutine::StateTransform,
         &add_retag::AddRetag,
         &Lint(const_prop_lint::ConstPropLint),
     ];
@@ -571,6 +573,7 @@ fn run_optimization_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
             &dataflow_const_prop::DataflowConstProp,
             &const_debuginfo::ConstDebugInfo,
             &o1(simplify_branches::SimplifyConstCondition::AfterConstProp),
+            &jump_threading::JumpThreading,
             &early_otherwise_branch::EarlyOtherwiseBranch,
             &simplify_comparison_integral::SimplifyComparisonIntegral,
             &dead_store_elimination::DeadStoreElimination,
diff --git a/compiler/rustc_mir_transform/src/remove_noop_landing_pads.rs b/compiler/rustc_mir_transform/src/remove_noop_landing_pads.rs
index 8c48a667786..54892442c87 100644
--- a/compiler/rustc_mir_transform/src/remove_noop_landing_pads.rs
+++ b/compiler/rustc_mir_transform/src/remove_noop_landing_pads.rs
@@ -69,7 +69,7 @@ impl RemoveNoopLandingPads {
             | TerminatorKind::FalseUnwind { .. } => {
                 terminator.successors().all(|succ| nop_landing_pads.contains(succ))
             }
-            TerminatorKind::GeneratorDrop
+            TerminatorKind::CoroutineDrop
             | TerminatorKind::Yield { .. }
             | TerminatorKind::Return
             | TerminatorKind::UnwindTerminate(_)
diff --git a/compiler/rustc_mir_transform/src/remove_uninit_drops.rs b/compiler/rustc_mir_transform/src/remove_uninit_drops.rs
index 26384974798..87fee2410ec 100644
--- a/compiler/rustc_mir_transform/src/remove_uninit_drops.rs
+++ b/compiler/rustc_mir_transform/src/remove_uninit_drops.rs
@@ -24,11 +24,8 @@ pub struct RemoveUninitDrops;
 impl<'tcx> MirPass<'tcx> for RemoveUninitDrops {
     fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
         let param_env = tcx.param_env(body.source.def_id());
-        let Ok(move_data) = MoveData::gather_moves(body, tcx, param_env) else {
-            // We could continue if there are move errors, but there's not much point since our
-            // init data isn't complete.
-            return;
-        };
+        let move_data =
+            MoveData::gather_moves(&body, tcx, param_env, |ty| ty.needs_drop(tcx, param_env));
 
         let mdpe = MoveDataParamEnv { move_data, param_env };
         let mut maybe_inits = MaybeInitializedPlaces::new(tcx, body, &mdpe)
diff --git a/compiler/rustc_mir_transform/src/remove_zsts.rs b/compiler/rustc_mir_transform/src/remove_zsts.rs
index b4784b69f7f..5aa3c3cfe4d 100644
--- a/compiler/rustc_mir_transform/src/remove_zsts.rs
+++ b/compiler/rustc_mir_transform/src/remove_zsts.rs
@@ -13,8 +13,8 @@ impl<'tcx> MirPass<'tcx> for RemoveZsts {
     }
 
     fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
-        // Avoid query cycles (generators require optimized MIR for layout).
-        if tcx.type_of(body.source.def_id()).instantiate_identity().is_generator() {
+        // Avoid query cycles (coroutines require optimized MIR for layout).
+        if tcx.type_of(body.source.def_id()).instantiate_identity().is_coroutine() {
             return;
         }
         let param_env = tcx.param_env_reveal_all_normalized(body.source.def_id());
diff --git a/compiler/rustc_mir_transform/src/separate_const_switch.rs b/compiler/rustc_mir_transform/src/separate_const_switch.rs
index e1e4acccccd..907cfe7581a 100644
--- a/compiler/rustc_mir_transform/src/separate_const_switch.rs
+++ b/compiler/rustc_mir_transform/src/separate_const_switch.rs
@@ -118,7 +118,7 @@ pub fn separate_const_switch(body: &mut Body<'_>) -> usize {
                         | TerminatorKind::Return
                         | TerminatorKind::Unreachable
                         | TerminatorKind::InlineAsm { .. }
-                        | TerminatorKind::GeneratorDrop => {
+                        | TerminatorKind::CoroutineDrop => {
                             continue 'predec_iter;
                         }
                     }
@@ -169,7 +169,7 @@ pub fn separate_const_switch(body: &mut Body<'_>) -> usize {
             | TerminatorKind::UnwindTerminate(_)
             | TerminatorKind::Return
             | TerminatorKind::Unreachable
-            | TerminatorKind::GeneratorDrop
+            | TerminatorKind::CoroutineDrop
             | TerminatorKind::Assert { .. }
             | TerminatorKind::FalseUnwind { .. }
             | TerminatorKind::Drop { .. }
diff --git a/compiler/rustc_mir_transform/src/shim.rs b/compiler/rustc_mir_transform/src/shim.rs
index e9895d97dfe..2400cfa21fb 100644
--- a/compiler/rustc_mir_transform/src/shim.rs
+++ b/compiler/rustc_mir_transform/src/shim.rs
@@ -4,7 +4,7 @@ use rustc_hir::lang_items::LangItem;
 use rustc_middle::mir::*;
 use rustc_middle::query::Providers;
 use rustc_middle::ty::GenericArgs;
-use rustc_middle::ty::{self, EarlyBinder, GeneratorArgs, Ty, TyCtxt};
+use rustc_middle::ty::{self, CoroutineArgs, EarlyBinder, Ty, TyCtxt};
 use rustc_target::abi::{FieldIdx, VariantIdx, FIRST_VARIANT};
 
 use rustc_index::{Idx, IndexVec};
@@ -67,10 +67,10 @@ fn make_shim<'tcx>(tcx: TyCtxt<'tcx>, instance: ty::InstanceDef<'tcx>) -> Body<'
         }
 
         ty::InstanceDef::DropGlue(def_id, ty) => {
-            // FIXME(#91576): Drop shims for generators aren't subject to the MIR passes at the end
+            // FIXME(#91576): Drop shims for coroutines aren't subject to the MIR passes at the end
             // of this function. Is this intentional?
-            if let Some(ty::Generator(gen_def_id, args, _)) = ty.map(Ty::kind) {
-                let body = tcx.optimized_mir(*gen_def_id).generator_drop().unwrap();
+            if let Some(ty::Coroutine(gen_def_id, args, _)) = ty.map(Ty::kind) {
+                let body = tcx.optimized_mir(*gen_def_id).coroutine_drop().unwrap();
                 let mut body = EarlyBinder::bind(body.clone()).instantiate(tcx, args);
                 debug!("make_shim({:?}) = {:?}", instance, body);
 
@@ -171,7 +171,7 @@ fn local_decls_for_sig<'tcx>(
 fn build_drop_shim<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId, ty: Option<Ty<'tcx>>) -> Body<'tcx> {
     debug!("build_drop_shim(def_id={:?}, ty={:?})", def_id, ty);
 
-    assert!(!matches!(ty, Some(ty) if ty.is_generator()));
+    assert!(!matches!(ty, Some(ty) if ty.is_coroutine()));
 
     let args = if let Some(ty) = ty {
         tcx.mk_args(&[ty.into()])
@@ -392,8 +392,8 @@ fn build_clone_shim<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId, self_ty: Ty<'tcx>) -
         _ if is_copy => builder.copy_shim(),
         ty::Closure(_, args) => builder.tuple_like_shim(dest, src, args.as_closure().upvar_tys()),
         ty::Tuple(..) => builder.tuple_like_shim(dest, src, self_ty.tuple_fields()),
-        ty::Generator(gen_def_id, args, hir::Movability::Movable) => {
-            builder.generator_shim(dest, src, *gen_def_id, args.as_generator())
+        ty::Coroutine(gen_def_id, args, hir::Movability::Movable) => {
+            builder.coroutine_shim(dest, src, *gen_def_id, args.as_coroutine())
         }
         _ => bug!("clone shim for `{:?}` which is not `Copy` and is not an aggregate", self_ty),
     };
@@ -593,12 +593,12 @@ impl<'tcx> CloneShimBuilder<'tcx> {
         let _final_cleanup_block = self.clone_fields(dest, src, target, unwind, tys);
     }
 
-    fn generator_shim(
+    fn coroutine_shim(
         &mut self,
         dest: Place<'tcx>,
         src: Place<'tcx>,
         gen_def_id: DefId,
-        args: GeneratorArgs<'tcx>,
+        args: CoroutineArgs<'tcx>,
     ) {
         self.block(vec![], TerminatorKind::Goto { target: self.block_index_offset(3) }, false);
         let unwind = self.block(vec![], TerminatorKind::UnwindResume, true);
diff --git a/compiler/rustc_mir_transform/src/sroa.rs b/compiler/rustc_mir_transform/src/sroa.rs
index c21b1724cbb..427cc1e1924 100644
--- a/compiler/rustc_mir_transform/src/sroa.rs
+++ b/compiler/rustc_mir_transform/src/sroa.rs
@@ -20,8 +20,8 @@ impl<'tcx> MirPass<'tcx> for ScalarReplacementOfAggregates {
     fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
         debug!(def_id = ?body.source.def_id());
 
-        // Avoid query cycles (generators require optimized MIR for layout).
-        if tcx.type_of(body.source.def_id()).instantiate_identity().is_generator() {
+        // Avoid query cycles (coroutines require optimized MIR for layout).
+        if tcx.type_of(body.source.def_id()).instantiate_identity().is_coroutine() {
             return;
         }