Diffstat (limited to 'compiler/rustc_mir_transform')
-rw-r--r--  compiler/rustc_mir_transform/Cargo.toml                          |    2
-rw-r--r--  compiler/rustc_mir_transform/src/add_moves_for_packed_drops.rs   |    2
-rw-r--r--  compiler/rustc_mir_transform/src/add_subtyping_projections.rs    |    3
-rw-r--r--  compiler/rustc_mir_transform/src/check_const_item_mutation.rs    |    2
-rw-r--r--  compiler/rustc_mir_transform/src/check_inline.rs                 |    2
-rw-r--r--  compiler/rustc_mir_transform/src/check_pointers.rs               |    5
-rw-r--r--  compiler/rustc_mir_transform/src/check_undefined_transmutes.rs   |    2
-rw-r--r--  compiler/rustc_mir_transform/src/coroutine.rs                    |   18
-rw-r--r--  compiler/rustc_mir_transform/src/coverage/counters.rs            |  113
-rw-r--r--  compiler/rustc_mir_transform/src/coverage/counters/node_flow.rs  |   25
-rw-r--r--  compiler/rustc_mir_transform/src/coverage/graph.rs               |   15
-rw-r--r--  compiler/rustc_mir_transform/src/coverage/mappings.rs            |   61
-rw-r--r--  compiler/rustc_mir_transform/src/coverage/mod.rs                 |  168
-rw-r--r--  compiler/rustc_mir_transform/src/coverage/query.rs               |  181
-rw-r--r--  compiler/rustc_mir_transform/src/coverage/spans.rs               |    3
-rw-r--r--  compiler/rustc_mir_transform/src/coverage/spans/from_mir.rs      |    3
-rw-r--r--  compiler/rustc_mir_transform/src/deduplicate_blocks.rs           |  195
-rw-r--r--  compiler/rustc_mir_transform/src/deref_separator.rs              |    3
-rw-r--r--  compiler/rustc_mir_transform/src/early_otherwise_branch.rs       |    2
-rw-r--r--  compiler/rustc_mir_transform/src/elaborate_box_derefs.rs         |    3
-rw-r--r--  compiler/rustc_mir_transform/src/elaborate_drop.rs               | 1058
-rw-r--r--  compiler/rustc_mir_transform/src/elaborate_drops.rs              |   19
-rw-r--r--  compiler/rustc_mir_transform/src/ffi_unwind_calls.rs             |    4
-rw-r--r--  compiler/rustc_mir_transform/src/function_item_references.rs     |    9
-rw-r--r--  compiler/rustc_mir_transform/src/gvn.rs                          |   11
-rw-r--r--  compiler/rustc_mir_transform/src/inline.rs                       |    7
-rw-r--r--  compiler/rustc_mir_transform/src/instsimplify.rs                 |    2
-rw-r--r--  compiler/rustc_mir_transform/src/jump_threading.rs               |    6
-rw-r--r--  compiler/rustc_mir_transform/src/known_panics_lint.rs            |    6
-rw-r--r--  compiler/rustc_mir_transform/src/large_enums.rs                  |  186
-rw-r--r--  compiler/rustc_mir_transform/src/lib.rs                          |   12
-rw-r--r--  compiler/rustc_mir_transform/src/lint_tail_expr_drop_order.rs    |    2
-rw-r--r--  compiler/rustc_mir_transform/src/match_branches.rs               |    2
-rw-r--r--  compiler/rustc_mir_transform/src/mentioned_items.rs              |    2
-rw-r--r--  compiler/rustc_mir_transform/src/patch.rs                        |  285
-rw-r--r--  compiler/rustc_mir_transform/src/remove_noop_landing_pads.rs     |    3
-rw-r--r--  compiler/rustc_mir_transform/src/shim.rs                         |   11
-rw-r--r--  compiler/rustc_mir_transform/src/simplify.rs                     |    6
-rw-r--r--  compiler/rustc_mir_transform/src/simplify_branches.rs            |    2
-rw-r--r--  compiler/rustc_mir_transform/src/simplify_comparison_integral.rs |    4
-rw-r--r--  compiler/rustc_mir_transform/src/single_use_consts.rs            |    8
-rw-r--r--  compiler/rustc_mir_transform/src/sroa.rs                         |    5
-rw-r--r--  compiler/rustc_mir_transform/src/ssa.rs                          |    2
-rw-r--r--  compiler/rustc_mir_transform/src/unreachable_enum_branching.rs   |    3
-rw-r--r--  compiler/rustc_mir_transform/src/unreachable_prop.rs             |    3
45 files changed, 1671 insertions(+), 795 deletions(-)
diff --git a/compiler/rustc_mir_transform/Cargo.toml b/compiler/rustc_mir_transform/Cargo.toml
index 2f233f787f0..fb8d0ac5e74 100644
--- a/compiler/rustc_mir_transform/Cargo.toml
+++ b/compiler/rustc_mir_transform/Cargo.toml
@@ -1,7 +1,7 @@
 [package]
 name = "rustc_mir_transform"
 version = "0.0.0"
-edition = "2021"
+edition = "2024"
 
 [dependencies]
 # tidy-alphabetical-start
diff --git a/compiler/rustc_mir_transform/src/add_moves_for_packed_drops.rs b/compiler/rustc_mir_transform/src/add_moves_for_packed_drops.rs
index 8716fd1c098..b33326cb873 100644
--- a/compiler/rustc_mir_transform/src/add_moves_for_packed_drops.rs
+++ b/compiler/rustc_mir_transform/src/add_moves_for_packed_drops.rs
@@ -1,8 +1,8 @@
-use rustc_middle::mir::patch::MirPatch;
 use rustc_middle::mir::*;
 use rustc_middle::ty::{self, TyCtxt};
 use tracing::debug;
 
+use crate::patch::MirPatch;
 use crate::util;
 
 /// This pass moves values being dropped that are within a packed
diff --git a/compiler/rustc_mir_transform/src/add_subtyping_projections.rs b/compiler/rustc_mir_transform/src/add_subtyping_projections.rs
index e41f47db545..b5cd4133459 100644
--- a/compiler/rustc_mir_transform/src/add_subtyping_projections.rs
+++ b/compiler/rustc_mir_transform/src/add_subtyping_projections.rs
@@ -1,8 +1,9 @@
-use rustc_middle::mir::patch::MirPatch;
 use rustc_middle::mir::visit::MutVisitor;
 use rustc_middle::mir::*;
 use rustc_middle::ty::TyCtxt;
 
+use crate::patch::MirPatch;
+
 pub(super) struct Subtyper;
 
 struct SubTypeChecker<'a, 'tcx> {
diff --git a/compiler/rustc_mir_transform/src/check_const_item_mutation.rs b/compiler/rustc_mir_transform/src/check_const_item_mutation.rs
index 3affe4abbfa..ceea72c6755 100644
--- a/compiler/rustc_mir_transform/src/check_const_item_mutation.rs
+++ b/compiler/rustc_mir_transform/src/check_const_item_mutation.rs
@@ -79,7 +79,7 @@ impl<'tcx> ConstMutationChecker<'_, 'tcx> {
             let lint_root = self.body.source_scopes[source_info.scope]
                 .local_data
                 .as_ref()
-                .assert_crate_local()
+                .unwrap_crate_local()
                 .lint_root;
 
             Some((lint_root, source_info.span, self.tcx.def_span(const_item)))
diff --git a/compiler/rustc_mir_transform/src/check_inline.rs b/compiler/rustc_mir_transform/src/check_inline.rs
index 497f4a660ea..83c3cda5a50 100644
--- a/compiler/rustc_mir_transform/src/check_inline.rs
+++ b/compiler/rustc_mir_transform/src/check_inline.rs
@@ -16,7 +16,7 @@ pub(super) struct CheckForceInline;
 impl<'tcx> MirLint<'tcx> for CheckForceInline {
     fn run_lint(&self, tcx: TyCtxt<'tcx>, body: &Body<'tcx>) {
         let def_id = body.source.def_id();
-        if !tcx.hir().body_owner_kind(def_id).is_fn_or_closure() || !def_id.is_local() {
+        if !tcx.hir_body_owner_kind(def_id).is_fn_or_closure() || !def_id.is_local() {
             return;
         }
         let InlineAttr::Force { attr_span, .. } = tcx.codegen_fn_attrs(def_id).inline else {
diff --git a/compiler/rustc_mir_transform/src/check_pointers.rs b/compiler/rustc_mir_transform/src/check_pointers.rs
index ccaa83fd9e2..2d04b621935 100644
--- a/compiler/rustc_mir_transform/src/check_pointers.rs
+++ b/compiler/rustc_mir_transform/src/check_pointers.rs
@@ -71,8 +71,7 @@ pub(crate) fn check_pointers<'tcx, F>(
     // statements/blocks after. Iterating or visiting the MIR in order would require updating
     // our current location after every insertion. By iterating backwards, we dodge this issue:
     // The only Locations that an insertion changes have already been handled.
-    for block in (0..basic_blocks.len()).rev() {
-        let block = block.into();
+    for block in basic_blocks.indices().rev() {
         for statement_index in (0..basic_blocks[block].statements.len()).rev() {
             let location = Location { block, statement_index };
             let statement = &basic_blocks[block].statements[statement_index];
@@ -190,7 +189,7 @@ impl<'a, 'tcx> Visitor<'tcx> for PointerFinder<'a, 'tcx> {
         let pointer_ty = self.local_decls[place.local].ty;
 
         // We only want to check places based on raw pointers
-        if !pointer_ty.is_unsafe_ptr() {
+        if !pointer_ty.is_raw_ptr() {
             trace!("Indirect, but not based on an raw ptr, not checking {:?}", place);
             return;
         }
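
The comment in this hunk describes a standard trick for editing a CFG in place: visit locations back-to-front, so an insertion never shifts the index of a not-yet-visited location. A minimal standalone sketch of that invariant, with plain nested Vecs standing in for basic blocks and an illustrative `needs_check` that is not a rustc API:

// Standalone sketch: when inserting while iterating, go back-to-front so an
// insertion at position `s` only shifts positions greater than `s`, all of
// which have already been visited.
fn needs_check(stmt: i32) -> bool {
    stmt % 2 == 0
}

fn main() {
    // Two "blocks" of "statements".
    let mut blocks: Vec<Vec<i32>> = vec![vec![1, 2], vec![3, 4, 5]];

    for b in (0..blocks.len()).rev() {
        for s in (0..blocks[b].len()).rev() {
            if needs_check(blocks[b][s]) {
                // Insert a "check" marker (0) before the statement; indices
                // below `s` are unaffected, indices above were already done.
                blocks[b].insert(s, 0);
            }
        }
    }

    assert_eq!(blocks, vec![vec![1, 0, 2], vec![3, 0, 4, 5]]);
}
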
diff --git a/compiler/rustc_mir_transform/src/check_undefined_transmutes.rs b/compiler/rustc_mir_transform/src/check_undefined_transmutes.rs
index 8ba14a1158e..ed3b1ae4f42 100644
--- a/compiler/rustc_mir_transform/src/check_undefined_transmutes.rs
+++ b/compiler/rustc_mir_transform/src/check_undefined_transmutes.rs
@@ -47,7 +47,7 @@ impl<'a, 'tcx> UndefinedTransmutesChecker<'a, 'tcx> {
         {
             let fn_sig = function.ty(self.body, self.tcx).fn_sig(self.tcx).skip_binder();
             if let [input] = fn_sig.inputs() {
-                return input.is_unsafe_ptr() && fn_sig.output().is_integral();
+                return input.is_raw_ptr() && fn_sig.output().is_integral();
             }
         }
         false
diff --git a/compiler/rustc_mir_transform/src/coroutine.rs b/compiler/rustc_mir_transform/src/coroutine.rs
index a9bdbeb9cb8..04d96f11707 100644
--- a/compiler/rustc_mir_transform/src/coroutine.rs
+++ b/compiler/rustc_mir_transform/src/coroutine.rs
@@ -393,12 +393,13 @@ impl<'tcx> MutVisitor<'tcx> for TransformVisitor<'tcx> {
 
     fn visit_basic_block_data(&mut self, block: BasicBlock, data: &mut BasicBlockData<'tcx>) {
         // Remove StorageLive and StorageDead statements for remapped locals
-        data.retain_statements(|s| match s.kind {
-            StatementKind::StorageLive(l) | StatementKind::StorageDead(l) => {
-                !self.remap.contains(l)
+        for s in &mut data.statements {
+            if let StatementKind::StorageLive(l) | StatementKind::StorageDead(l) = s.kind
+                && self.remap.contains(l)
+            {
+                s.make_nop();
             }
-            _ => true,
-        });
+        }
 
         let ret_val = match data.terminator().kind {
             TerminatorKind::Return => {
@@ -944,7 +945,7 @@ fn compute_layout<'tcx>(
         let decl = &body.local_decls[local];
         debug!(?decl);
 
-        // Do not `assert_crate_local` here, as post-borrowck cleanup may have already cleared
+        // Do not `unwrap_crate_local` here, as post-borrowck cleanup may have already cleared
         // the information. This is alright, since `ignore_for_traits` is only relevant when
         // this code runs on pre-cleanup MIR, and `ignore_for_traits = false` is the safer
         // default.
@@ -1061,9 +1062,8 @@ fn insert_switch<'tcx>(
 }
 
 fn elaborate_coroutine_drops<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
-    use rustc_middle::mir::patch::MirPatch;
-    use rustc_mir_dataflow::elaborate_drops::{Unwind, elaborate_drop};
-
+    use crate::elaborate_drop::{Unwind, elaborate_drop};
+    use crate::patch::MirPatch;
     use crate::shim::DropShimElaborator;
 
     // Note that `elaborate_drops` only drops the upvars of a coroutine, and
diff --git a/compiler/rustc_mir_transform/src/coverage/counters.rs b/compiler/rustc_mir_transform/src/coverage/counters.rs
index adb99a75a9e..5568d42ab8f 100644
--- a/compiler/rustc_mir_transform/src/coverage/counters.rs
+++ b/compiler/rustc_mir_transform/src/coverage/counters.rs
@@ -2,7 +2,6 @@ use std::cmp::Ordering;
 
 use either::Either;
 use itertools::Itertools;
-use rustc_data_structures::captures::Captures;
 use rustc_data_structures::fx::{FxHashMap, FxIndexMap};
 use rustc_data_structures::graph::DirectedGraph;
 use rustc_index::IndexVec;
@@ -11,31 +10,35 @@ use rustc_middle::mir::coverage::{CounterId, CovTerm, Expression, ExpressionId,
 
 use crate::coverage::counters::balanced_flow::BalancedFlowGraph;
 use crate::coverage::counters::node_flow::{
-    CounterTerm, NodeCounters, make_node_counters, node_flow_data_for_balanced_graph,
+    CounterTerm, NodeCounters, NodeFlowData, node_flow_data_for_balanced_graph,
 };
 use crate::coverage::graph::{BasicCoverageBlock, CoverageGraph};
 
 mod balanced_flow;
-mod node_flow;
+pub(crate) mod node_flow;
 mod union_find;
 
-/// Ensures that each BCB node needing a counter has one, by creating physical
-/// counters or counter expressions for nodes as required.
-pub(super) fn make_bcb_counters(
-    graph: &CoverageGraph,
-    bcb_needs_counter: &DenseBitSet<BasicCoverageBlock>,
-) -> CoverageCounters {
+/// Struct containing the results of [`prepare_bcb_counters_data`].
+pub(crate) struct BcbCountersData {
+    pub(crate) node_flow_data: NodeFlowData<BasicCoverageBlock>,
+    pub(crate) priority_list: Vec<BasicCoverageBlock>,
+}
+
+/// Analyzes the coverage graph to create intermediate data structures that
+/// will later be used (during codegen) to create physical counters or counter
+/// expressions for each BCB node that needs one.
+pub(crate) fn prepare_bcb_counters_data(graph: &CoverageGraph) -> BcbCountersData {
     // Create the derived graphs that are necessary for subsequent steps.
     let balanced_graph = BalancedFlowGraph::for_graph(graph, |n| !graph[n].is_out_summable);
     let node_flow_data = node_flow_data_for_balanced_graph(&balanced_graph);
 
-    // Use those graphs to determine which nodes get physical counters, and how
-    // to compute the execution counts of other nodes from those counters.
+    // Also create a "priority list" of coverage graph nodes, to help determine
+    // which ones get physical counters or counter expressions. This needs to
+    // be done now, because the later parts of the counter-creation process
+    // won't have access to the original coverage graph.
     let priority_list = make_node_flow_priority_list(graph, balanced_graph);
-    let node_counters = make_node_counters(&node_flow_data, &priority_list);
 
-    // Convert the counters into a form suitable for embedding into MIR.
-    transcribe_counters(&node_counters, bcb_needs_counter)
+    BcbCountersData { node_flow_data, priority_list }
 }
 
 /// Arranges the nodes in `balanced_graph` into a list, such that earlier nodes
@@ -47,7 +50,7 @@ fn make_node_flow_priority_list(
     // A "reloop" node has exactly one out-edge, which jumps back to the top
     // of an enclosing loop. Reloop nodes are typically visited more times
     // than loop-exit nodes, so try to avoid giving them physical counters.
-    let is_reloop_node = IndexVec::from_fn_n(
+    let is_reloop_node = IndexVec::<BasicCoverageBlock, _>::from_fn_n(
         |node| match graph.successors[node].as_slice() {
             &[succ] => graph.dominates(succ, node),
             _ => false,
@@ -74,31 +77,33 @@ fn make_node_flow_priority_list(
 }
 
 // Converts node counters into a form suitable for embedding into MIR.
-fn transcribe_counters(
+pub(crate) fn transcribe_counters(
     old: &NodeCounters<BasicCoverageBlock>,
     bcb_needs_counter: &DenseBitSet<BasicCoverageBlock>,
+    bcbs_seen: &DenseBitSet<BasicCoverageBlock>,
 ) -> CoverageCounters {
     let mut new = CoverageCounters::with_num_bcbs(bcb_needs_counter.domain_size());
 
     for bcb in bcb_needs_counter.iter() {
+        if !bcbs_seen.contains(bcb) {
+            // This BCB's code was removed by MIR opts, so its counter is always zero.
+            new.set_node_counter(bcb, CovTerm::Zero);
+            continue;
+        }
+
         // Our counter-creation algorithm doesn't guarantee that a node's list
         // of terms starts or ends with a positive term, so partition the
         // counters into "positive" and "negative" lists for easier handling.
-        let (mut pos, mut neg): (Vec<_>, Vec<_>) =
-            old.counter_terms[bcb].iter().partition_map(|&CounterTerm { node, op }| match op {
+        let (mut pos, mut neg): (Vec<_>, Vec<_>) = old.counter_terms[bcb]
+            .iter()
+            // Filter out any BCBs that were removed by MIR opts;
+            // this treats them as having an execution count of 0.
+            .filter(|term| bcbs_seen.contains(term.node))
+            .partition_map(|&CounterTerm { node, op }| match op {
                 Op::Add => Either::Left(node),
                 Op::Subtract => Either::Right(node),
             });
 
-        if pos.is_empty() {
-            // If we somehow end up with no positive terms, fall back to
-            // creating a physical counter. There's no known way for this
-            // to happen, but we can avoid an ICE if it does.
-            debug_assert!(false, "{bcb:?} has no positive counter terms");
-            pos = vec![bcb];
-            neg = vec![];
-        }
-
         // These intermediate sorts are not strictly necessary, but were helpful
         // in reducing churn when switching to the current counter-creation scheme.
         // They also help to slightly decrease the overall size of the expression
@@ -109,14 +114,10 @@ fn transcribe_counters(
         let mut new_counters_for_sites = |sites: Vec<BasicCoverageBlock>| {
             sites.into_iter().map(|node| new.ensure_phys_counter(node)).collect::<Vec<_>>()
         };
-        let mut pos = new_counters_for_sites(pos);
-        let mut neg = new_counters_for_sites(neg);
-
-        // These sorts are also not strictly necessary; see above.
-        pos.sort();
-        neg.sort();
+        let pos = new_counters_for_sites(pos);
+        let neg = new_counters_for_sites(neg);
 
-        let pos_counter = new.make_sum(&pos).expect("`pos` should not be empty");
+        let pos_counter = new.make_sum(&pos).unwrap_or(CovTerm::Zero);
         let new_counter = new.make_subtracted_sum(pos_counter, &neg);
         new.set_node_counter(bcb, new_counter);
     }
@@ -129,15 +130,15 @@ fn transcribe_counters(
 pub(super) struct CoverageCounters {
     /// List of places where a counter-increment statement should be injected
     /// into MIR, each with its corresponding counter ID.
-    phys_counter_for_node: FxIndexMap<BasicCoverageBlock, CounterId>,
-    next_counter_id: CounterId,
+    pub(crate) phys_counter_for_node: FxIndexMap<BasicCoverageBlock, CounterId>,
+    pub(crate) next_counter_id: CounterId,
 
     /// Coverage counters/expressions that are associated with individual BCBs.
-    node_counters: IndexVec<BasicCoverageBlock, Option<CovTerm>>,
+    pub(crate) node_counters: IndexVec<BasicCoverageBlock, Option<CovTerm>>,
 
     /// Table of expression data, associating each expression ID with its
     /// corresponding operator (+ or -) and its LHS/RHS operands.
-    expressions: IndexVec<ExpressionId, Expression>,
+    pub(crate) expressions: IndexVec<ExpressionId, Expression>,
     /// Remember expressions that have already been created (or simplified),
     /// so that we don't create unnecessary duplicates.
     expressions_memo: FxHashMap<Expression, CovTerm>,
@@ -188,12 +189,6 @@ impl CoverageCounters {
         self.make_expression(lhs, Op::Subtract, rhs_sum)
     }
 
-    pub(super) fn num_counters(&self) -> usize {
-        let num_counters = self.phys_counter_for_node.len();
-        assert_eq!(num_counters, self.next_counter_id.as_usize());
-        num_counters
-    }
-
     fn set_node_counter(&mut self, bcb: BasicCoverageBlock, counter: CovTerm) -> CovTerm {
         let existing = self.node_counters[bcb].replace(counter);
         assert!(
@@ -202,34 +197,4 @@ impl CoverageCounters {
         );
         counter
     }
-
-    pub(super) fn term_for_bcb(&self, bcb: BasicCoverageBlock) -> Option<CovTerm> {
-        self.node_counters[bcb]
-    }
-
-    /// Returns an iterator over all the nodes in the coverage graph that
-    /// should have a counter-increment statement injected into MIR, along with
-    /// each site's corresponding counter ID.
-    pub(super) fn counter_increment_sites(
-        &self,
-    ) -> impl Iterator<Item = (CounterId, BasicCoverageBlock)> + Captures<'_> {
-        self.phys_counter_for_node.iter().map(|(&site, &id)| (id, site))
-    }
-
-    /// Returns an iterator over the subset of BCB nodes that have been associated
-    /// with a counter *expression*, along with the ID of that expression.
-    pub(super) fn bcb_nodes_with_coverage_expressions(
-        &self,
-    ) -> impl Iterator<Item = (BasicCoverageBlock, ExpressionId)> + Captures<'_> {
-        self.node_counters.iter_enumerated().filter_map(|(bcb, &counter)| match counter {
-            // Yield the BCB along with its associated expression ID.
-            Some(CovTerm::Expression(id)) => Some((bcb, id)),
-            // This BCB is associated with a counter or nothing, so skip it.
-            Some(CovTerm::Counter { .. } | CovTerm::Zero) | None => None,
-        })
-    }
-
-    pub(super) fn into_expressions(self) -> IndexVec<ExpressionId, Expression> {
-        self.expressions
-    }
 }
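
The transcription logic above splits each node's counter terms into positive and negative lists, and treats any term whose node was optimized out as zero. A standalone sketch of that arithmetic, under the simplifying assumption that physical counter values are already known as plain integers (rustc instead builds symbolic `Expression`s; all names here are illustrative):

use std::collections::HashSet;

#[derive(Clone, Copy)]
enum Op {
    Add,
    Subtract,
}

// count(bcb) = sum(positive terms) - sum(negative terms), where any term
// whose node was optimized out of the MIR contributes zero.
fn node_count(terms: &[(usize, Op)], seen: &HashSet<usize>, phys: &[u32]) -> u32 {
    let (pos, neg): (Vec<_>, Vec<_>) = terms
        .iter()
        .copied()
        .filter(|(node, _)| seen.contains(node)) // unseen nodes count as zero
        .partition(|(_, op)| matches!(op, Op::Add));
    let sum = |v: &[(usize, Op)]| v.iter().map(|&(n, _)| phys[n]).sum::<u32>();
    // If there are no positive terms at all, the result is simply zero.
    sum(&pos).saturating_sub(sum(&neg))
}

fn main() {
    let seen: HashSet<usize> = [0, 2].into_iter().collect();
    let phys = [5, 9, 3];
    // count = c0 + c1 - c2, but node 1 was optimized out, so c1 is zero.
    let terms = [(0, Op::Add), (1, Op::Add), (2, Op::Subtract)];
    assert_eq!(node_count(&terms, &seen, &phys), 2);
}
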
diff --git a/compiler/rustc_mir_transform/src/coverage/counters/node_flow.rs b/compiler/rustc_mir_transform/src/coverage/counters/node_flow.rs
index 9d80b3af42d..91ed54b8b59 100644
--- a/compiler/rustc_mir_transform/src/coverage/counters/node_flow.rs
+++ b/compiler/rustc_mir_transform/src/coverage/counters/node_flow.rs
@@ -9,6 +9,7 @@
 use rustc_data_structures::graph;
 use rustc_index::bit_set::DenseBitSet;
 use rustc_index::{Idx, IndexSlice, IndexVec};
+pub(crate) use rustc_middle::mir::coverage::NodeFlowData;
 use rustc_middle::mir::coverage::Op;
 
 use crate::coverage::counters::union_find::UnionFind;
@@ -16,30 +17,6 @@ use crate::coverage::counters::union_find::UnionFind;
 #[cfg(test)]
 mod tests;
 
-/// Data representing a view of some underlying graph, in which each node's
-/// successors have been merged into a single "supernode".
-///
-/// The resulting supernodes have no obvious meaning on their own.
-/// However, merging successor nodes means that a node's out-edges can all
-/// be combined into a single out-edge, whose flow is the same as the flow
-/// (execution count) of its corresponding node in the original graph.
-///
-/// With all node flows now in the original graph now represented as edge flows
-/// in the merged graph, it becomes possible to analyze the original node flows
-/// using techniques for analyzing edge flows.
-#[derive(Debug)]
-pub(crate) struct NodeFlowData<Node: Idx> {
-    /// Maps each node to the supernode that contains it, indicated by some
-    /// arbitrary "root" node that is part of that supernode.
-    supernodes: IndexVec<Node, Node>,
-    /// For each node, stores the single supernode that all of its successors
-    /// have been merged into.
-    ///
-    /// (Note that each node in a supernode can potentially have a _different_
-    /// successor supernode from its peers.)
-    succ_supernodes: IndexVec<Node, Node>,
-}
-
 /// Creates a "merged" view of an underlying graph.
 ///
 /// The given graph is assumed to have [“balanced flow”](balanced-flow),
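
The relocated doc comment describes merging each node's successors into a single "supernode"; the module performs that merging with its `union_find::UnionFind` helper. Below is a minimal illustrative union-find with path compression, sketching the idea rather than reproducing rustc's actual implementation:

// Minimal union-find: each set of merged nodes is represented by one root,
// playing the role of a "supernode" representative.
struct UnionFind {
    parent: Vec<usize>,
}

impl UnionFind {
    fn new(n: usize) -> Self {
        UnionFind { parent: (0..n).collect() }
    }

    /// Find the root of `x`, compressing paths along the way.
    fn find(&mut self, x: usize) -> usize {
        if self.parent[x] != x {
            let root = self.find(self.parent[x]);
            self.parent[x] = root;
        }
        self.parent[x]
    }

    /// Merge the sets containing `a` and `b`.
    fn union(&mut self, a: usize, b: usize) {
        let (ra, rb) = (self.find(a), self.find(b));
        if ra != rb {
            self.parent[ra] = rb;
        }
    }
}

fn main() {
    // Merging all successors of a node into one supernode.
    let mut uf = UnionFind::new(4);
    uf.union(1, 2);
    uf.union(2, 3);
    assert_eq!(uf.find(1), uf.find(3));
}
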
diff --git a/compiler/rustc_mir_transform/src/coverage/graph.rs b/compiler/rustc_mir_transform/src/coverage/graph.rs
index 392b54c8d81..dcc7c5b91d7 100644
--- a/compiler/rustc_mir_transform/src/coverage/graph.rs
+++ b/compiler/rustc_mir_transform/src/coverage/graph.rs
@@ -2,12 +2,12 @@ use std::cmp::Ordering;
 use std::ops::{Index, IndexMut};
 use std::{mem, slice};
 
-use rustc_data_structures::captures::Captures;
 use rustc_data_structures::fx::FxHashSet;
 use rustc_data_structures::graph::dominators::Dominators;
 use rustc_data_structures::graph::{self, DirectedGraph, StartNode};
 use rustc_index::IndexVec;
 use rustc_index::bit_set::DenseBitSet;
+pub(crate) use rustc_middle::mir::coverage::{BasicCoverageBlock, START_BCB};
 use rustc_middle::mir::{self, BasicBlock, Terminator, TerminatorKind};
 use tracing::debug;
 
@@ -42,7 +42,7 @@ impl CoverageGraph {
         // `SwitchInt` to have multiple targets to the same destination `BasicBlock`, so
         // de-duplication is required. This is done without reordering the successors.
 
-        let successors = IndexVec::from_fn_n(
+        let successors = IndexVec::<BasicCoverageBlock, _>::from_fn_n(
             |bcb| {
                 let mut seen_bcbs = FxHashSet::default();
                 let terminator = mir_body[bcbs[bcb].last_bb()].terminator();
@@ -217,7 +217,7 @@ impl CoverageGraph {
     pub(crate) fn reloop_predecessors(
         &self,
         to_bcb: BasicCoverageBlock,
-    ) -> impl Iterator<Item = BasicCoverageBlock> + Captures<'_> {
+    ) -> impl Iterator<Item = BasicCoverageBlock> {
         self.predecessors[to_bcb].iter().copied().filter(move |&pred| self.dominates(to_bcb, pred))
     }
 }
@@ -269,15 +269,6 @@ impl graph::Predecessors for CoverageGraph {
     }
 }
 
-rustc_index::newtype_index! {
-    /// A node in the control-flow graph of CoverageGraph.
-    #[orderable]
-    #[debug_format = "bcb{}"]
-    pub(crate) struct BasicCoverageBlock {
-        const START_BCB = 0;
-    }
-}
-
 /// `BasicCoverageBlockData` holds the data indexed by a `BasicCoverageBlock`.
 ///
 /// A `BasicCoverageBlock` (BCB) represents the maximal-length sequence of MIR `BasicBlock`s without
diff --git a/compiler/rustc_mir_transform/src/coverage/mappings.rs b/compiler/rustc_mir_transform/src/coverage/mappings.rs
index 8d0d92dc367..d83c0d40a7e 100644
--- a/compiler/rustc_mir_transform/src/coverage/mappings.rs
+++ b/compiler/rustc_mir_transform/src/coverage/mappings.rs
@@ -1,9 +1,7 @@
 use std::collections::BTreeSet;
 
 use rustc_data_structures::fx::FxIndexMap;
-use rustc_data_structures::graph::DirectedGraph;
 use rustc_index::IndexVec;
-use rustc_index::bit_set::DenseBitSet;
 use rustc_middle::mir::coverage::{
     BlockMarkerId, BranchSpan, ConditionId, ConditionInfo, CoverageInfoHi, CoverageKind,
 };
@@ -63,10 +61,6 @@ const MCDC_MAX_BITMAP_SIZE: usize = i32::MAX as usize;
 
 #[derive(Default)]
 pub(super) struct ExtractedMappings {
-    /// Store our own copy of [`CoverageGraph::num_nodes`], so that we don't
-    /// need access to the whole graph when allocating per-BCB data. This is
-    /// only public so that other code can still use exhaustive destructuring.
-    pub(super) num_bcbs: usize,
     pub(super) code_mappings: Vec<CodeMapping>,
     pub(super) branch_pairs: Vec<BranchPair>,
     pub(super) mcdc_bitmap_bits: usize,
@@ -118,7 +112,6 @@ pub(super) fn extract_all_mapping_info_from_mir<'tcx>(
     );
 
     ExtractedMappings {
-        num_bcbs: graph.num_nodes(),
         code_mappings,
         branch_pairs,
         mcdc_bitmap_bits,
@@ -127,60 +120,6 @@ pub(super) fn extract_all_mapping_info_from_mir<'tcx>(
     }
 }
 
-impl ExtractedMappings {
-    pub(super) fn all_bcbs_with_counter_mappings(&self) -> DenseBitSet<BasicCoverageBlock> {
-        // Fully destructure self to make sure we don't miss any fields that have mappings.
-        let Self {
-            num_bcbs,
-            code_mappings,
-            branch_pairs,
-            mcdc_bitmap_bits: _,
-            mcdc_degraded_branches,
-            mcdc_mappings,
-        } = self;
-
-        // Identify which BCBs have one or more mappings.
-        let mut bcbs_with_counter_mappings = DenseBitSet::new_empty(*num_bcbs);
-        let mut insert = |bcb| {
-            bcbs_with_counter_mappings.insert(bcb);
-        };
-
-        for &CodeMapping { span: _, bcb } in code_mappings {
-            insert(bcb);
-        }
-        for &BranchPair { true_bcb, false_bcb, .. } in branch_pairs {
-            insert(true_bcb);
-            insert(false_bcb);
-        }
-        for &MCDCBranch { true_bcb, false_bcb, .. } in mcdc_degraded_branches
-            .iter()
-            .chain(mcdc_mappings.iter().map(|(_, branches)| branches.into_iter()).flatten())
-        {
-            insert(true_bcb);
-            insert(false_bcb);
-        }
-
-        // MC/DC decisions refer to BCBs, but don't require those BCBs to have counters.
-        if bcbs_with_counter_mappings.is_empty() {
-            debug_assert!(
-                mcdc_mappings.is_empty(),
-                "A function with no counter mappings shouldn't have any decisions: {mcdc_mappings:?}",
-            );
-        }
-
-        bcbs_with_counter_mappings
-    }
-
-    /// Returns the set of BCBs that have one or more `Code` mappings.
-    pub(super) fn bcbs_with_ordinary_code_mappings(&self) -> DenseBitSet<BasicCoverageBlock> {
-        let mut bcbs = DenseBitSet::new_empty(self.num_bcbs);
-        for &CodeMapping { span: _, bcb } in &self.code_mappings {
-            bcbs.insert(bcb);
-        }
-        bcbs
-    }
-}
-
 fn resolve_block_markers(
     coverage_info_hi: &CoverageInfoHi,
     mir_body: &mir::Body<'_>,
diff --git a/compiler/rustc_mir_transform/src/coverage/mod.rs b/compiler/rustc_mir_transform/src/coverage/mod.rs
index 15487d05a30..1ccae0fd7fe 100644
--- a/compiler/rustc_mir_transform/src/coverage/mod.rs
+++ b/compiler/rustc_mir_transform/src/coverage/mod.rs
@@ -10,7 +10,6 @@ mod unexpand;
 
 use rustc_hir as hir;
 use rustc_hir::intravisit::{Visitor, walk_expr};
-use rustc_middle::hir::map::Map;
 use rustc_middle::hir::nested_filter;
 use rustc_middle::mir::coverage::{
     CoverageKind, DecisionInfo, FunctionCoverageInfo, Mapping, MappingKind,
@@ -21,7 +20,7 @@ use rustc_span::Span;
 use rustc_span::def_id::LocalDefId;
 use tracing::{debug, debug_span, trace};
 
-use crate::coverage::counters::CoverageCounters;
+use crate::coverage::counters::BcbCountersData;
 use crate::coverage::graph::CoverageGraph;
 use crate::coverage::mappings::ExtractedMappings;
 
@@ -82,28 +81,21 @@ fn instrument_function_for_coverage<'tcx>(tcx: TyCtxt<'tcx>, mir_body: &mut mir:
     let extracted_mappings =
         mappings::extract_all_mapping_info_from_mir(tcx, mir_body, &hir_info, &graph);
 
-    ////////////////////////////////////////////////////
-    // Create an optimized mix of `Counter`s and `Expression`s for the `CoverageGraph`. Ensure
-    // every coverage span has a `Counter` or `Expression` assigned to its `BasicCoverageBlock`
-    // and all `Expression` dependencies (operands) are also generated, for any other
-    // `BasicCoverageBlock`s not already associated with a coverage span.
-    let bcbs_with_counter_mappings = extracted_mappings.all_bcbs_with_counter_mappings();
-    if bcbs_with_counter_mappings.is_empty() {
-        // No relevant spans were found in MIR, so skip instrumenting this function.
-        return;
-    }
-
-    let coverage_counters = counters::make_bcb_counters(&graph, &bcbs_with_counter_mappings);
-
-    let mappings = create_mappings(&extracted_mappings, &coverage_counters);
+    let mappings = create_mappings(&extracted_mappings);
     if mappings.is_empty() {
         // No spans could be converted into valid mappings, so skip this function.
         debug!("no spans could be converted into valid mappings; skipping");
         return;
     }
 
-    inject_coverage_statements(mir_body, &graph, &extracted_mappings, &coverage_counters);
+    // Use the coverage graph to prepare intermediate data that will eventually
+    // be used to assign physical counters and counter expressions to points in
+    // the control-flow graph.
+    let BcbCountersData { node_flow_data, priority_list } =
+        counters::prepare_bcb_counters_data(&graph);
 
+    // Inject coverage statements into MIR.
+    inject_coverage_statements(mir_body, &graph);
     inject_mcdc_statements(mir_body, &graph, &extracted_mappings);
 
     let mcdc_num_condition_bitmaps = extracted_mappings
@@ -116,29 +108,25 @@ fn instrument_function_for_coverage<'tcx>(tcx: TyCtxt<'tcx>, mir_body: &mut mir:
     mir_body.function_coverage_info = Some(Box::new(FunctionCoverageInfo {
         function_source_hash: hir_info.function_source_hash,
         body_span: hir_info.body_span,
-        num_counters: coverage_counters.num_counters(),
-        mcdc_bitmap_bits: extracted_mappings.mcdc_bitmap_bits,
-        expressions: coverage_counters.into_expressions(),
+
+        node_flow_data,
+        priority_list,
+
         mappings,
+
+        mcdc_bitmap_bits: extracted_mappings.mcdc_bitmap_bits,
         mcdc_num_condition_bitmaps,
     }));
 }
 
-/// For each coverage span extracted from MIR, create a corresponding
-/// mapping.
+/// For each coverage span extracted from MIR, create a corresponding mapping.
 ///
-/// Precondition: All BCBs corresponding to those spans have been given
-/// coverage counters.
-fn create_mappings(
-    extracted_mappings: &ExtractedMappings,
-    coverage_counters: &CoverageCounters,
-) -> Vec<Mapping> {
-    let term_for_bcb =
-        |bcb| coverage_counters.term_for_bcb(bcb).expect("all BCBs with spans were given counters");
-
+/// FIXME(Zalathar): This used to be where BCBs in the extracted mappings were
+/// resolved to a `CovTerm`. But that is now handled elsewhere, so this
+/// function can potentially be simplified even further.
+fn create_mappings(extracted_mappings: &ExtractedMappings) -> Vec<Mapping> {
     // Fully destructure the mappings struct to make sure we don't miss any kinds.
     let ExtractedMappings {
-        num_bcbs: _,
         code_mappings,
         branch_pairs,
         mcdc_bitmap_bits: _,
@@ -150,23 +138,18 @@ fn create_mappings(
     mappings.extend(code_mappings.iter().map(
         // Ordinary code mappings are the simplest kind.
         |&mappings::CodeMapping { span, bcb }| {
-            let kind = MappingKind::Code(term_for_bcb(bcb));
+            let kind = MappingKind::Code { bcb };
             Mapping { kind, span }
         },
     ));
 
     mappings.extend(branch_pairs.iter().map(
         |&mappings::BranchPair { span, true_bcb, false_bcb }| {
-            let true_term = term_for_bcb(true_bcb);
-            let false_term = term_for_bcb(false_bcb);
-            let kind = MappingKind::Branch { true_term, false_term };
+            let kind = MappingKind::Branch { true_bcb, false_bcb };
             Mapping { kind, span }
         },
     ));
 
-    let term_for_bcb =
-        |bcb| coverage_counters.term_for_bcb(bcb).expect("all BCBs with spans were given counters");
-
     // MCDC branch mappings are appended with their decisions in case decisions were ignored.
     mappings.extend(mcdc_degraded_branches.iter().map(
         |&mappings::MCDCBranch {
@@ -176,11 +159,7 @@ fn create_mappings(
              condition_info: _,
              true_index: _,
              false_index: _,
-         }| {
-            let true_term = term_for_bcb(true_bcb);
-            let false_term = term_for_bcb(false_bcb);
-            Mapping { kind: MappingKind::Branch { true_term, false_term }, span }
-        },
+         }| { Mapping { kind: MappingKind::Branch { true_bcb, false_bcb }, span } },
     ));
 
     for (decision, branches) in mcdc_mappings {
@@ -201,12 +180,10 @@ fn create_mappings(
                      true_index: _,
                      false_index: _,
                  }| {
-                    let true_term = term_for_bcb(true_bcb);
-                    let false_term = term_for_bcb(false_bcb);
                     Mapping {
                         kind: MappingKind::MCDCBranch {
-                            true_term,
-                            false_term,
+                            true_bcb,
+                            false_bcb,
                             mcdc_params: condition_info,
                         },
                         span,
@@ -227,41 +204,11 @@ fn create_mappings(
     mappings
 }
 
-/// For each BCB node or BCB edge that has an associated coverage counter,
-/// inject any necessary coverage statements into MIR.
-fn inject_coverage_statements<'tcx>(
-    mir_body: &mut mir::Body<'tcx>,
-    graph: &CoverageGraph,
-    extracted_mappings: &ExtractedMappings,
-    coverage_counters: &CoverageCounters,
-) {
-    // Inject counter-increment statements into MIR.
-    for (id, bcb) in coverage_counters.counter_increment_sites() {
-        let target_bb = graph[bcb].leader_bb();
-        inject_statement(mir_body, CoverageKind::CounterIncrement { id }, target_bb);
-    }
-
-    // For each counter expression that is directly associated with at least one
-    // span, we inject an "expression-used" statement, so that coverage codegen
-    // can check whether the injected statement survived MIR optimization.
-    // (BCB edges can't have spans, so we only need to process BCB nodes here.)
-    //
-    // We only do this for ordinary `Code` mappings, because branch and MC/DC
-    // mappings might have expressions that don't correspond to any single
-    // point in the control-flow graph.
-    //
-    // See the code in `rustc_codegen_llvm::coverageinfo::map_data` that deals
-    // with "expressions seen" and "zero terms".
-    let eligible_bcbs = extracted_mappings.bcbs_with_ordinary_code_mappings();
-    for (bcb, expression_id) in coverage_counters
-        .bcb_nodes_with_coverage_expressions()
-        .filter(|&(bcb, _)| eligible_bcbs.contains(bcb))
-    {
-        inject_statement(
-            mir_body,
-            CoverageKind::ExpressionUsed { id: expression_id },
-            graph[bcb].leader_bb(),
-        );
+/// Inject any necessary coverage statements into MIR, so that they influence codegen.
+fn inject_coverage_statements<'tcx>(mir_body: &mut mir::Body<'tcx>, graph: &CoverageGraph) {
+    for (bcb, data) in graph.iter_enumerated() {
+        let target_bb = data.leader_bb();
+        inject_statement(mir_body, CoverageKind::VirtualCounter { bcb }, target_bb);
     }
 }
 
@@ -343,7 +290,7 @@ fn extract_hir_info<'tcx>(tcx: TyCtxt<'tcx>, def_id: LocalDefId) -> ExtractedHir
 
     let hir_node = tcx.hir_node_by_def_id(def_id);
     let fn_body_id = hir_node.body_id().expect("HIR node is a function with body");
-    let hir_body = tcx.hir().body(fn_body_id);
+    let hir_body = tcx.hir_body(fn_body_id);
 
     let maybe_fn_sig = hir_node.fn_sig();
     let is_async_fn = maybe_fn_sig.is_some_and(|fn_sig| fn_sig.header.is_async());
@@ -399,35 +346,37 @@ fn extract_hole_spans_from_hir<'tcx>(
     body_span: Span, // Usually `hir_body.value.span`, but not always
     hir_body: &hir::Body<'tcx>,
 ) -> Vec<Span> {
-    struct HolesVisitor<'hir, F> {
-        hir: Map<'hir>,
-        visit_hole_span: F,
+    struct HolesVisitor<'tcx> {
+        tcx: TyCtxt<'tcx>,
+        body_span: Span,
+        hole_spans: Vec<Span>,
     }
 
-    impl<'hir, F: FnMut(Span)> Visitor<'hir> for HolesVisitor<'hir, F> {
-        /// - We need `NestedFilter::INTRA = true` so that `visit_item` will be called.
-        /// - Bodies of nested items don't actually get visited, because of the
-        ///   `visit_item` override.
-        /// - For nested bodies that are not part of an item, we do want to visit any
-        ///   items contained within them.
-        type NestedFilter = nested_filter::All;
+    impl<'tcx> Visitor<'tcx> for HolesVisitor<'tcx> {
+        /// We have special handling for nested items, but we still want to
+        /// traverse into nested bodies of things that are not considered items,
+        /// such as "anon consts" (e.g. array lengths).
+        type NestedFilter = nested_filter::OnlyBodies;
 
-        fn nested_visit_map(&mut self) -> Self::Map {
-            self.hir
+        fn maybe_tcx(&mut self) -> TyCtxt<'tcx> {
+            self.tcx
         }
 
-        fn visit_item(&mut self, item: &'hir hir::Item<'hir>) {
-            (self.visit_hole_span)(item.span);
+        /// We override `visit_nested_item` instead of `visit_item` because we
+        /// only need the item's span, not the item itself.
+        fn visit_nested_item(&mut self, id: hir::ItemId) -> Self::Result {
+            let span = self.tcx.def_span(id.owner_id.def_id);
+            self.visit_hole_span(span);
             // Having visited this item, we don't care about its children,
             // so don't call `walk_item`.
         }
 
         // We override `visit_expr` instead of the more specific expression
         // visitors, so that we have direct access to the expression span.
-        fn visit_expr(&mut self, expr: &'hir hir::Expr<'hir>) {
+        fn visit_expr(&mut self, expr: &'tcx hir::Expr<'tcx>) {
             match expr.kind {
                 hir::ExprKind::Closure(_) | hir::ExprKind::ConstBlock(_) => {
-                    (self.visit_hole_span)(expr.span);
+                    self.visit_hole_span(expr.span);
                     // Having visited this expression, we don't care about its
                     // children, so don't call `walk_expr`.
                 }
@@ -437,18 +386,17 @@ fn extract_hole_spans_from_hir<'tcx>(
             }
         }
     }
-
-    let mut hole_spans = vec![];
-    let mut visitor = HolesVisitor {
-        hir: tcx.hir(),
-        visit_hole_span: |hole_span| {
+    impl HolesVisitor<'_> {
+        fn visit_hole_span(&mut self, hole_span: Span) {
             // Discard any holes that aren't directly visible within the body span.
-            if body_span.contains(hole_span) && body_span.eq_ctxt(hole_span) {
-                hole_spans.push(hole_span);
+            if self.body_span.contains(hole_span) && self.body_span.eq_ctxt(hole_span) {
+                self.hole_spans.push(hole_span);
             }
-        },
-    };
+        }
+    }
+
+    let mut visitor = HolesVisitor { tcx, body_span, hole_spans: vec![] };
 
     visitor.visit_body(hir_body);
-    hole_spans
+    visitor.hole_spans
 }
diff --git a/compiler/rustc_mir_transform/src/coverage/query.rs b/compiler/rustc_mir_transform/src/coverage/query.rs
index a849ed4c3e2..ccf76dc7108 100644
--- a/compiler/rustc_mir_transform/src/coverage/query.rs
+++ b/compiler/rustc_mir_transform/src/coverage/query.rs
@@ -1,10 +1,6 @@
-use rustc_data_structures::captures::Captures;
 use rustc_index::bit_set::DenseBitSet;
 use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
-use rustc_middle::mir::coverage::{
-    CounterId, CovTerm, CoverageIdsInfo, CoverageKind, Expression, ExpressionId,
-    FunctionCoverageInfo, MappingKind, Op,
-};
+use rustc_middle::mir::coverage::{BasicCoverageBlock, CoverageIdsInfo, CoverageKind, MappingKind};
 use rustc_middle::mir::{Body, Statement, StatementKind};
 use rustc_middle::ty::{self, TyCtxt};
 use rustc_middle::util::Providers;
@@ -12,6 +8,9 @@ use rustc_span::def_id::LocalDefId;
 use rustc_span::sym;
 use tracing::trace;
 
+use crate::coverage::counters::node_flow::make_node_counters;
+use crate::coverage::counters::{CoverageCounters, transcribe_counters};
+
 /// Registers query/hook implementations related to coverage.
 pub(crate) fn provide(providers: &mut Providers) {
     providers.hooks.is_eligible_for_coverage = is_eligible_for_coverage;
@@ -66,7 +65,7 @@ fn coverage_attr_on(tcx: TyCtxt<'_>, def_id: LocalDefId) -> bool {
             Some(_) | None => {
                 // Other possibilities should have been rejected by `rustc_parse::validate_attr`.
                 // Use `span_delayed_bug` to avoid an ICE in failing builds (#127880).
-                tcx.dcx().span_delayed_bug(attr.span, "unexpected value of coverage attribute");
+                tcx.dcx().span_delayed_bug(attr.span(), "unexpected value of coverage attribute");
             }
         }
     }
@@ -89,44 +88,71 @@ fn coverage_ids_info<'tcx>(
     let mir_body = tcx.instance_mir(instance_def);
     let fn_cov_info = mir_body.function_coverage_info.as_deref()?;
 
-    let mut counters_seen = DenseBitSet::new_empty(fn_cov_info.num_counters);
-    let mut expressions_seen = DenseBitSet::new_filled(fn_cov_info.expressions.len());
-
-    // For each expression ID that is directly used by one or more mappings,
-    // mark it as not-yet-seen. This indicates that we expect to see a
-    // corresponding `ExpressionUsed` statement during MIR traversal.
-    for mapping in fn_cov_info.mappings.iter() {
-        // Currently we only worry about ordinary code mappings.
-        // For branch and MC/DC mappings, expressions might not correspond
-        // to any particular point in the control-flow graph.
-        // (Keep this in sync with the injection of `ExpressionUsed`
-        // statements in the `InstrumentCoverage` MIR pass.)
-        if let MappingKind::Code(CovTerm::Expression(id)) = mapping.kind {
-            expressions_seen.remove(id);
-        }
-    }
-
+    // Scan through the final MIR to see which BCBs survived MIR opts.
+    // Any BCB not in this set was optimized away.
+    let mut bcbs_seen = DenseBitSet::new_empty(fn_cov_info.priority_list.len());
     for kind in all_coverage_in_mir_body(mir_body) {
         match *kind {
-            CoverageKind::CounterIncrement { id } => {
-                counters_seen.insert(id);
-            }
-            CoverageKind::ExpressionUsed { id } => {
-                expressions_seen.insert(id);
+            CoverageKind::VirtualCounter { bcb } => {
+                bcbs_seen.insert(bcb);
             }
             _ => {}
         }
     }
 
-    let zero_expressions =
-        identify_zero_expressions(fn_cov_info, &counters_seen, &expressions_seen);
+    // Determine the set of BCBs that are referred to by mappings, and therefore
+    // need a counter. Any node not in this set will only get a counter if it
+    // is part of the counter expression for a node that is in the set.
+    let mut bcb_needs_counter =
+        DenseBitSet::<BasicCoverageBlock>::new_empty(fn_cov_info.priority_list.len());
+    for mapping in &fn_cov_info.mappings {
+        match mapping.kind {
+            MappingKind::Code { bcb } => {
+                bcb_needs_counter.insert(bcb);
+            }
+            MappingKind::Branch { true_bcb, false_bcb } => {
+                bcb_needs_counter.insert(true_bcb);
+                bcb_needs_counter.insert(false_bcb);
+            }
+            MappingKind::MCDCBranch { true_bcb, false_bcb, mcdc_params: _ } => {
+                bcb_needs_counter.insert(true_bcb);
+                bcb_needs_counter.insert(false_bcb);
+            }
+            MappingKind::MCDCDecision(_) => {}
+        }
+    }
 
-    Some(CoverageIdsInfo { counters_seen, zero_expressions })
+    // Clone the priority list so that we can re-sort it.
+    let mut priority_list = fn_cov_info.priority_list.clone();
+    // The first ID in the priority list represents the synthetic "sink" node,
+    // and must remain first so that it _never_ gets a physical counter.
+    debug_assert_eq!(priority_list[0], priority_list.iter().copied().max().unwrap());
+    assert!(!bcbs_seen.contains(priority_list[0]));
+    // Partition the priority list, so that unreachable nodes (removed by MIR opts)
+    // are sorted later and therefore are _more_ likely to get a physical counter.
+    // This is counter-intuitive, but it means that `transcribe_counters` can
+    // easily skip those unused physical counters and replace them with zero.
+    // (The original ordering remains in effect within both partitions.)
+    priority_list[1..].sort_by_key(|&bcb| !bcbs_seen.contains(bcb));
+
+    let node_counters = make_node_counters(&fn_cov_info.node_flow_data, &priority_list);
+    let coverage_counters = transcribe_counters(&node_counters, &bcb_needs_counter, &bcbs_seen);
+
+    let CoverageCounters {
+        phys_counter_for_node, next_counter_id, node_counters, expressions, ..
+    } = coverage_counters;
+
+    Some(CoverageIdsInfo {
+        num_counters: next_counter_id.as_u32(),
+        phys_counter_for_node,
+        term_for_bcb: node_counters,
+        expressions,
+    })
 }
 
 fn all_coverage_in_mir_body<'a, 'tcx>(
     body: &'a Body<'tcx>,
-) -> impl Iterator<Item = &'a CoverageKind> + Captures<'tcx> {
+) -> impl Iterator<Item = &'a CoverageKind> {
     body.basic_blocks.iter().flat_map(|bb_data| &bb_data.statements).filter_map(|statement| {
         match statement.kind {
             StatementKind::Coverage(ref kind) if !is_inlined(body, statement) => Some(kind),
@@ -139,94 +165,3 @@ fn is_inlined(body: &Body<'_>, statement: &Statement<'_>) -> bool {
     let scope_data = &body.source_scopes[statement.source_info.scope];
     scope_data.inlined.is_some() || scope_data.inlined_parent_scope.is_some()
 }
-
-/// Identify expressions that will always have a value of zero, and note their
-/// IDs in a `DenseBitSet`. Mappings that refer to a zero expression can instead
-/// become mappings to a constant zero value.
-///
-/// This function mainly exists to preserve the simplifications that were
-/// already being performed by the Rust-side expression renumbering, so that
-/// the resulting coverage mappings don't get worse.
-fn identify_zero_expressions(
-    fn_cov_info: &FunctionCoverageInfo,
-    counters_seen: &DenseBitSet<CounterId>,
-    expressions_seen: &DenseBitSet<ExpressionId>,
-) -> DenseBitSet<ExpressionId> {
-    // The set of expressions that either were optimized out entirely, or
-    // have zero as both of their operands, and will therefore always have
-    // a value of zero. Other expressions that refer to these as operands
-    // can have those operands replaced with `CovTerm::Zero`.
-    let mut zero_expressions = DenseBitSet::new_empty(fn_cov_info.expressions.len());
-
-    // Simplify a copy of each expression based on lower-numbered expressions,
-    // and then update the set of always-zero expressions if necessary.
-    // (By construction, expressions can only refer to other expressions
-    // that have lower IDs, so one pass is sufficient.)
-    for (id, expression) in fn_cov_info.expressions.iter_enumerated() {
-        if !expressions_seen.contains(id) {
-            // If an expression was not seen, it must have been optimized away,
-            // so any operand that refers to it can be replaced with zero.
-            zero_expressions.insert(id);
-            continue;
-        }
-
-        // We don't need to simplify the actual expression data in the
-        // expressions list; we can just simplify a temporary copy and then
-        // use that to update the set of always-zero expressions.
-        let Expression { mut lhs, op, mut rhs } = *expression;
-
-        // If an expression has an operand that is also an expression, the
-        // operand's ID must be strictly lower. This is what lets us find
-        // all zero expressions in one pass.
-        let assert_operand_expression_is_lower = |operand_id: ExpressionId| {
-            assert!(
-                operand_id < id,
-                "Operand {operand_id:?} should be less than {id:?} in {expression:?}",
-            )
-        };
-
-        // If an operand refers to a counter or expression that is always
-        // zero, then that operand can be replaced with `CovTerm::Zero`.
-        let maybe_set_operand_to_zero = |operand: &mut CovTerm| {
-            if let CovTerm::Expression(id) = *operand {
-                assert_operand_expression_is_lower(id);
-            }
-
-            if is_zero_term(&counters_seen, &zero_expressions, *operand) {
-                *operand = CovTerm::Zero;
-            }
-        };
-        maybe_set_operand_to_zero(&mut lhs);
-        maybe_set_operand_to_zero(&mut rhs);
-
-        // Coverage counter values cannot be negative, so if an expression
-        // involves subtraction from zero, assume that its RHS must also be zero.
-        // (Do this after simplifications that could set the LHS to zero.)
-        if lhs == CovTerm::Zero && op == Op::Subtract {
-            rhs = CovTerm::Zero;
-        }
-
-        // After the above simplifications, if both operands are zero, then
-        // we know that this expression is always zero too.
-        if lhs == CovTerm::Zero && rhs == CovTerm::Zero {
-            zero_expressions.insert(id);
-        }
-    }
-
-    zero_expressions
-}
-
-/// Returns `true` if the given term is known to have a value of zero, taking
-/// into account knowledge of which counters are unused and which expressions
-/// are always zero.
-fn is_zero_term(
-    counters_seen: &DenseBitSet<CounterId>,
-    zero_expressions: &DenseBitSet<ExpressionId>,
-    term: CovTerm,
-) -> bool {
-    match term {
-        CovTerm::Zero => true,
-        CovTerm::Counter(id) => !counters_seen.contains(id),
-        CovTerm::Expression(id) => zero_expressions.contains(id),
-    }
-}
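
The partitioning in the new `coverage_ids_info` relies on `sort_by_key` being a stable sort: sorting by a boolean key is a stable partition, so both halves keep their original relative order, and element 0 stays pinned because only `[1..]` is sorted. A self-contained demonstration (the node IDs and seen-set are made up):

use std::collections::HashSet;

fn main() {
    let seen: HashSet<u32> = [0, 2, 4].into_iter().collect();
    let mut priority_list = vec![5, 3, 0, 4, 1, 2];

    // Stable partition: unseen (optimized-out) nodes move to the back.
    priority_list[1..].sort_by_key(|bcb| !seen.contains(bcb));

    // Seen nodes (0, 4, 2) stay in front in their original order; the
    // optimized-out nodes (3, 1) move to the back, also in original order.
    assert_eq!(priority_list, vec![5, 0, 4, 2, 3, 1]);
}
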
diff --git a/compiler/rustc_mir_transform/src/coverage/spans.rs b/compiler/rustc_mir_transform/src/coverage/spans.rs
index 314a86ea52f..b9ed6984ddb 100644
--- a/compiler/rustc_mir_transform/src/coverage/spans.rs
+++ b/compiler/rustc_mir_transform/src/coverage/spans.rs
@@ -1,6 +1,5 @@
 use std::collections::VecDeque;
 
-use rustc_data_structures::captures::Captures;
 use rustc_data_structures::fx::FxHashSet;
 use rustc_middle::mir;
 use rustc_span::{DesugaringKind, ExpnKind, MacroKind, Span};
@@ -182,7 +181,7 @@ fn divide_spans_into_buckets(input_covspans: Vec<Covspan>, holes: &[Hole]) -> Ve
 fn drain_front_while<'a, T>(
     queue: &'a mut VecDeque<T>,
     mut pred_fn: impl FnMut(&T) -> bool,
-) -> impl Iterator<Item = T> + Captures<'a> {
+) -> impl Iterator<Item = T> {
     std::iter::from_fn(move || if pred_fn(queue.front()?) { queue.pop_front() } else { None })
 }
 
diff --git a/compiler/rustc_mir_transform/src/coverage/spans/from_mir.rs b/compiler/rustc_mir_transform/src/coverage/spans/from_mir.rs
index 26ce743be36..73b68d7155c 100644
--- a/compiler/rustc_mir_transform/src/coverage/spans/from_mir.rs
+++ b/compiler/rustc_mir_transform/src/coverage/spans/from_mir.rs
@@ -137,8 +137,7 @@ fn filtered_statement_span(statement: &Statement<'_>) -> Option<Span> {
 
         // These coverage statements should not exist prior to coverage instrumentation.
         StatementKind::Coverage(
-            CoverageKind::CounterIncrement { .. }
-            | CoverageKind::ExpressionUsed { .. }
+            CoverageKind::VirtualCounter { .. }
             | CoverageKind::CondBitmapUpdate { .. }
             | CoverageKind::TestVectorBitmapUpdate { .. },
         ) => bug!(
diff --git a/compiler/rustc_mir_transform/src/deduplicate_blocks.rs b/compiler/rustc_mir_transform/src/deduplicate_blocks.rs
deleted file mode 100644
index 63257df66fb..00000000000
--- a/compiler/rustc_mir_transform/src/deduplicate_blocks.rs
+++ /dev/null
@@ -1,195 +0,0 @@
-//! This pass finds basic blocks that are completely equal,
-//! and replaces all uses with just one of them.
-
-use std::collections::hash_map::Entry;
-use std::hash::{Hash, Hasher};
-use std::iter;
-
-use rustc_data_structures::fx::FxHashMap;
-use rustc_middle::mir::visit::MutVisitor;
-use rustc_middle::mir::*;
-use rustc_middle::ty::TyCtxt;
-use tracing::debug;
-
-use super::simplify::simplify_cfg;
-
-pub(super) struct DeduplicateBlocks;
-
-impl<'tcx> crate::MirPass<'tcx> for DeduplicateBlocks {
-    fn is_enabled(&self, sess: &rustc_session::Session) -> bool {
-        sess.mir_opt_level() >= 4
-    }
-
-    fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
-        debug!("Running DeduplicateBlocks on `{:?}`", body.source);
-        let duplicates = find_duplicates(body);
-        let has_opts_to_apply = !duplicates.is_empty();
-
-        if has_opts_to_apply {
-            let mut opt_applier = OptApplier { tcx, duplicates };
-            opt_applier.visit_body(body);
-            simplify_cfg(body);
-        }
-    }
-
-    fn is_required(&self) -> bool {
-        false
-    }
-}
-
-struct OptApplier<'tcx> {
-    tcx: TyCtxt<'tcx>,
-    duplicates: FxHashMap<BasicBlock, BasicBlock>,
-}
-
-impl<'tcx> MutVisitor<'tcx> for OptApplier<'tcx> {
-    fn tcx(&self) -> TyCtxt<'tcx> {
-        self.tcx
-    }
-
-    fn visit_terminator(&mut self, terminator: &mut Terminator<'tcx>, location: Location) {
-        for target in terminator.successors_mut() {
-            if let Some(replacement) = self.duplicates.get(target) {
-                debug!("SUCCESS: Replacing: `{:?}` with `{:?}`", target, replacement);
-                *target = *replacement;
-            }
-        }
-
-        self.super_terminator(terminator, location);
-    }
-}
-
-fn find_duplicates(body: &Body<'_>) -> FxHashMap<BasicBlock, BasicBlock> {
-    let mut duplicates = FxHashMap::default();
-
-    let bbs_to_go_through =
-        body.basic_blocks.iter_enumerated().filter(|(_, bbd)| !bbd.is_cleanup).count();
-
-    let mut same_hashes =
-        FxHashMap::with_capacity_and_hasher(bbs_to_go_through, Default::default());
-
-    // Go through the basic blocks backwards. This means that in case of duplicates,
-    // we can use the basic block with the highest index as the replacement for all lower ones.
-    // For example, if bb1, bb2 and bb3 are duplicates, we will first insert bb3 in same_hashes.
-    // Then we will see that bb2 is a duplicate of bb3,
-    // and insert bb2 with the replacement bb3 in the duplicates list.
-    // When we see bb1, we see that it is a duplicate of bb3, and therefore insert it in the
-    // duplicates list with replacement bb3.
-    // When the duplicates are removed, we will end up with only bb3.
-    for (bb, bbd) in body.basic_blocks.iter_enumerated().rev().filter(|(_, bbd)| !bbd.is_cleanup) {
-        // Basic blocks can get really big, so to avoid checking for duplicates in basic blocks
-        // that are unlikely to have duplicates, we stop early. The early bail number has been
-        // found experimentally by eprintln while compiling the crates in the rustc-perf suite.
-        if bbd.statements.len() > 10 {
-            continue;
-        }
-
-        let to_hash = BasicBlockHashable { basic_block_data: bbd };
-        let entry = same_hashes.entry(to_hash);
-        match entry {
-            Entry::Occupied(occupied) => {
-                // The basic block was already in the hashmap, which means we have a duplicate
-                let value = *occupied.get();
-                debug!("Inserting {:?} -> {:?}", bb, value);
-                duplicates.try_insert(bb, value).expect("key was already inserted");
-            }
-            Entry::Vacant(vacant) => {
-                vacant.insert(bb);
-            }
-        }
-    }
-
-    duplicates
-}
-
-struct BasicBlockHashable<'a, 'tcx> {
-    basic_block_data: &'a BasicBlockData<'tcx>,
-}
-
-impl Hash for BasicBlockHashable<'_, '_> {
-    fn hash<H: Hasher>(&self, state: &mut H) {
-        hash_statements(state, self.basic_block_data.statements.iter());
-        // Note that since we only hash the kind, we lose span information if we deduplicate the
-        // blocks.
-        self.basic_block_data.terminator().kind.hash(state);
-    }
-}
-
-impl Eq for BasicBlockHashable<'_, '_> {}
-
-impl PartialEq for BasicBlockHashable<'_, '_> {
-    fn eq(&self, other: &Self) -> bool {
-        self.basic_block_data.statements.len() == other.basic_block_data.statements.len()
-            && &self.basic_block_data.terminator().kind == &other.basic_block_data.terminator().kind
-            && iter::zip(&self.basic_block_data.statements, &other.basic_block_data.statements)
-                .all(|(x, y)| statement_eq(&x.kind, &y.kind))
-    }
-}
-
-fn hash_statements<'a, 'tcx, H: Hasher>(
-    hasher: &mut H,
-    iter: impl Iterator<Item = &'a Statement<'tcx>>,
-) where
-    'tcx: 'a,
-{
-    for stmt in iter {
-        statement_hash(hasher, &stmt.kind);
-    }
-}
-
-fn statement_hash<H: Hasher>(hasher: &mut H, stmt: &StatementKind<'_>) {
-    match stmt {
-        StatementKind::Assign(box (place, rvalue)) => {
-            place.hash(hasher);
-            rvalue_hash(hasher, rvalue)
-        }
-        x => x.hash(hasher),
-    };
-}
-
-fn rvalue_hash<H: Hasher>(hasher: &mut H, rvalue: &Rvalue<'_>) {
-    match rvalue {
-        Rvalue::Use(op) => operand_hash(hasher, op),
-        x => x.hash(hasher),
-    };
-}
-
-fn operand_hash<H: Hasher>(hasher: &mut H, operand: &Operand<'_>) {
-    match operand {
-        Operand::Constant(box ConstOperand { user_ty: _, const_, span: _ }) => const_.hash(hasher),
-        x => x.hash(hasher),
-    };
-}
-
-fn statement_eq<'tcx>(lhs: &StatementKind<'tcx>, rhs: &StatementKind<'tcx>) -> bool {
-    let res = match (lhs, rhs) {
-        (
-            StatementKind::Assign(box (place, rvalue)),
-            StatementKind::Assign(box (place2, rvalue2)),
-        ) => place == place2 && rvalue_eq(rvalue, rvalue2),
-        (x, y) => x == y,
-    };
-    debug!("statement_eq lhs: `{:?}` rhs: `{:?}` result: {:?}", lhs, rhs, res);
-    res
-}
-
-fn rvalue_eq<'tcx>(lhs: &Rvalue<'tcx>, rhs: &Rvalue<'tcx>) -> bool {
-    let res = match (lhs, rhs) {
-        (Rvalue::Use(op1), Rvalue::Use(op2)) => operand_eq(op1, op2),
-        (x, y) => x == y,
-    };
-    debug!("rvalue_eq lhs: `{:?}` rhs: `{:?}` result: {:?}", lhs, rhs, res);
-    res
-}
-
-fn operand_eq<'tcx>(lhs: &Operand<'tcx>, rhs: &Operand<'tcx>) -> bool {
-    let res = match (lhs, rhs) {
-        (
-            Operand::Constant(box ConstOperand { user_ty: _, const_, span: _ }),
-            Operand::Constant(box ConstOperand { user_ty: _, const_: const2, span: _ }),
-        ) => const_ == const2,
-        (x, y) => x == y,
-    };
-    debug!("operand_eq lhs: `{:?}` rhs: `{:?}` result: {:?}", lhs, rhs, res);
-    res
-}
diff --git a/compiler/rustc_mir_transform/src/deref_separator.rs b/compiler/rustc_mir_transform/src/deref_separator.rs
index a54bdaa4075..bc914ea6564 100644
--- a/compiler/rustc_mir_transform/src/deref_separator.rs
+++ b/compiler/rustc_mir_transform/src/deref_separator.rs
@@ -1,9 +1,10 @@
-use rustc_middle::mir::patch::MirPatch;
 use rustc_middle::mir::visit::NonUseContext::VarDebugInfo;
 use rustc_middle::mir::visit::{MutVisitor, PlaceContext};
 use rustc_middle::mir::*;
 use rustc_middle::ty::TyCtxt;
 
+use crate::patch::MirPatch;
+
 pub(super) struct Derefer;
 
 struct DerefChecker<'a, 'tcx> {
diff --git a/compiler/rustc_mir_transform/src/early_otherwise_branch.rs b/compiler/rustc_mir_transform/src/early_otherwise_branch.rs
index eb335a1e4a7..57f7893be1b 100644
--- a/compiler/rustc_mir_transform/src/early_otherwise_branch.rs
+++ b/compiler/rustc_mir_transform/src/early_otherwise_branch.rs
@@ -1,11 +1,11 @@
 use std::fmt::Debug;
 
-use rustc_middle::mir::patch::MirPatch;
 use rustc_middle::mir::*;
 use rustc_middle::ty::{Ty, TyCtxt};
 use tracing::trace;
 
 use super::simplify::simplify_cfg;
+use crate::patch::MirPatch;
 
 /// This pass optimizes something like
 /// ```ignore (syntax-highlighting-only)
diff --git a/compiler/rustc_mir_transform/src/elaborate_box_derefs.rs b/compiler/rustc_mir_transform/src/elaborate_box_derefs.rs
index 6d3b5d9851b..5c344a80688 100644
--- a/compiler/rustc_mir_transform/src/elaborate_box_derefs.rs
+++ b/compiler/rustc_mir_transform/src/elaborate_box_derefs.rs
@@ -4,12 +4,13 @@
 
 use rustc_abi::FieldIdx;
 use rustc_hir::def_id::DefId;
-use rustc_middle::mir::patch::MirPatch;
 use rustc_middle::mir::visit::MutVisitor;
 use rustc_middle::mir::*;
 use rustc_middle::span_bug;
 use rustc_middle::ty::{Ty, TyCtxt};
 
+use crate::patch::MirPatch;
+
 /// Constructs the types used when accessing a Box's pointer
 fn build_ptr_tys<'tcx>(
     tcx: TyCtxt<'tcx>,
diff --git a/compiler/rustc_mir_transform/src/elaborate_drop.rs b/compiler/rustc_mir_transform/src/elaborate_drop.rs
new file mode 100644
index 00000000000..0d8cf524661
--- /dev/null
+++ b/compiler/rustc_mir_transform/src/elaborate_drop.rs
@@ -0,0 +1,1058 @@
+use std::{fmt, iter, mem};
+
+use rustc_abi::{FIRST_VARIANT, FieldIdx, VariantIdx};
+use rustc_hir::lang_items::LangItem;
+use rustc_index::Idx;
+use rustc_middle::mir::*;
+use rustc_middle::span_bug;
+use rustc_middle::ty::adjustment::PointerCoercion;
+use rustc_middle::ty::util::IntTypeExt;
+use rustc_middle::ty::{self, GenericArgsRef, Ty, TyCtxt};
+use rustc_span::DUMMY_SP;
+use rustc_span::source_map::Spanned;
+use tracing::{debug, instrument};
+
+use crate::patch::MirPatch;
+
+/// Describes how/if a value should be dropped.
+#[derive(Debug)]
+pub(crate) enum DropStyle {
+    /// The value is already dead at the drop location, no drop will be executed.
+    Dead,
+
+    /// The value is known to always be initialized at the drop location, drop will always be
+    /// executed.
+    Static,
+
+    /// Whether the value needs to be dropped depends on its drop flag.
+    Conditional,
+
+    /// An "open" drop is one where only the fields of a value are dropped.
+    ///
+    /// For example, this happens when moving out of a struct field: The rest of the struct will be
+    /// dropped in such an "open" drop. It is also used to generate drop glue for the individual
+    /// components of a value, for example for dropping array elements.
+    Open,
+}
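+
+// As a sketch of how these styles elaborate (see `DropCtxt::elaborate_drop`
+// below): `Dead` becomes a plain `goto -> succ`, `Static` an unconditional
+// `drop` terminator, `Conditional` a drop guarded by a test of the drop flag,
+// and `Open` a "drop ladder" over the value's fields.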
+
+/// Which drop flags to affect/check with an operation.
+#[derive(Debug)]
+pub(crate) enum DropFlagMode {
+    /// Only affect the top-level drop flag, not that of any contained fields.
+    Shallow,
+    /// Affect all nested drop flags in addition to the top-level one.
+    Deep,
+}
+
+/// Describes if unwinding is necessary and where to unwind to if a panic occurs.
+#[derive(Copy, Clone, Debug)]
+pub(crate) enum Unwind {
+    /// Unwind to this block.
+    To(BasicBlock),
+    /// Already in an unwind path, any panic will cause an abort.
+    InCleanup,
+}
+
+impl Unwind {
+    fn is_cleanup(self) -> bool {
+        match self {
+            Unwind::To(..) => false,
+            Unwind::InCleanup => true,
+        }
+    }
+
+    fn into_action(self) -> UnwindAction {
+        match self {
+            Unwind::To(bb) => UnwindAction::Cleanup(bb),
+            Unwind::InCleanup => UnwindAction::Terminate(UnwindTerminateReason::InCleanup),
+        }
+    }
+
+    fn map<F>(self, f: F) -> Self
+    where
+        F: FnOnce(BasicBlock) -> BasicBlock,
+    {
+        match self {
+            Unwind::To(bb) => Unwind::To(f(bb)),
+            Unwind::InCleanup => Unwind::InCleanup,
+        }
+    }
+}
+
+pub(crate) trait DropElaborator<'a, 'tcx>: fmt::Debug {
+    /// The type representing paths that can be moved out of.
+    ///
+    /// Users can move out of individual fields of a struct, such as `a.b.c`. This type is used to
+    /// represent such move paths. Sometimes tracking individual move paths is not necessary, in
+    /// which case this may be set to (for example) `()`.
+    type Path: Copy + fmt::Debug;
+
+    // Accessors
+
+    fn patch_ref(&self) -> &MirPatch<'tcx>;
+    fn patch(&mut self) -> &mut MirPatch<'tcx>;
+    fn body(&self) -> &'a Body<'tcx>;
+    fn tcx(&self) -> TyCtxt<'tcx>;
+    fn typing_env(&self) -> ty::TypingEnv<'tcx>;
+
+    // Drop logic
+
+    /// Returns how `path` should be dropped, given `mode`.
+    fn drop_style(&self, path: Self::Path, mode: DropFlagMode) -> DropStyle;
+
+    /// Returns the drop flag of `path` as a MIR `Operand` (or `None` if `path` has no drop flag).
+    fn get_drop_flag(&mut self, path: Self::Path) -> Option<Operand<'tcx>>;
+
+    /// Modifies the MIR patch so that the drop flag of `path` (if any) is cleared at `location`.
+    ///
+    /// If `mode` is deep, drop flags of all child paths should also be cleared by inserting
+    /// additional statements.
+    fn clear_drop_flag(&mut self, location: Location, path: Self::Path, mode: DropFlagMode);
+
+    // Subpaths
+
+    /// Returns the subpath of a field of `path` (or `None` if there is no dedicated subpath).
+    ///
+    /// If this returns `None`, `field` will not get a dedicated drop flag.
+    fn field_subpath(&self, path: Self::Path, field: FieldIdx) -> Option<Self::Path>;
+
+    /// Returns the subpath of a dereference of `path` (or `None` if there is no dedicated subpath).
+    ///
+    /// If this returns `None`, `*path` will not get a dedicated drop flag.
+    ///
+    /// This is only relevant for `Box<T>`, where the contained `T` can be moved out of the box.
+    fn deref_subpath(&self, path: Self::Path) -> Option<Self::Path>;
+
+    /// Returns the subpath of downcasting `path` to one of its variants.
+    ///
+    /// If this returns `None`, the downcast of `path` will not get a dedicated drop flag.
+    fn downcast_subpath(&self, path: Self::Path, variant: VariantIdx) -> Option<Self::Path>;
+
+    /// Returns the subpath of indexing a fixed-size array `path`.
+    ///
+    /// If this returns `None`, elements of `path` will not get a dedicated drop flag.
+    ///
+    /// This is only relevant for array patterns, which can move out of individual array elements.
+    fn array_subpath(&self, path: Self::Path, index: u64, size: u64) -> Option<Self::Path>;
+}
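+
+// Illustrative sketch only: `AlwaysLiveElaborator` is a hypothetical type, not
+// part of this module. An elaborator that tracks no move paths (`Path = ()`)
+// and treats every value as initialized could implement the drop-logic hooks
+// roughly like this, with all four subpath accessors returning `None`:
+//
+//     impl<'a, 'tcx> DropElaborator<'a, 'tcx> for AlwaysLiveElaborator<'a, 'tcx> {
+//         type Path = ();
+//         fn drop_style(&self, _path: (), _mode: DropFlagMode) -> DropStyle {
+//             // no drop flags exist, so every drop is unconditional
+//             DropStyle::Static
+//         }
+//         fn get_drop_flag(&mut self, _path: ()) -> Option<Operand<'tcx>> { None }
+//         fn clear_drop_flag(&mut self, _loc: Location, _path: (), _mode: DropFlagMode) {}
+//         // field_subpath, deref_subpath, downcast_subpath, array_subpath: all `None`
+//     }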
+
+#[derive(Debug)]
+struct DropCtxt<'a, 'b, 'tcx, D>
+where
+    D: DropElaborator<'b, 'tcx>,
+{
+    elaborator: &'a mut D,
+
+    source_info: SourceInfo,
+
+    place: Place<'tcx>,
+    path: D::Path,
+    succ: BasicBlock,
+    unwind: Unwind,
+}
+
+/// "Elaborates" a drop of `place`/`path` and patches `bb`'s terminator to execute it.
+///
+/// The passed `elaborator` is used to determine what should happen at the drop terminator. It
+/// decides whether the drop can be statically determined or whether it needs a dynamic drop flag,
+/// and whether the drop is "open", i.e. should be expanded to drop all subfields of the dropped
+/// value.
+///
+/// When this returns, the MIR patch in the `elaborator` contains the necessary changes.
+pub(crate) fn elaborate_drop<'b, 'tcx, D>(
+    elaborator: &mut D,
+    source_info: SourceInfo,
+    place: Place<'tcx>,
+    path: D::Path,
+    succ: BasicBlock,
+    unwind: Unwind,
+    bb: BasicBlock,
+) where
+    D: DropElaborator<'b, 'tcx>,
+    'tcx: 'b,
+{
+    DropCtxt { elaborator, source_info, place, path, succ, unwind }.elaborate_drop(bb)
+}
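+
+// Hypothetical usage sketch (block and variable names assumed): a pass that
+// wants to elaborate the `Drop` terminator of `bb` would call
+//
+//     elaborate_drop(&mut elab, source_info, place, path, succ, Unwind::To(cleanup_bb), bb);
+//
+// and then apply the `MirPatch` accumulated inside `elab` to the body.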
+
+impl<'a, 'b, 'tcx, D> DropCtxt<'a, 'b, 'tcx, D>
+where
+    D: DropElaborator<'b, 'tcx>,
+    'tcx: 'b,
+{
+    #[instrument(level = "trace", skip(self), ret)]
+    fn place_ty(&self, place: Place<'tcx>) -> Ty<'tcx> {
+        if place.local < self.elaborator.body().local_decls.next_index() {
+            place.ty(self.elaborator.body(), self.tcx()).ty
+        } else {
+            // We don't have a slice with all the locals, since some are in the patch.
+            PlaceTy::from_ty(self.elaborator.patch_ref().local_ty(place.local))
+                .multi_projection_ty(self.elaborator.tcx(), place.projection)
+                .ty
+        }
+    }
+
+    fn tcx(&self) -> TyCtxt<'tcx> {
+        self.elaborator.tcx()
+    }
+
+    /// This elaborates a single drop instruction, located at `bb`, and
+    /// patches over it.
+    ///
+    /// The elaborated drop checks the drop flags to only drop what
+    /// is initialized.
+    ///
+    /// In addition, the relevant drop flags also need to be cleared
+    /// to avoid double-drops. However, in the middle of a complex
+    /// drop, one must avoid clearing some of the flags before they
+    /// are read, as that would cause a memory leak.
+    ///
+    /// In particular, when dropping an ADT, multiple fields may be
+    /// joined together under the `rest` subpath. They are all controlled
+    /// by the primary drop flag, but only the last rest-field dropped
+    /// should clear it (and it must also not clear anything else).
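+    ///
+    /// As a sketch, a `DropStyle::Conditional` drop of `place` is expanded to
+    /// roughly:
+    ///
+    /// ```text
+    /// bb_test:
+    ///     switchInt(drop_flag) -> [0: succ, otherwise: bb_drop]
+    /// bb_drop:
+    ///     drop(place) -> [return: succ, unwind: ...]
+    /// ```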
+    //
+    // FIXME: I think we should just control the flags externally,
+    // and then we do not need this machinery.
+    #[instrument(level = "debug")]
+    fn elaborate_drop(&mut self, bb: BasicBlock) {
+        match self.elaborator.drop_style(self.path, DropFlagMode::Deep) {
+            DropStyle::Dead => {
+                self.elaborator
+                    .patch()
+                    .patch_terminator(bb, TerminatorKind::Goto { target: self.succ });
+            }
+            DropStyle::Static => {
+                self.elaborator.patch().patch_terminator(
+                    bb,
+                    TerminatorKind::Drop {
+                        place: self.place,
+                        target: self.succ,
+                        unwind: self.unwind.into_action(),
+                        replace: false,
+                    },
+                );
+            }
+            DropStyle::Conditional => {
+                let drop_bb = self.complete_drop(self.succ, self.unwind);
+                self.elaborator
+                    .patch()
+                    .patch_terminator(bb, TerminatorKind::Goto { target: drop_bb });
+            }
+            DropStyle::Open => {
+                let drop_bb = self.open_drop();
+                self.elaborator
+                    .patch()
+                    .patch_terminator(bb, TerminatorKind::Goto { target: drop_bb });
+            }
+        }
+    }
+
+    /// Returns the place and move path for each field of `variant`
+    /// (the move path is `None` if the field is a rest field).
+    fn move_paths_for_fields(
+        &self,
+        base_place: Place<'tcx>,
+        variant_path: D::Path,
+        variant: &'tcx ty::VariantDef,
+        args: GenericArgsRef<'tcx>,
+    ) -> Vec<(Place<'tcx>, Option<D::Path>)> {
+        variant
+            .fields
+            .iter()
+            .enumerate()
+            .map(|(i, f)| {
+                let field = FieldIdx::new(i);
+                let subpath = self.elaborator.field_subpath(variant_path, field);
+                let tcx = self.tcx();
+
+                assert_eq!(self.elaborator.typing_env().typing_mode, ty::TypingMode::PostAnalysis);
+                // The type error for normalization may have been in dropck: see
+                // `compute_drop_data` in rustc_borrowck, in which case we wouldn't have
+                // deleted the MIR body and could have an error here as well.
+                let field_ty = match tcx
+                    .try_normalize_erasing_regions(self.elaborator.typing_env(), f.ty(tcx, args))
+                {
+                    Ok(t) => t,
+                    Err(_) => Ty::new_error(
+                        self.tcx(),
+                        self.elaborator
+                            .body()
+                            .tainted_by_errors
+                            .expect("Error in drop elaboration not found by dropck."),
+                    ),
+                };
+
+                (tcx.mk_place_field(base_place, field, field_ty), subpath)
+            })
+            .collect()
+    }
+
+    fn drop_subpath(
+        &mut self,
+        place: Place<'tcx>,
+        path: Option<D::Path>,
+        succ: BasicBlock,
+        unwind: Unwind,
+    ) -> BasicBlock {
+        if let Some(path) = path {
+            debug!("drop_subpath: for std field {:?}", place);
+
+            DropCtxt {
+                elaborator: self.elaborator,
+                source_info: self.source_info,
+                path,
+                place,
+                succ,
+                unwind,
+            }
+            .elaborated_drop_block()
+        } else {
+            debug!("drop_subpath: for rest field {:?}", place);
+
+            DropCtxt {
+                elaborator: self.elaborator,
+                source_info: self.source_info,
+                place,
+                succ,
+                unwind,
+                // Using `self.path` here to condition the drop on
+                // our own drop flag.
+                path: self.path,
+            }
+            .complete_drop(succ, unwind)
+        }
+    }
+
+    /// Creates one-half of the drop ladder for a list of fields, and returns
+    /// the list of steps in it in reverse order, with the first step
+    /// dropping 0 fields and so on.
+    ///
+    /// `unwind_ladder` is such a list of steps in reverse order,
+    /// which is called if the matching step of the drop glue panics.
+    fn drop_halfladder(
+        &mut self,
+        unwind_ladder: &[Unwind],
+        mut succ: BasicBlock,
+        fields: &[(Place<'tcx>, Option<D::Path>)],
+    ) -> Vec<BasicBlock> {
+        iter::once(succ)
+            .chain(fields.iter().rev().zip(unwind_ladder).map(|(&(place, path), &unwind_succ)| {
+                succ = self.drop_subpath(place, path, succ, unwind_succ);
+                succ
+            }))
+            .collect()
+    }
+
+    fn drop_ladder_bottom(&mut self) -> (BasicBlock, Unwind) {
+        // Clear the "master" drop flag at the end. This is needed
+        // because the "master" drop protects the ADT's discriminant,
+        // which is invalidated after the ADT is dropped.
+        (self.drop_flag_reset_block(DropFlagMode::Shallow, self.succ, self.unwind), self.unwind)
+    }
+
+    /// Creates a full drop ladder, consisting of two connected half-drop-ladders.
+    ///
+    /// For example, with 3 fields, the drop ladder is
+    ///
+    /// ```text
+    /// .d0:
+    ///     ELAB(drop location.0 [target=.d1, unwind=.c1])
+    /// .d1:
+    ///     ELAB(drop location.1 [target=.d2, unwind=.c2])
+    /// .d2:
+    ///     ELAB(drop location.2 [target=`self.succ`, unwind=`self.unwind`])
+    /// .c1:
+    ///     ELAB(drop location.1 [target=.c2])
+    /// .c2:
+    ///     ELAB(drop location.2 [target=`self.unwind`])
+    /// ```
+    ///
+    /// NOTE: this does not clear the master drop flag, so you need
+    /// to point succ/unwind at a `drop_ladder_bottom`.
+    fn drop_ladder(
+        &mut self,
+        fields: Vec<(Place<'tcx>, Option<D::Path>)>,
+        succ: BasicBlock,
+        unwind: Unwind,
+    ) -> (BasicBlock, Unwind) {
+        debug!("drop_ladder({:?}, {:?})", self, fields);
+
+        let mut fields = fields;
+        fields.retain(|&(place, _)| {
+            self.place_ty(place).needs_drop(self.tcx(), self.elaborator.typing_env())
+        });
+
+        debug!("drop_ladder - fields needing drop: {:?}", fields);
+
+        let unwind_ladder = vec![Unwind::InCleanup; fields.len() + 1];
+        let unwind_ladder: Vec<_> = if let Unwind::To(target) = unwind {
+            let halfladder = self.drop_halfladder(&unwind_ladder, target, &fields);
+            halfladder.into_iter().map(Unwind::To).collect()
+        } else {
+            unwind_ladder
+        };
+
+        let normal_ladder = self.drop_halfladder(&unwind_ladder, succ, &fields);
+
+        (*normal_ladder.last().unwrap(), *unwind_ladder.last().unwrap())
+    }
+
+    fn open_drop_for_tuple(&mut self, tys: &[Ty<'tcx>]) -> BasicBlock {
+        debug!("open_drop_for_tuple({:?}, {:?})", self, tys);
+
+        let fields = tys
+            .iter()
+            .enumerate()
+            .map(|(i, &ty)| {
+                (
+                    self.tcx().mk_place_field(self.place, FieldIdx::new(i), ty),
+                    self.elaborator.field_subpath(self.path, FieldIdx::new(i)),
+                )
+            })
+            .collect();
+
+        let (succ, unwind) = self.drop_ladder_bottom();
+        self.drop_ladder(fields, succ, unwind).0
+    }
+
+    /// Drops the `T` contained in a `Box<T>` if it has not been moved out of.
+    #[instrument(level = "debug", ret)]
+    fn open_drop_for_box_contents(
+        &mut self,
+        adt: ty::AdtDef<'tcx>,
+        args: GenericArgsRef<'tcx>,
+        succ: BasicBlock,
+        unwind: Unwind,
+    ) -> BasicBlock {
+        // drop glue is sent straight to codegen
+        // box cannot be directly dereferenced
+        let unique_ty = adt.non_enum_variant().fields[FieldIdx::ZERO].ty(self.tcx(), args);
+        let unique_variant = unique_ty.ty_adt_def().unwrap().non_enum_variant();
+        let nonnull_ty = unique_variant.fields[FieldIdx::ZERO].ty(self.tcx(), args);
+        let ptr_ty = Ty::new_imm_ptr(self.tcx(), args[0].expect_ty());
+
+        let unique_place = self.tcx().mk_place_field(self.place, FieldIdx::ZERO, unique_ty);
+        let nonnull_place = self.tcx().mk_place_field(unique_place, FieldIdx::ZERO, nonnull_ty);
+
+        let ptr_local = self.new_temp(ptr_ty);
+
+        let interior = self.tcx().mk_place_deref(Place::from(ptr_local));
+        let interior_path = self.elaborator.deref_subpath(self.path);
+
+        let do_drop_bb = self.drop_subpath(interior, interior_path, succ, unwind);
+
+        let setup_bbd = BasicBlockData {
+            statements: vec![self.assign(
+                Place::from(ptr_local),
+                Rvalue::Cast(CastKind::Transmute, Operand::Copy(nonnull_place), ptr_ty),
+            )],
+            terminator: Some(Terminator {
+                kind: TerminatorKind::Goto { target: do_drop_bb },
+                source_info: self.source_info,
+            }),
+            is_cleanup: unwind.is_cleanup(),
+        };
+        self.elaborator.patch().new_block(setup_bbd)
+    }
+
+    #[instrument(level = "debug", ret)]
+    fn open_drop_for_adt(
+        &mut self,
+        adt: ty::AdtDef<'tcx>,
+        args: GenericArgsRef<'tcx>,
+    ) -> BasicBlock {
+        if adt.variants().is_empty() {
+            return self.elaborator.patch().new_block(BasicBlockData {
+                statements: vec![],
+                terminator: Some(Terminator {
+                    source_info: self.source_info,
+                    kind: TerminatorKind::Unreachable,
+                }),
+                is_cleanup: self.unwind.is_cleanup(),
+            });
+        }
+
+        let skip_contents = adt.is_union() || adt.is_manually_drop();
+        let contents_drop = if skip_contents {
+            (self.succ, self.unwind)
+        } else {
+            self.open_drop_for_adt_contents(adt, args)
+        };
+
+        if adt.is_box() {
+            // we need to drop the inside of the box before running the destructor
+            let succ = self.destructor_call_block(contents_drop);
+            let unwind = contents_drop
+                .1
+                .map(|unwind| self.destructor_call_block((unwind, Unwind::InCleanup)));
+
+            self.open_drop_for_box_contents(adt, args, succ, unwind)
+        } else if adt.has_dtor(self.tcx()) {
+            self.destructor_call_block(contents_drop)
+        } else {
+            contents_drop.0
+        }
+    }
+
+    fn open_drop_for_adt_contents(
+        &mut self,
+        adt: ty::AdtDef<'tcx>,
+        args: GenericArgsRef<'tcx>,
+    ) -> (BasicBlock, Unwind) {
+        let (succ, unwind) = self.drop_ladder_bottom();
+        if !adt.is_enum() {
+            let fields =
+                self.move_paths_for_fields(self.place, self.path, adt.variant(FIRST_VARIANT), args);
+            self.drop_ladder(fields, succ, unwind)
+        } else {
+            self.open_drop_for_multivariant(adt, args, succ, unwind)
+        }
+    }
+
+    fn open_drop_for_multivariant(
+        &mut self,
+        adt: ty::AdtDef<'tcx>,
+        args: GenericArgsRef<'tcx>,
+        succ: BasicBlock,
+        unwind: Unwind,
+    ) -> (BasicBlock, Unwind) {
+        let mut values = Vec::with_capacity(adt.variants().len());
+        let mut normal_blocks = Vec::with_capacity(adt.variants().len());
+        let mut unwind_blocks =
+            if unwind.is_cleanup() { None } else { Some(Vec::with_capacity(adt.variants().len())) };
+
+        let mut have_otherwise_with_drop_glue = false;
+        let mut have_otherwise = false;
+        let tcx = self.tcx();
+
+        for (variant_index, discr) in adt.discriminants(tcx) {
+            let variant = &adt.variant(variant_index);
+            let subpath = self.elaborator.downcast_subpath(self.path, variant_index);
+
+            if let Some(variant_path) = subpath {
+                let base_place = tcx.mk_place_elem(
+                    self.place,
+                    ProjectionElem::Downcast(Some(variant.name), variant_index),
+                );
+                let fields = self.move_paths_for_fields(base_place, variant_path, variant, args);
+                values.push(discr.val);
+                if let Unwind::To(unwind) = unwind {
+                    // We can't use the half-ladder from the original
+                    // drop ladder, because this breaks the
+                    // "funclet can't have 2 successor funclets"
+                    // requirement from MSVC:
+                    //
+                    //           switch       unwind-switch
+                    //          /      \         /        \
+                    //         v1.0    v2.0  v2.0-unwind  v1.0-unwind
+                    //         |        |      /             |
+                    //    v1.1-unwind  v2.1-unwind           |
+                    //      ^                                |
+                    //       \-------------------------------/
+                    //
+                    // Create a duplicate half-ladder to avoid that. We
+                    // could technically only do this on MSVC, but I
+                    // want to minimize the divergence between MSVC
+                    // and non-MSVC.
+
+                    let unwind_blocks = unwind_blocks.as_mut().unwrap();
+                    let unwind_ladder = vec![Unwind::InCleanup; fields.len() + 1];
+                    let halfladder = self.drop_halfladder(&unwind_ladder, unwind, &fields);
+                    unwind_blocks.push(halfladder.last().cloned().unwrap());
+                }
+                let (normal, _) = self.drop_ladder(fields, succ, unwind);
+                normal_blocks.push(normal);
+            } else {
+                have_otherwise = true;
+
+                let typing_env = self.elaborator.typing_env();
+                let have_field_with_drop_glue = variant
+                    .fields
+                    .iter()
+                    .any(|field| field.ty(tcx, args).needs_drop(tcx, typing_env));
+                if have_field_with_drop_glue {
+                    have_otherwise_with_drop_glue = true;
+                }
+            }
+        }
+
+        if !have_otherwise {
+            values.pop();
+        } else if !have_otherwise_with_drop_glue {
+            normal_blocks.push(self.goto_block(succ, unwind));
+            if let Unwind::To(unwind) = unwind {
+                unwind_blocks.as_mut().unwrap().push(self.goto_block(unwind, Unwind::InCleanup));
+            }
+        } else {
+            normal_blocks.push(self.drop_block(succ, unwind));
+            if let Unwind::To(unwind) = unwind {
+                unwind_blocks.as_mut().unwrap().push(self.drop_block(unwind, Unwind::InCleanup));
+            }
+        }
+
+        (
+            self.adt_switch_block(adt, normal_blocks, &values, succ, unwind),
+            unwind.map(|unwind| {
+                self.adt_switch_block(
+                    adt,
+                    unwind_blocks.unwrap(),
+                    &values,
+                    unwind,
+                    Unwind::InCleanup,
+                )
+            }),
+        )
+    }
+
+    fn adt_switch_block(
+        &mut self,
+        adt: ty::AdtDef<'tcx>,
+        blocks: Vec<BasicBlock>,
+        values: &[u128],
+        succ: BasicBlock,
+        unwind: Unwind,
+    ) -> BasicBlock {
+        // If there are multiple variants, then if something
+        // is present within the enum, the discriminant, tracked
+        // by the rest path, must be initialized.
+        //
+        // Additionally, we do not want to switch on the
+        // discriminant after it is freed, because that
+        // way lies only trouble.
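+        //
+        // Sketch of the generated switch block (variant discriminant values are
+        // zipped with `blocks`, and the final block acts as `otherwise`):
+        //
+        //     _d = discriminant(place);
+        //     switchInt(move _d) -> [v0: blocks[0], ..., otherwise: blocks.last()]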
+        let discr_ty = adt.repr().discr_type().to_ty(self.tcx());
+        let discr = Place::from(self.new_temp(discr_ty));
+        let discr_rv = Rvalue::Discriminant(self.place);
+        let switch_block = BasicBlockData {
+            statements: vec![self.assign(discr, discr_rv)],
+            terminator: Some(Terminator {
+                source_info: self.source_info,
+                kind: TerminatorKind::SwitchInt {
+                    discr: Operand::Move(discr),
+                    targets: SwitchTargets::new(
+                        values.iter().copied().zip(blocks.iter().copied()),
+                        *blocks.last().unwrap(),
+                    ),
+                },
+            }),
+            is_cleanup: unwind.is_cleanup(),
+        };
+        let switch_block = self.elaborator.patch().new_block(switch_block);
+        self.drop_flag_test_block(switch_block, succ, unwind)
+    }
+
+    fn destructor_call_block(&mut self, (succ, unwind): (BasicBlock, Unwind)) -> BasicBlock {
+        debug!("destructor_call_block({:?}, {:?})", self, succ);
+        let tcx = self.tcx();
+        let drop_trait = tcx.require_lang_item(LangItem::Drop, None);
+        let drop_fn = tcx.associated_item_def_ids(drop_trait)[0];
+        let ty = self.place_ty(self.place);
+
+        let ref_ty = Ty::new_mut_ref(tcx, tcx.lifetimes.re_erased, ty);
+        let ref_place = self.new_temp(ref_ty);
+        let unit_temp = Place::from(self.new_temp(tcx.types.unit));
+
+        let result = BasicBlockData {
+            statements: vec![self.assign(
+                Place::from(ref_place),
+                Rvalue::Ref(
+                    tcx.lifetimes.re_erased,
+                    BorrowKind::Mut { kind: MutBorrowKind::Default },
+                    self.place,
+                ),
+            )],
+            terminator: Some(Terminator {
+                kind: TerminatorKind::Call {
+                    func: Operand::function_handle(
+                        tcx,
+                        drop_fn,
+                        [ty.into()],
+                        self.source_info.span,
+                    ),
+                    args: [Spanned { node: Operand::Move(Place::from(ref_place)), span: DUMMY_SP }]
+                        .into(),
+                    destination: unit_temp,
+                    target: Some(succ),
+                    unwind: unwind.into_action(),
+                    call_source: CallSource::Misc,
+                    fn_span: self.source_info.span,
+                },
+                source_info: self.source_info,
+            }),
+            is_cleanup: unwind.is_cleanup(),
+        };
+
+        let destructor_block = self.elaborator.patch().new_block(result);
+
+        let block_start = Location { block: destructor_block, statement_index: 0 };
+        self.elaborator.clear_drop_flag(block_start, self.path, DropFlagMode::Shallow);
+
+        self.drop_flag_test_block(destructor_block, succ, unwind)
+    }
+
+    /// Creates a loop that drops an array:
+    ///
+    /// ```text
+    /// loop-block:
+    ///    can_go = cur == len
+    ///    if can_go then succ else drop-block
+    /// drop-block:
+    ///    ptr = &raw mut P[cur]
+    ///    cur = cur + 1
+    ///    drop(ptr)
+    /// ```
+    fn drop_loop(
+        &mut self,
+        succ: BasicBlock,
+        cur: Local,
+        len: Local,
+        ety: Ty<'tcx>,
+        unwind: Unwind,
+    ) -> BasicBlock {
+        let copy = |place: Place<'tcx>| Operand::Copy(place);
+        let move_ = |place: Place<'tcx>| Operand::Move(place);
+        let tcx = self.tcx();
+
+        let ptr_ty = Ty::new_mut_ptr(tcx, ety);
+        let ptr = Place::from(self.new_temp(ptr_ty));
+        let can_go = Place::from(self.new_temp(tcx.types.bool));
+        let one = self.constant_usize(1);
+
+        let drop_block = BasicBlockData {
+            statements: vec![
+                self.assign(
+                    ptr,
+                    Rvalue::RawPtr(RawPtrKind::Mut, tcx.mk_place_index(self.place, cur)),
+                ),
+                self.assign(
+                    cur.into(),
+                    Rvalue::BinaryOp(BinOp::Add, Box::new((move_(cur.into()), one))),
+                ),
+            ],
+            is_cleanup: unwind.is_cleanup(),
+            terminator: Some(Terminator {
+                source_info: self.source_info,
+                // this gets overwritten by drop elaboration.
+                kind: TerminatorKind::Unreachable,
+            }),
+        };
+        let drop_block = self.elaborator.patch().new_block(drop_block);
+
+        let loop_block = BasicBlockData {
+            statements: vec![self.assign(
+                can_go,
+                Rvalue::BinaryOp(BinOp::Eq, Box::new((copy(Place::from(cur)), copy(len.into())))),
+            )],
+            is_cleanup: unwind.is_cleanup(),
+            terminator: Some(Terminator {
+                source_info: self.source_info,
+                kind: TerminatorKind::if_(move_(can_go), succ, drop_block),
+            }),
+        };
+        let loop_block = self.elaborator.patch().new_block(loop_block);
+
+        self.elaborator.patch().patch_terminator(
+            drop_block,
+            TerminatorKind::Drop {
+                place: tcx.mk_place_deref(ptr),
+                target: loop_block,
+                unwind: unwind.into_action(),
+                replace: false,
+            },
+        );
+
+        loop_block
+    }
+
+    fn open_drop_for_array(
+        &mut self,
+        array_ty: Ty<'tcx>,
+        ety: Ty<'tcx>,
+        opt_size: Option<u64>,
+    ) -> BasicBlock {
+        debug!("open_drop_for_array({:?}, {:?}, {:?})", array_ty, ety, opt_size);
+        let tcx = self.tcx();
+
+        if let Some(size) = opt_size {
+            enum ProjectionKind<Path> {
+                Drop(std::ops::Range<u64>),
+                Keep(u64, Path),
+            }
+            // Previously, we'd make a projection for every element in the array and create a drop
+            // ladder if any `array_subpath` was `Some`, i.e. moving out with an array pattern.
+            // This caused huge memory usage when generating the drops for large arrays, so we instead
+            // record the *subslices* which are dropped and the *indexes* which are kept.
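+            // For example (sketch), for an array of size 5 where only index 2 was
+            // moved out, this loop records `Drop(0..2)`, `Keep(2, path)`, `Drop(3..5)`.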
+            let mut drop_ranges = vec![];
+            let mut dropping = true;
+            let mut start = 0;
+            for i in 0..size {
+                let path = self.elaborator.array_subpath(self.path, i, size);
+                if dropping && path.is_some() {
+                    drop_ranges.push(ProjectionKind::Drop(start..i));
+                    dropping = false;
+                } else if !dropping && path.is_none() {
+                    dropping = true;
+                    start = i;
+                }
+                if let Some(path) = path {
+                    drop_ranges.push(ProjectionKind::Keep(i, path));
+                }
+            }
+            if !drop_ranges.is_empty() {
+                if dropping {
+                    drop_ranges.push(ProjectionKind::Drop(start..size));
+                }
+                let fields = drop_ranges
+                    .iter()
+                    .rev()
+                    .map(|p| {
+                        let (project, path) = match p {
+                            ProjectionKind::Drop(r) => (
+                                ProjectionElem::Subslice {
+                                    from: r.start,
+                                    to: r.end,
+                                    from_end: false,
+                                },
+                                None,
+                            ),
+                            &ProjectionKind::Keep(offset, path) => (
+                                ProjectionElem::ConstantIndex {
+                                    offset,
+                                    min_length: size,
+                                    from_end: false,
+                                },
+                                Some(path),
+                            ),
+                        };
+                        (tcx.mk_place_elem(self.place, project), path)
+                    })
+                    .collect::<Vec<_>>();
+                let (succ, unwind) = self.drop_ladder_bottom();
+                return self.drop_ladder(fields, succ, unwind).0;
+            }
+        }
+
+        let array_ptr_ty = Ty::new_mut_ptr(tcx, array_ty);
+        let array_ptr = self.new_temp(array_ptr_ty);
+
+        let slice_ty = Ty::new_slice(tcx, ety);
+        let slice_ptr_ty = Ty::new_mut_ptr(tcx, slice_ty);
+        let slice_ptr = self.new_temp(slice_ptr_ty);
+
+        let mut delegate_block = BasicBlockData {
+            statements: vec![
+                self.assign(Place::from(array_ptr), Rvalue::RawPtr(RawPtrKind::Mut, self.place)),
+                self.assign(
+                    Place::from(slice_ptr),
+                    Rvalue::Cast(
+                        CastKind::PointerCoercion(
+                            PointerCoercion::Unsize,
+                            CoercionSource::Implicit,
+                        ),
+                        Operand::Move(Place::from(array_ptr)),
+                        slice_ptr_ty,
+                    ),
+                ),
+            ],
+            is_cleanup: self.unwind.is_cleanup(),
+            terminator: None,
+        };
+
+        let array_place = mem::replace(
+            &mut self.place,
+            Place::from(slice_ptr).project_deeper(&[PlaceElem::Deref], tcx),
+        );
+        let slice_block = self.drop_loop_pair_for_slice(ety);
+        self.place = array_place;
+
+        delegate_block.terminator = Some(Terminator {
+            source_info: self.source_info,
+            kind: TerminatorKind::Goto { target: slice_block },
+        });
+        self.elaborator.patch().new_block(delegate_block)
+    }
+
+    /// Creates a pair of drop-loops of `place`, which drops its contents, even
+    /// in the case of one panic.
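+    ///
+    /// A rough sketch of the generated blocks (drop-flag handling elided):
+    ///
+    /// ```text
+    /// preamble:
+    ///     len = PtrMetadata(copy ptr)
+    ///     cur = 0
+    ///     goto -> loop-block   // the loop from `drop_loop` above
+    /// ```
+    ///
+    /// A second copy of the loop is generated as the cleanup path, so a panic
+    /// while dropping one element still drops the remaining elements.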
+    fn drop_loop_pair_for_slice(&mut self, ety: Ty<'tcx>) -> BasicBlock {
+        debug!("drop_loop_pair_for_slice({:?})", ety);
+        let tcx = self.tcx();
+        let len = self.new_temp(tcx.types.usize);
+        let cur = self.new_temp(tcx.types.usize);
+
+        let unwind =
+            self.unwind.map(|unwind| self.drop_loop(unwind, cur, len, ety, Unwind::InCleanup));
+
+        let loop_block = self.drop_loop(self.succ, cur, len, ety, unwind);
+
+        let [PlaceElem::Deref] = self.place.projection.as_slice() else {
+            span_bug!(
+                self.source_info.span,
+                "Expected place for slice drop shim to be *_n, but it's {:?}",
+                self.place,
+            );
+        };
+
+        let zero = self.constant_usize(0);
+        let block = BasicBlockData {
+            statements: vec![
+                self.assign(
+                    len.into(),
+                    Rvalue::UnaryOp(
+                        UnOp::PtrMetadata,
+                        Operand::Copy(Place::from(self.place.local)),
+                    ),
+                ),
+                self.assign(cur.into(), Rvalue::Use(zero)),
+            ],
+            is_cleanup: unwind.is_cleanup(),
+            terminator: Some(Terminator {
+                source_info: self.source_info,
+                kind: TerminatorKind::Goto { target: loop_block },
+            }),
+        };
+
+        let drop_block = self.elaborator.patch().new_block(block);
+        // FIXME(#34708): handle partially-dropped array/slice elements.
+        let reset_block = self.drop_flag_reset_block(DropFlagMode::Deep, drop_block, unwind);
+        self.drop_flag_test_block(reset_block, self.succ, unwind)
+    }
+
+    /// The slow path: creates an "open", elaborated drop for a type
+    /// that is only partially moved out of, and patches `bb` to jump
+    /// to it. This must not be called on ADTs with a destructor,
+    /// as these can't be moved out of, except for `Box<T>`, which is
+    /// special-cased.
+    ///
+    /// This creates a "drop ladder" that drops the needed fields of the
+    /// ADT, both in the success case and if one of the destructors fails.
+    fn open_drop(&mut self) -> BasicBlock {
+        let ty = self.place_ty(self.place);
+        match ty.kind() {
+            ty::Closure(_, args) => self.open_drop_for_tuple(args.as_closure().upvar_tys()),
+            ty::CoroutineClosure(_, args) => {
+                self.open_drop_for_tuple(args.as_coroutine_closure().upvar_tys())
+            }
+            // Note that `elaborate_drops` only drops the upvars of a coroutine,
+            // and this is ok because `open_drop` here can only be reached
+            // within that coroutine's own resume function.
+            // This should only happen for the self argument on the resume function.
+            // It effectively only contains upvars until the coroutine transformation runs.
+            // See `rustc_mir_transform/src/coroutine.rs` for more details.
+            ty::Coroutine(_, args) => self.open_drop_for_tuple(args.as_coroutine().upvar_tys()),
+            ty::Tuple(fields) => self.open_drop_for_tuple(fields),
+            ty::Adt(def, args) => self.open_drop_for_adt(*def, args),
+            ty::Dynamic(..) => self.complete_drop(self.succ, self.unwind),
+            ty::Array(ety, size) => {
+                let size = size.try_to_target_usize(self.tcx());
+                self.open_drop_for_array(ty, *ety, size)
+            }
+            ty::Slice(ety) => self.drop_loop_pair_for_slice(*ety),
+
+            _ => span_bug!(self.source_info.span, "open drop from non-ADT `{:?}`", ty),
+        }
+    }
+
+    fn complete_drop(&mut self, succ: BasicBlock, unwind: Unwind) -> BasicBlock {
+        debug!("complete_drop(succ={:?}, unwind={:?})", succ, unwind);
+
+        let drop_block = self.drop_block(succ, unwind);
+
+        self.drop_flag_test_block(drop_block, succ, unwind)
+    }
+
+    /// Creates a block that resets the drop flag. If `mode` is deep, all children drop flags will
+    /// also be cleared.
+    fn drop_flag_reset_block(
+        &mut self,
+        mode: DropFlagMode,
+        succ: BasicBlock,
+        unwind: Unwind,
+    ) -> BasicBlock {
+        debug!("drop_flag_reset_block({:?},{:?})", self, mode);
+
+        if unwind.is_cleanup() {
+            // The drop flag isn't read again on the unwind path, so don't
+            // bother setting it.
+            return succ;
+        }
+        let block = self.new_block(unwind, TerminatorKind::Goto { target: succ });
+        let block_start = Location { block, statement_index: 0 };
+        self.elaborator.clear_drop_flag(block_start, self.path, mode);
+        block
+    }
+
+    fn elaborated_drop_block(&mut self) -> BasicBlock {
+        debug!("elaborated_drop_block({:?})", self);
+        let blk = self.drop_block(self.succ, self.unwind);
+        self.elaborate_drop(blk);
+        blk
+    }
+
+    fn drop_block(&mut self, target: BasicBlock, unwind: Unwind) -> BasicBlock {
+        let block = TerminatorKind::Drop {
+            place: self.place,
+            target,
+            unwind: unwind.into_action(),
+            replace: false,
+        };
+        self.new_block(unwind, block)
+    }
+
+    fn goto_block(&mut self, target: BasicBlock, unwind: Unwind) -> BasicBlock {
+        let block = TerminatorKind::Goto { target };
+        self.new_block(unwind, block)
+    }
+
+    /// Returns the block to jump to in order to test the drop flag and execute the drop.
+    ///
+    /// Depending on the required `DropStyle`, this might be a generated block with an `if`
+    /// terminator (for dynamic/open drops), or it might be `on_set` or `on_unset` itself, in case
+    /// the drop can be statically determined.
+    fn drop_flag_test_block(
+        &mut self,
+        on_set: BasicBlock,
+        on_unset: BasicBlock,
+        unwind: Unwind,
+    ) -> BasicBlock {
+        let style = self.elaborator.drop_style(self.path, DropFlagMode::Shallow);
+        debug!(
+            "drop_flag_test_block({:?},{:?},{:?},{:?}) - {:?}",
+            self, on_set, on_unset, unwind, style
+        );
+
+        match style {
+            DropStyle::Dead => on_unset,
+            DropStyle::Static => on_set,
+            DropStyle::Conditional | DropStyle::Open => {
+                let flag = self.elaborator.get_drop_flag(self.path).unwrap();
+                let term = TerminatorKind::if_(flag, on_set, on_unset);
+                self.new_block(unwind, term)
+            }
+        }
+    }
+
+    fn new_block(&mut self, unwind: Unwind, k: TerminatorKind<'tcx>) -> BasicBlock {
+        self.elaborator.patch().new_block(BasicBlockData {
+            statements: vec![],
+            terminator: Some(Terminator { source_info: self.source_info, kind: k }),
+            is_cleanup: unwind.is_cleanup(),
+        })
+    }
+
+    fn new_temp(&mut self, ty: Ty<'tcx>) -> Local {
+        self.elaborator.patch().new_temp(ty, self.source_info.span)
+    }
+
+    fn constant_usize(&self, val: u16) -> Operand<'tcx> {
+        Operand::Constant(Box::new(ConstOperand {
+            span: self.source_info.span,
+            user_ty: None,
+            const_: Const::from_usize(self.tcx(), val.into()),
+        }))
+    }
+
+    fn assign(&self, lhs: Place<'tcx>, rhs: Rvalue<'tcx>) -> Statement<'tcx> {
+        Statement {
+            source_info: self.source_info,
+            kind: StatementKind::Assign(Box::new((lhs, rhs))),
+        }
+    }
+}
diff --git a/compiler/rustc_mir_transform/src/elaborate_drops.rs b/compiler/rustc_mir_transform/src/elaborate_drops.rs
index cb869160c80..530c72ca549 100644
--- a/compiler/rustc_mir_transform/src/elaborate_drops.rs
+++ b/compiler/rustc_mir_transform/src/elaborate_drops.rs
@@ -3,21 +3,20 @@ use std::fmt;
 use rustc_abi::{FieldIdx, VariantIdx};
 use rustc_index::IndexVec;
 use rustc_index::bit_set::DenseBitSet;
-use rustc_middle::mir::patch::MirPatch;
 use rustc_middle::mir::*;
 use rustc_middle::ty::{self, TyCtxt};
-use rustc_mir_dataflow::elaborate_drops::{
-    DropElaborator, DropFlagMode, DropFlagState, DropStyle, Unwind, elaborate_drop,
-};
 use rustc_mir_dataflow::impls::{MaybeInitializedPlaces, MaybeUninitializedPlaces};
 use rustc_mir_dataflow::move_paths::{LookupResult, MoveData, MovePathIndex};
 use rustc_mir_dataflow::{
-    Analysis, MoveDataTypingEnv, ResultsCursor, on_all_children_bits, on_lookup_result_bits,
+    Analysis, DropFlagState, MoveDataTypingEnv, ResultsCursor, on_all_children_bits,
+    on_lookup_result_bits,
 };
 use rustc_span::Span;
 use tracing::{debug, instrument};
 
 use crate::deref_separator::deref_finder;
+use crate::elaborate_drop::{DropElaborator, DropFlagMode, DropStyle, Unwind, elaborate_drop};
+use crate::patch::MirPatch;
 
 /// During MIR building, Drop terminators are inserted in every place where a drop may occur.
 /// However, in this phase, the presence of these terminators does not guarantee that a destructor
@@ -139,6 +138,10 @@ impl InitializationData<'_, '_> {
 impl<'a, 'tcx> DropElaborator<'a, 'tcx> for ElaborateDropsCtxt<'a, 'tcx> {
     type Path = MovePathIndex;
 
+    fn patch_ref(&self) -> &MirPatch<'tcx> {
+        &self.patch
+    }
+
     fn patch(&mut self) -> &mut MirPatch<'tcx> {
         &mut self.patch
     }
@@ -414,7 +417,7 @@ impl<'a, 'tcx> ElaborateDropsCtxt<'a, 'tcx> {
                 ..
             } = data.terminator().kind
             {
-                assert!(!self.patch.is_patched(bb));
+                assert!(!self.patch.is_term_patched(bb));
 
                 let loc = Location { block: tgt, statement_index: 0 };
                 let path = self.move_data().rev_lookup.find(destination.as_ref());
@@ -459,7 +462,7 @@ impl<'a, 'tcx> ElaborateDropsCtxt<'a, 'tcx> {
                             // a Goto; see `MirPatch::new`).
                         }
                         _ => {
-                            assert!(!self.patch.is_patched(bb));
+                            assert!(!self.patch.is_term_patched(bb));
                         }
                     }
                 }
@@ -483,7 +486,7 @@ impl<'a, 'tcx> ElaborateDropsCtxt<'a, 'tcx> {
                 ..
             } = data.terminator().kind
             {
-                assert!(!self.patch.is_patched(bb));
+                assert!(!self.patch.is_term_patched(bb));
 
                 let loc = Location { block: bb, statement_index: data.statements.len() };
                 let path = self.move_data().rev_lookup.find(destination.as_ref());
diff --git a/compiler/rustc_mir_transform/src/ffi_unwind_calls.rs b/compiler/rustc_mir_transform/src/ffi_unwind_calls.rs
index 9a6a153c7ba..7b3553e7afd 100644
--- a/compiler/rustc_mir_transform/src/ffi_unwind_calls.rs
+++ b/compiler/rustc_mir_transform/src/ffi_unwind_calls.rs
@@ -85,7 +85,7 @@ fn has_ffi_unwind_calls(tcx: TyCtxt<'_>, local_def_id: LocalDefId) -> bool {
             let lint_root = body.source_scopes[terminator.source_info.scope]
                 .local_data
                 .as_ref()
-                .assert_crate_local()
+                .unwrap_crate_local()
                 .lint_root;
             let span = terminator.source_info.span;
 
@@ -113,7 +113,7 @@ fn required_panic_strategy(tcx: TyCtxt<'_>, _: LocalCrate) -> Option<PanicStrate
         return Some(PanicStrategy::Abort);
     }
 
-    for def_id in tcx.hir().body_owners() {
+    for def_id in tcx.hir_body_owners() {
         if tcx.has_ffi_unwind_calls(def_id) {
             // Given that this crate is compiled in `-C panic=unwind`, the `AbortUnwindingCalls`
             // MIR pass will not be run on FFI-unwind call sites, therefore a foreign exception
diff --git a/compiler/rustc_mir_transform/src/function_item_references.rs b/compiler/rustc_mir_transform/src/function_item_references.rs
index 7e88925b2e1..38b5ccdb32e 100644
--- a/compiler/rustc_mir_transform/src/function_item_references.rs
+++ b/compiler/rustc_mir_transform/src/function_item_references.rs
@@ -154,19 +154,14 @@ impl<'tcx> FunctionItemRefChecker<'_, 'tcx> {
         let lint_root = self.body.source_scopes[source_info.scope]
             .local_data
             .as_ref()
-            .assert_crate_local()
+            .unwrap_crate_local()
             .lint_root;
         // FIXME: use existing printing routines to print the function signature
         let fn_sig = self.tcx.fn_sig(fn_id).instantiate(self.tcx, fn_args);
         let unsafety = fn_sig.safety().prefix_str();
         let abi = match fn_sig.abi() {
             ExternAbi::Rust => String::from(""),
-            other_abi => {
-                let mut s = String::from("extern \"");
-                s.push_str(other_abi.name());
-                s.push_str("\" ");
-                s
-            }
+            other_abi => format!("extern {other_abi} "),
         };
         let ident = self.tcx.item_ident(fn_id);
         let ty_params = fn_args.types().map(|ty| format!("{ty}"));
diff --git a/compiler/rustc_mir_transform/src/gvn.rs b/compiler/rustc_mir_transform/src/gvn.rs
index 1a2120ecd71..29521d42720 100644
--- a/compiler/rustc_mir_transform/src/gvn.rs
+++ b/compiler/rustc_mir_transform/src/gvn.rs
@@ -1259,7 +1259,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
 
         let layout = self.ecx.layout_of(lhs_ty).ok()?;
 
-        let as_bits = |value| {
+        let as_bits = |value: VnIndex| {
             let constant = self.evaluated[value].as_ref()?;
             if layout.backend_repr.is_scalar() {
                 let scalar = self.ecx.read_scalar(constant).discard_err()?;
@@ -1397,8 +1397,8 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
             // or `*mut [i32]` <=> `*const [u64]`), including the common special
             // case of `*const T` <=> `*mut T`.
             if let Transmute = kind
-                && from.is_unsafe_ptr()
-                && to.is_unsafe_ptr()
+                && from.is_raw_ptr()
+                && to.is_raw_ptr()
                 && self.pointers_have_same_metadata(from, to)
             {
                 kind = PtrToPtr;
@@ -1558,8 +1558,11 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
             return true;
         };
 
+        if layout.uninhabited {
+            return true;
+        }
+
         match layout.backend_repr {
-            BackendRepr::Uninhabited => true,
             BackendRepr::Scalar(a) => !a.is_always_valid(&self.ecx),
             BackendRepr::ScalarPair(a, b) => {
                 !a.is_always_valid(&self.ecx) || !b.is_always_valid(&self.ecx)
diff --git a/compiler/rustc_mir_transform/src/inline.rs b/compiler/rustc_mir_transform/src/inline.rs
index 07c031e852d..5981b5031c6 100644
--- a/compiler/rustc_mir_transform/src/inline.rs
+++ b/compiler/rustc_mir_transform/src/inline.rs
@@ -49,8 +49,7 @@ impl<'tcx> crate::MirPass<'tcx> for Inline {
         match sess.mir_opt_level() {
             0 | 1 => false,
             2 => {
-                (sess.opts.optimize == OptLevel::Default
-                    || sess.opts.optimize == OptLevel::Aggressive)
+                (sess.opts.optimize == OptLevel::More || sess.opts.optimize == OptLevel::Aggressive)
                     && sess.opts.incremental == None
             }
             _ => true,
@@ -464,7 +463,7 @@ fn inline<'tcx, T: Inliner<'tcx>>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) -> b
     let def_id = body.source.def_id();
 
     // Only do inlining into fn bodies.
-    if !tcx.hir().body_owner_kind(def_id).is_fn_or_closure() {
+    if !tcx.hir_body_owner_kind(def_id).is_fn_or_closure() {
         return false;
     }
 
@@ -1257,6 +1256,8 @@ impl<'tcx> MutVisitor<'tcx> for Integrator<'_, 'tcx> {
         // replaced down below anyways).
         if !matches!(terminator.kind, TerminatorKind::Return) {
             self.super_terminator(terminator, loc);
+        } else {
+            self.visit_source_info(&mut terminator.source_info);
         }
 
         match terminator.kind {
diff --git a/compiler/rustc_mir_transform/src/instsimplify.rs b/compiler/rustc_mir_transform/src/instsimplify.rs
index 3dc4edaaa5a..da346dfc48c 100644
--- a/compiler/rustc_mir_transform/src/instsimplify.rs
+++ b/compiler/rustc_mir_transform/src/instsimplify.rs
@@ -36,7 +36,7 @@ impl<'tcx> crate::MirPass<'tcx> for InstSimplify {
             typing_env: body.typing_env(tcx),
         };
         let preserve_ub_checks =
-            attr::contains_name(tcx.hir().krate_attrs(), sym::rustc_preserve_ub_checks);
+            attr::contains_name(tcx.hir_krate_attrs(), sym::rustc_preserve_ub_checks);
         for block in body.basic_blocks.as_mut() {
             for statement in block.statements.iter_mut() {
                 match statement.kind {
diff --git a/compiler/rustc_mir_transform/src/jump_threading.rs b/compiler/rustc_mir_transform/src/jump_threading.rs
index 17084eca6e3..0a72a9d669f 100644
--- a/compiler/rustc_mir_transform/src/jump_threading.rs
+++ b/compiler/rustc_mir_transform/src/jump_threading.rs
@@ -172,11 +172,11 @@ impl HasBottom for ConditionSet<'_> {
 }
 
 impl<'a> ConditionSet<'a> {
-    fn iter(self) -> impl Iterator<Item = Condition> + 'a {
+    fn iter(self) -> impl Iterator<Item = Condition> {
         self.0.iter().copied()
     }
 
-    fn iter_matches(self, value: ScalarInt) -> impl Iterator<Item = Condition> + 'a {
+    fn iter_matches(self, value: ScalarInt) -> impl Iterator<Item = Condition> {
         self.iter().filter(move |c| c.matches(value))
     }
 
@@ -467,7 +467,7 @@ impl<'a, 'tcx> TOFinder<'a, 'tcx> {
                 state.insert_place_idx(rhs, lhs, &self.map);
             }
             // If we expect `lhs ?= A`, we have an opportunity if we assume `constant == A`.
-            Rvalue::Aggregate(box ref kind, ref operands) => {
+            Rvalue::Aggregate(box kind, operands) => {
                 let agg_ty = lhs_place.ty(self.body, self.tcx).ty;
                 let lhs = match kind {
                     // Do not support unions.
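Dropping the explicit `+ 'a` bounds follows from the 2024-edition change to return-position `impl Trait`: the opaque type now captures all in-scope generic lifetimes by default. A self-contained illustration with a hypothetical wrapper type:

#[derive(Clone, Copy)]
struct Conditions<'a>(&'a [u32]);

impl<'a> Conditions<'a> {
    // Edition 2024: the returned opaque type implicitly captures `'a`,
    // so spelling out `+ 'a` is redundant.
    fn iter(self) -> impl Iterator<Item = u32> {
        self.0.iter().copied()
    }
}

fn main() {
    let data = [1, 2, 3];
    let sum: u32 = Conditions(&data).iter().sum();
    assert_eq!(sum, 6);
}

The same cleanup recurs in `sroa.rs` and `ssa.rs` further down.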
diff --git a/compiler/rustc_mir_transform/src/known_panics_lint.rs b/compiler/rustc_mir_transform/src/known_panics_lint.rs
index 59de6ca84a7..f8db8de4e82 100644
--- a/compiler/rustc_mir_transform/src/known_panics_lint.rs
+++ b/compiler/rustc_mir_transform/src/known_panics_lint.rs
@@ -509,7 +509,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
                     // other overflow checks.
                     AssertKind::Overflow(*bin_op, eval_to_int(op1), eval_to_int(op2))
                 }
-                AssertKind::BoundsCheck { ref len, ref index } => {
+                AssertKind::BoundsCheck { len, index } => {
                     let len = eval_to_int(len);
                     let index = eval_to_int(index);
                     AssertKind::BoundsCheck { len, index }
@@ -782,10 +782,10 @@ impl<'tcx> Visitor<'tcx> for ConstPropagator<'_, 'tcx> {
     fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, location: Location) {
         self.super_terminator(terminator, location);
         match &terminator.kind {
-            TerminatorKind::Assert { expected, ref msg, ref cond, .. } => {
+            TerminatorKind::Assert { expected, msg, cond, .. } => {
                 self.check_assertion(*expected, msg, cond, location);
             }
-            TerminatorKind::SwitchInt { ref discr, ref targets } => {
+            TerminatorKind::SwitchInt { discr, targets } => {
                 if let Some(ref value) = self.eval_operand(discr)
                     && let Some(value_const) = self.use_ecx(|this| this.ecx.read_scalar(value))
                     && let Some(constant) = value_const.to_bits(value_const.size()).discard_err()
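The removed `ref` keywords are redundant: when the scrutinee is already a reference (here `&terminator.kind`), match ergonomics make bindings borrow by default. A minimal demonstration:

fn describe(kind: &Option<String>) -> usize {
    match kind {
        // `s` binds as `&String` via the default binding modes;
        // no explicit `ref` is needed.
        Some(s) => s.len(),
        None => 0,
    }
}

fn main() {
    assert_eq!(describe(&Some("abc".to_string())), 3);
    assert_eq!(describe(&None), 0);
}

The same pattern cleanup appears below in `mentioned_items.rs`, `simplify.rs`, `simplify_branches.rs`, and `simplify_comparison_integral.rs`.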
diff --git a/compiler/rustc_mir_transform/src/large_enums.rs b/compiler/rustc_mir_transform/src/large_enums.rs
index 1e546bfbeb3..47cb478fe33 100644
--- a/compiler/rustc_mir_transform/src/large_enums.rs
+++ b/compiler/rustc_mir_transform/src/large_enums.rs
@@ -6,6 +6,8 @@ use rustc_middle::ty::util::IntTypeExt;
 use rustc_middle::ty::{self, AdtDef, Ty, TyCtxt};
 use rustc_session::Session;
 
+use crate::patch::MirPatch;
+
 /// A pass that seeks to optimize unnecessary moves of large enum types, if there is a large
 /// enough discrepancy between them.
 ///
@@ -41,31 +43,34 @@ impl<'tcx> crate::MirPass<'tcx> for EnumSizeOpt {
         let mut alloc_cache = FxHashMap::default();
         let typing_env = body.typing_env(tcx);
 
-        let blocks = body.basic_blocks.as_mut();
-        let local_decls = &mut body.local_decls;
+        let mut patch = MirPatch::new(body);
 
-        for bb in blocks {
-            bb.expand_statements(|st| {
+        for (block, data) in body.basic_blocks.as_mut().iter_enumerated_mut() {
+            for (statement_index, st) in data.statements.iter_mut().enumerate() {
                 let StatementKind::Assign(box (
                     lhs,
                     Rvalue::Use(Operand::Copy(rhs) | Operand::Move(rhs)),
                 )) = &st.kind
                 else {
-                    return None;
+                    continue;
                 };
 
-                let ty = lhs.ty(local_decls, tcx).ty;
+                let location = Location { block, statement_index };
 
-                let (adt_def, num_variants, alloc_id) =
-                    self.candidate(tcx, typing_env, ty, &mut alloc_cache)?;
+                let ty = lhs.ty(&body.local_decls, tcx).ty;
 
-                let source_info = st.source_info;
-                let span = source_info.span;
+                let Some((adt_def, num_variants, alloc_id)) =
+                    self.candidate(tcx, typing_env, ty, &mut alloc_cache)
+                else {
+                    continue;
+                };
+
+                let span = st.source_info.span;
 
                 let tmp_ty = Ty::new_array(tcx, tcx.types.usize, num_variants as u64);
-                let size_array_local = local_decls.push(LocalDecl::new(tmp_ty, span));
-                let store_live =
-                    Statement { source_info, kind: StatementKind::StorageLive(size_array_local) };
+                let size_array_local = patch.new_temp(tmp_ty, span);
+
+                let store_live = StatementKind::StorageLive(size_array_local);
 
                 let place = Place::from(size_array_local);
                 let constant_vals = ConstOperand {
@@ -77,108 +82,63 @@ impl<'tcx> crate::MirPass<'tcx> for EnumSizeOpt {
                     ),
                 };
                 let rval = Rvalue::Use(Operand::Constant(Box::new(constant_vals)));
-                let const_assign =
-                    Statement { source_info, kind: StatementKind::Assign(Box::new((place, rval))) };
-
-                let discr_place = Place::from(
-                    local_decls.push(LocalDecl::new(adt_def.repr().discr_type().to_ty(tcx), span)),
-                );
-                let store_discr = Statement {
-                    source_info,
-                    kind: StatementKind::Assign(Box::new((
-                        discr_place,
-                        Rvalue::Discriminant(*rhs),
-                    ))),
-                };
-
-                let discr_cast_place =
-                    Place::from(local_decls.push(LocalDecl::new(tcx.types.usize, span)));
-                let cast_discr = Statement {
-                    source_info,
-                    kind: StatementKind::Assign(Box::new((
-                        discr_cast_place,
-                        Rvalue::Cast(
-                            CastKind::IntToInt,
-                            Operand::Copy(discr_place),
-                            tcx.types.usize,
-                        ),
-                    ))),
-                };
-
-                let size_place =
-                    Place::from(local_decls.push(LocalDecl::new(tcx.types.usize, span)));
-                let store_size = Statement {
-                    source_info,
-                    kind: StatementKind::Assign(Box::new((
-                        size_place,
-                        Rvalue::Use(Operand::Copy(Place {
-                            local: size_array_local,
-                            projection: tcx
-                                .mk_place_elems(&[PlaceElem::Index(discr_cast_place.local)]),
-                        })),
-                    ))),
-                };
-
-                let dst =
-                    Place::from(local_decls.push(LocalDecl::new(Ty::new_mut_ptr(tcx, ty), span)));
-                let dst_ptr = Statement {
-                    source_info,
-                    kind: StatementKind::Assign(Box::new((
-                        dst,
-                        Rvalue::RawPtr(RawPtrKind::Mut, *lhs),
-                    ))),
-                };
+                let const_assign = StatementKind::Assign(Box::new((place, rval)));
+
+                let discr_place =
+                    Place::from(patch.new_temp(adt_def.repr().discr_type().to_ty(tcx), span));
+                let store_discr =
+                    StatementKind::Assign(Box::new((discr_place, Rvalue::Discriminant(*rhs))));
+
+                let discr_cast_place = Place::from(patch.new_temp(tcx.types.usize, span));
+                let cast_discr = StatementKind::Assign(Box::new((
+                    discr_cast_place,
+                    Rvalue::Cast(CastKind::IntToInt, Operand::Copy(discr_place), tcx.types.usize),
+                )));
+
+                let size_place = Place::from(patch.new_temp(tcx.types.usize, span));
+                let store_size = StatementKind::Assign(Box::new((
+                    size_place,
+                    Rvalue::Use(Operand::Copy(Place {
+                        local: size_array_local,
+                        projection: tcx.mk_place_elems(&[PlaceElem::Index(discr_cast_place.local)]),
+                    })),
+                )));
+
+                let dst = Place::from(patch.new_temp(Ty::new_mut_ptr(tcx, ty), span));
+                let dst_ptr =
+                    StatementKind::Assign(Box::new((dst, Rvalue::RawPtr(RawPtrKind::Mut, *lhs))));
 
                 let dst_cast_ty = Ty::new_mut_ptr(tcx, tcx.types.u8);
-                let dst_cast_place =
-                    Place::from(local_decls.push(LocalDecl::new(dst_cast_ty, span)));
-                let dst_cast = Statement {
-                    source_info,
-                    kind: StatementKind::Assign(Box::new((
-                        dst_cast_place,
-                        Rvalue::Cast(CastKind::PtrToPtr, Operand::Copy(dst), dst_cast_ty),
-                    ))),
-                };
+                let dst_cast_place = Place::from(patch.new_temp(dst_cast_ty, span));
+                let dst_cast = StatementKind::Assign(Box::new((
+                    dst_cast_place,
+                    Rvalue::Cast(CastKind::PtrToPtr, Operand::Copy(dst), dst_cast_ty),
+                )));
 
-                let src =
-                    Place::from(local_decls.push(LocalDecl::new(Ty::new_imm_ptr(tcx, ty), span)));
-                let src_ptr = Statement {
-                    source_info,
-                    kind: StatementKind::Assign(Box::new((
-                        src,
-                        Rvalue::RawPtr(RawPtrKind::Const, *rhs),
-                    ))),
-                };
+                let src = Place::from(patch.new_temp(Ty::new_imm_ptr(tcx, ty), span));
+                let src_ptr =
+                    StatementKind::Assign(Box::new((src, Rvalue::RawPtr(RawPtrKind::Const, *rhs))));
 
                 let src_cast_ty = Ty::new_imm_ptr(tcx, tcx.types.u8);
-                let src_cast_place =
-                    Place::from(local_decls.push(LocalDecl::new(src_cast_ty, span)));
-                let src_cast = Statement {
-                    source_info,
-                    kind: StatementKind::Assign(Box::new((
-                        src_cast_place,
-                        Rvalue::Cast(CastKind::PtrToPtr, Operand::Copy(src), src_cast_ty),
-                    ))),
-                };
+                let src_cast_place = Place::from(patch.new_temp(src_cast_ty, span));
+                let src_cast = StatementKind::Assign(Box::new((
+                    src_cast_place,
+                    Rvalue::Cast(CastKind::PtrToPtr, Operand::Copy(src), src_cast_ty),
+                )));
 
-                let deinit_old =
-                    Statement { source_info, kind: StatementKind::Deinit(Box::new(dst)) };
-
-                let copy_bytes = Statement {
-                    source_info,
-                    kind: StatementKind::Intrinsic(Box::new(
-                        NonDivergingIntrinsic::CopyNonOverlapping(CopyNonOverlapping {
-                            src: Operand::Copy(src_cast_place),
-                            dst: Operand::Copy(dst_cast_place),
-                            count: Operand::Copy(size_place),
-                        }),
-                    )),
-                };
+                let deinit_old = StatementKind::Deinit(Box::new(dst));
+
+                let copy_bytes = StatementKind::Intrinsic(Box::new(
+                    NonDivergingIntrinsic::CopyNonOverlapping(CopyNonOverlapping {
+                        src: Operand::Copy(src_cast_place),
+                        dst: Operand::Copy(dst_cast_place),
+                        count: Operand::Copy(size_place),
+                    }),
+                ));
 
-                let store_dead =
-                    Statement { source_info, kind: StatementKind::StorageDead(size_array_local) };
+                let store_dead = StatementKind::StorageDead(size_array_local);
 
-                let iter = [
+                let stmts = [
                     store_live,
                     const_assign,
                     store_discr,
@@ -191,14 +151,16 @@ impl<'tcx> crate::MirPass<'tcx> for EnumSizeOpt {
                     deinit_old,
                     copy_bytes,
                     store_dead,
-                ]
-                .into_iter();
+                ];
+                for stmt in stmts {
+                    patch.add_statement(location, stmt);
+                }
 
                 st.make_nop();
-
-                Some(iter)
-            });
+            }
         }
+
+        patch.apply(body);
     }
 
     fn is_required(&self) -> bool {
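The rewrite swaps `expand_statements` (splicing statements while iterating) for the queue-then-apply discipline of `MirPatch`: statement indices observed during the walk stay valid because nothing is inserted until `patch.apply(body)` runs at the end. A condensed sketch of the shape, using the rustc-internal types as they appear above (not the exact pass, and not runnable outside the compiler):

let mut patch = MirPatch::new(body);
for (block, data) in body.basic_blocks.as_mut().iter_enumerated_mut() {
    for (statement_index, st) in data.statements.iter_mut().enumerate() {
        let location = Location { block, statement_index };
        // Queue the replacement sequence in front of the old statement...
        let tmp = patch.new_temp(tcx.types.usize, st.source_info.span);
        patch.add_statement(location, StatementKind::StorageLive(tmp));
        patch.add_statement(location, StatementKind::StorageDead(tmp));
        // ...and retire the old statement in place.
        st.make_nop();
    }
}
patch.apply(body); // all queued statements and temps land at once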
diff --git a/compiler/rustc_mir_transform/src/lib.rs b/compiler/rustc_mir_transform/src/lib.rs
index b572f6ca0b3..04c9375b831 100644
--- a/compiler/rustc_mir_transform/src/lib.rs
+++ b/compiler/rustc_mir_transform/src/lib.rs
@@ -49,10 +49,12 @@ mod check_pointers;
 mod cost_checker;
 mod cross_crate_inline;
 mod deduce_param_attrs;
+mod elaborate_drop;
 mod errors;
 mod ffi_unwind_calls;
 mod lint;
 mod lint_tail_expr_drop_order;
+mod patch;
 mod shim;
 mod ssa;
 
@@ -135,7 +137,6 @@ declare_passes! {
         Initial,
         Final
     };
-    mod deduplicate_blocks : DeduplicateBlocks;
     mod deref_separator : Derefer;
     mod dest_prop : DestinationPropagation;
     pub mod dump_mir : Marker;
@@ -313,11 +314,11 @@ fn is_mir_available(tcx: TyCtxt<'_>, def_id: LocalDefId) -> bool {
 /// MIR associated with them.
 fn mir_keys(tcx: TyCtxt<'_>, (): ()) -> FxIndexSet<LocalDefId> {
     // All body-owners have MIR associated with them.
-    let mut set: FxIndexSet<_> = tcx.hir().body_owners().collect();
+    let mut set: FxIndexSet<_> = tcx.hir_body_owners().collect();
 
     // Coroutine-closures (e.g. async closures) have an additional by-move MIR
     // body that isn't in the HIR.
-    for body_owner in tcx.hir().body_owners() {
+    for body_owner in tcx.hir_body_owners() {
         if let DefKind::Closure = tcx.def_kind(body_owner)
             && tcx.needs_coroutine_by_move_body_def_id(body_owner.to_def_id())
         {
@@ -469,7 +470,7 @@ fn inner_mir_for_ctfe(tcx: TyCtxt<'_>, def: LocalDefId) -> Body<'_> {
     }
 
     let body = tcx.mir_drops_elaborated_and_const_checked(def);
-    let body = match tcx.hir().body_const_context(def) {
+    let body = match tcx.hir_body_const_context(def) {
         // consts and statics do not have `optimized_mir`, so we can steal the body instead of
         // cloning it.
         Some(hir::ConstContext::Const { .. } | hir::ConstContext::Static(_)) => body.steal(),
@@ -700,7 +701,6 @@ fn run_optimization_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
             &nrvo::RenameReturnPlace,
             &simplify::SimplifyLocals::Final,
             &multiple_return_terminators::MultipleReturnTerminators,
-            &deduplicate_blocks::DeduplicateBlocks,
             &large_enums::EnumSizeOpt { discrepancy: 128 },
             // Some cleanup necessary at least for LLVM and potentially other codegen backends.
             &add_call_guards::CriticalCallEdges,
@@ -729,7 +729,7 @@ fn inner_optimized_mir(tcx: TyCtxt<'_>, did: LocalDefId) -> Body<'_> {
         return shim::build_adt_ctor(tcx, did.to_def_id());
     }
 
-    match tcx.hir().body_const_context(did) {
+    match tcx.hir_body_const_context(did) {
         // Run the `mir_for_ctfe` query, which depends on `mir_drops_elaborated_and_const_checked`
         // which we are going to steal below. Thus we need to run `mir_for_ctfe` first, so it
         // computes and caches its result.
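Besides the mechanical `tcx.hir()` to `tcx.hir_*` accessor renames, this file retires `DeduplicateBlocks` from both the pass declarations and the optimization pipeline (per the diffstat, its source file is deleted wholesale). For orientation, what such a pass does in miniature — a toy model, not the removed implementation:

// Hash identical blocks and redirect edges to one representative.
use std::collections::HashMap;

fn dedup(blocks: &[String], edges: &mut [usize]) {
    let mut seen: HashMap<&str, usize> = HashMap::new();
    let mut repr: Vec<usize> = (0..blocks.len()).collect();
    for (i, b) in blocks.iter().enumerate() {
        repr[i] = *seen.entry(b.as_str()).or_insert(i);
    }
    for e in edges.iter_mut() {
        *e = repr[*e];
    }
}

fn main() {
    let blocks = vec!["ret".to_string(), "ret".to_string()];
    let mut edges = vec![0, 1];
    dedup(&blocks, &mut edges);
    assert_eq!(edges, [0, 0]); // both edges now target block 0
}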
diff --git a/compiler/rustc_mir_transform/src/lint_tail_expr_drop_order.rs b/compiler/rustc_mir_transform/src/lint_tail_expr_drop_order.rs
index 50d10883d2c..e3260e45bc5 100644
--- a/compiler/rustc_mir_transform/src/lint_tail_expr_drop_order.rs
+++ b/compiler/rustc_mir_transform/src/lint_tail_expr_drop_order.rs
@@ -186,7 +186,7 @@ fn true_significant_drop_ty<'tcx>(
         debug!(?name_str);
         match name_str[..] {
             // These are the types from Rust core ecosystem
-            ["sym" | "proc_macro2", ..]
+            ["syn" | "proc_macro2", ..]
             | ["core" | "std", "task", "LocalWaker" | "Waker"]
             | ["core" | "std", "task", "wake", "LocalWaker" | "Waker"] => Some(smallvec![]),
             // These are important types from Rust ecosystem
diff --git a/compiler/rustc_mir_transform/src/match_branches.rs b/compiler/rustc_mir_transform/src/match_branches.rs
index 500116580d5..9db37bf5a07 100644
--- a/compiler/rustc_mir_transform/src/match_branches.rs
+++ b/compiler/rustc_mir_transform/src/match_branches.rs
@@ -2,7 +2,6 @@ use std::iter;
 
 use rustc_abi::Integer;
 use rustc_index::IndexSlice;
-use rustc_middle::mir::patch::MirPatch;
 use rustc_middle::mir::*;
 use rustc_middle::ty::layout::{IntegerExt, TyAndLayout};
 use rustc_middle::ty::{self, ScalarInt, Ty, TyCtxt};
@@ -10,6 +9,7 @@ use rustc_type_ir::TyKind::*;
 use tracing::instrument;
 
 use super::simplify::simplify_cfg;
+use crate::patch::MirPatch;
 
 pub(super) struct MatchBranchSimplification;
 
diff --git a/compiler/rustc_mir_transform/src/mentioned_items.rs b/compiler/rustc_mir_transform/src/mentioned_items.rs
index f5c57418467..9fd8d81d64a 100644
--- a/compiler/rustc_mir_transform/src/mentioned_items.rs
+++ b/compiler/rustc_mir_transform/src/mentioned_items.rs
@@ -51,7 +51,7 @@ impl<'tcx> Visitor<'tcx> for MentionedItemsVisitor<'_, 'tcx> {
                 let ty = place.ty(self.body, self.tcx).ty;
                 self.mentioned_items.push(Spanned { node: MentionedItem::Drop(ty), span: span() });
             }
-            mir::TerminatorKind::InlineAsm { ref operands, .. } => {
+            mir::TerminatorKind::InlineAsm { operands, .. } => {
                 for op in operands {
                     match *op {
                         mir::InlineAsmOperand::SymFn { ref value } => {
diff --git a/compiler/rustc_mir_transform/src/patch.rs b/compiler/rustc_mir_transform/src/patch.rs
new file mode 100644
index 00000000000..6a177faeac8
--- /dev/null
+++ b/compiler/rustc_mir_transform/src/patch.rs
@@ -0,0 +1,285 @@
+use rustc_index::{Idx, IndexVec};
+use rustc_middle::mir::*;
+use rustc_middle::ty::Ty;
+use rustc_span::Span;
+use tracing::debug;
+
+/// This struct lets you "patch" a MIR body, i.e. modify it. You can queue up
+/// various changes, such as the addition of new statements and basic blocks
+/// and replacement of terminators, and then apply the queued changes all at
+/// once with `apply`. This is useful for MIR transformation passes.
+pub(crate) struct MirPatch<'tcx> {
+    term_patch_map: IndexVec<BasicBlock, Option<TerminatorKind<'tcx>>>,
+    new_blocks: Vec<BasicBlockData<'tcx>>,
+    new_statements: Vec<(Location, StatementKind<'tcx>)>,
+    new_locals: Vec<LocalDecl<'tcx>>,
+    resume_block: Option<BasicBlock>,
+    // Only for unreachable in cleanup path.
+    unreachable_cleanup_block: Option<BasicBlock>,
+    // Only for unreachable not in cleanup path.
+    unreachable_no_cleanup_block: Option<BasicBlock>,
+    // Cached block for UnwindTerminate (with reason).
+    terminate_block: Option<(BasicBlock, UnwindTerminateReason)>,
+    body_span: Span,
+    next_local: usize,
+}
+
+impl<'tcx> MirPatch<'tcx> {
+    /// Creates a new, empty patch.
+    pub(crate) fn new(body: &Body<'tcx>) -> Self {
+        let mut result = MirPatch {
+            term_patch_map: IndexVec::from_elem(None, &body.basic_blocks),
+            new_blocks: vec![],
+            new_statements: vec![],
+            new_locals: vec![],
+            next_local: body.local_decls.len(),
+            resume_block: None,
+            unreachable_cleanup_block: None,
+            unreachable_no_cleanup_block: None,
+            terminate_block: None,
+            body_span: body.span,
+        };
+
+        for (bb, block) in body.basic_blocks.iter_enumerated() {
+            // Check if we already have a resume block
+            if matches!(block.terminator().kind, TerminatorKind::UnwindResume)
+                && block.statements.is_empty()
+            {
+                result.resume_block = Some(bb);
+                continue;
+            }
+
+            // Check if we already have an unreachable block
+            if matches!(block.terminator().kind, TerminatorKind::Unreachable)
+                && block.statements.is_empty()
+            {
+                if block.is_cleanup {
+                    result.unreachable_cleanup_block = Some(bb);
+                } else {
+                    result.unreachable_no_cleanup_block = Some(bb);
+                }
+                continue;
+            }
+
+            // Check if we already have a terminate block
+            if let TerminatorKind::UnwindTerminate(reason) = block.terminator().kind
+                && block.statements.is_empty()
+            {
+                result.terminate_block = Some((bb, reason));
+                continue;
+            }
+        }
+
+        result
+    }
+
+    pub(crate) fn resume_block(&mut self) -> BasicBlock {
+        if let Some(bb) = self.resume_block {
+            return bb;
+        }
+
+        let bb = self.new_block(BasicBlockData {
+            statements: vec![],
+            terminator: Some(Terminator {
+                source_info: SourceInfo::outermost(self.body_span),
+                kind: TerminatorKind::UnwindResume,
+            }),
+            is_cleanup: true,
+        });
+        self.resume_block = Some(bb);
+        bb
+    }
+
+    pub(crate) fn unreachable_cleanup_block(&mut self) -> BasicBlock {
+        if let Some(bb) = self.unreachable_cleanup_block {
+            return bb;
+        }
+
+        let bb = self.new_block(BasicBlockData {
+            statements: vec![],
+            terminator: Some(Terminator {
+                source_info: SourceInfo::outermost(self.body_span),
+                kind: TerminatorKind::Unreachable,
+            }),
+            is_cleanup: true,
+        });
+        self.unreachable_cleanup_block = Some(bb);
+        bb
+    }
+
+    pub(crate) fn unreachable_no_cleanup_block(&mut self) -> BasicBlock {
+        if let Some(bb) = self.unreachable_no_cleanup_block {
+            return bb;
+        }
+
+        let bb = self.new_block(BasicBlockData {
+            statements: vec![],
+            terminator: Some(Terminator {
+                source_info: SourceInfo::outermost(self.body_span),
+                kind: TerminatorKind::Unreachable,
+            }),
+            is_cleanup: false,
+        });
+        self.unreachable_no_cleanup_block = Some(bb);
+        bb
+    }
+
+    pub(crate) fn terminate_block(&mut self, reason: UnwindTerminateReason) -> BasicBlock {
+        if let Some((cached_bb, cached_reason)) = self.terminate_block
+            && reason == cached_reason
+        {
+            return cached_bb;
+        }
+
+        let bb = self.new_block(BasicBlockData {
+            statements: vec![],
+            terminator: Some(Terminator {
+                source_info: SourceInfo::outermost(self.body_span),
+                kind: TerminatorKind::UnwindTerminate(reason),
+            }),
+            is_cleanup: true,
+        });
+        self.terminate_block = Some((bb, reason));
+        bb
+    }
+
+    /// Has a replacement of this block's terminator been queued in this patch?
+    pub(crate) fn is_term_patched(&self, bb: BasicBlock) -> bool {
+        self.term_patch_map[bb].is_some()
+    }
+
+    /// Queues the addition of a new temporary with additional local info.
+    pub(crate) fn new_local_with_info(
+        &mut self,
+        ty: Ty<'tcx>,
+        span: Span,
+        local_info: LocalInfo<'tcx>,
+    ) -> Local {
+        let index = self.next_local;
+        self.next_local += 1;
+        let mut new_decl = LocalDecl::new(ty, span);
+        **new_decl.local_info.as_mut().unwrap_crate_local() = local_info;
+        self.new_locals.push(new_decl);
+        Local::new(index)
+    }
+
+    /// Queues the addition of a new temporary.
+    pub(crate) fn new_temp(&mut self, ty: Ty<'tcx>, span: Span) -> Local {
+        let index = self.next_local;
+        self.next_local += 1;
+        self.new_locals.push(LocalDecl::new(ty, span));
+        Local::new(index)
+    }
+
+    /// Returns the type of a local that's newly-added in the patch.
+    pub(crate) fn local_ty(&self, local: Local) -> Ty<'tcx> {
+        let local = local.as_usize();
+        assert!(local < self.next_local);
+        let new_local_idx = self.new_locals.len() - (self.next_local - local);
+        self.new_locals[new_local_idx].ty
+    }
+
+    /// Queues the addition of a new basic block.
+    pub(crate) fn new_block(&mut self, data: BasicBlockData<'tcx>) -> BasicBlock {
+        let block = BasicBlock::new(self.term_patch_map.len());
+        debug!("MirPatch: new_block: {:?}: {:?}", block, data);
+        self.new_blocks.push(data);
+        self.term_patch_map.push(None);
+        block
+    }
+
+    /// Queues the replacement of a block's terminator.
+    pub(crate) fn patch_terminator(&mut self, block: BasicBlock, new: TerminatorKind<'tcx>) {
+        assert!(self.term_patch_map[block].is_none());
+        debug!("MirPatch: patch_terminator({:?}, {:?})", block, new);
+        self.term_patch_map[block] = Some(new);
+    }
+
+    /// Queues the insertion of a statement at a given location. The statement
+    /// currently at that location, and all statements that follow, are shifted
+    /// down. If multiple statements are queued for addition at the same
+    /// location, the final statement order after calling `apply` will match
+    /// the queue insertion order.
+    ///
+    /// E.g. if we have `s0` at location `loc` and do these calls:
+    ///
+    ///   p.add_statement(loc, s1);
+    ///   p.add_statement(loc, s2);
+    ///   p.apply(body);
+    ///
+    /// then the final order will be `s1, s2, s0`, with `s1` at `loc`.
+    pub(crate) fn add_statement(&mut self, loc: Location, stmt: StatementKind<'tcx>) {
+        debug!("MirPatch: add_statement({:?}, {:?})", loc, stmt);
+        self.new_statements.push((loc, stmt));
+    }
+
+    /// Like `add_statement`, but specialized for assignments.
+    pub(crate) fn add_assign(&mut self, loc: Location, place: Place<'tcx>, rv: Rvalue<'tcx>) {
+        self.add_statement(loc, StatementKind::Assign(Box::new((place, rv))));
+    }
+
+    /// Applies the queued changes.
+    pub(crate) fn apply(self, body: &mut Body<'tcx>) {
+        debug!(
+            "MirPatch: {:?} new temps, starting from index {}: {:?}",
+            self.new_locals.len(),
+            body.local_decls.len(),
+            self.new_locals
+        );
+        debug!(
+            "MirPatch: {} new blocks, starting from index {}",
+            self.new_blocks.len(),
+            body.basic_blocks.len()
+        );
+        let bbs = if self.term_patch_map.is_empty() && self.new_blocks.is_empty() {
+            body.basic_blocks.as_mut_preserves_cfg()
+        } else {
+            body.basic_blocks.as_mut()
+        };
+        bbs.extend(self.new_blocks);
+        body.local_decls.extend(self.new_locals);
+        for (src, patch) in self.term_patch_map.into_iter_enumerated() {
+            if let Some(patch) = patch {
+                debug!("MirPatch: patching block {:?}", src);
+                bbs[src].terminator_mut().kind = patch;
+            }
+        }
+
+        let mut new_statements = self.new_statements;
+
+        // This must be a stable sort to provide the ordering described in the
+        // comment for `add_statement`.
+        new_statements.sort_by_key(|s| s.0);
+
+        let mut delta = 0;
+        let mut last_bb = START_BLOCK;
+        for (mut loc, stmt) in new_statements {
+            if loc.block != last_bb {
+                delta = 0;
+                last_bb = loc.block;
+            }
+            debug!("MirPatch: adding statement {:?} at loc {:?}+{}", stmt, loc, delta);
+            loc.statement_index += delta;
+            let source_info = Self::source_info_for_index(&body[loc.block], loc);
+            body[loc.block]
+                .statements
+                .insert(loc.statement_index, Statement { source_info, kind: stmt });
+            delta += 1;
+        }
+    }
+
+    fn source_info_for_index(data: &BasicBlockData<'_>, loc: Location) -> SourceInfo {
+        match data.statements.get(loc.statement_index) {
+            Some(stmt) => stmt.source_info,
+            None => data.terminator().source_info,
+        }
+    }
+
+    pub(crate) fn source_info_for_location(&self, body: &Body<'tcx>, loc: Location) -> SourceInfo {
+        let data = match loc.block.index().checked_sub(body.basic_blocks.len()) {
+            Some(new) => &self.new_blocks[new],
+            None => &body[loc.block],
+        };
+        Self::source_info_for_index(data, loc)
+    }
+}
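The ordering contract documented on `add_statement`, and the `delta` bookkeeping in `apply` that upholds it, can be checked with a small standalone model: a stable sort by location, then insertion with a running offset. A runnable toy, restricted to a single block (`MirPatch` resets the offset per block):

fn apply_inserts(block: &mut Vec<&'static str>, mut queued: Vec<(usize, &'static str)>) {
    // Stable sort, as in `apply`; entries with equal indices keep queue order.
    queued.sort_by_key(|&(index, _)| index);
    let mut delta = 0;
    for (index, stmt) in queued {
        // Each earlier insertion shifts later indices by one.
        block.insert(index + delta, stmt);
        delta += 1;
    }
}

fn main() {
    let mut block = vec!["s0"];
    apply_inserts(&mut block, vec![(0, "s1"), (0, "s2")]);
    // Matches the doc comment on `add_statement`: s1, s2, then s0.
    assert_eq!(block, ["s1", "s2", "s0"]);
}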
diff --git a/compiler/rustc_mir_transform/src/remove_noop_landing_pads.rs b/compiler/rustc_mir_transform/src/remove_noop_landing_pads.rs
index 9cdd52bec7b..1dd34005d66 100644
--- a/compiler/rustc_mir_transform/src/remove_noop_landing_pads.rs
+++ b/compiler/rustc_mir_transform/src/remove_noop_landing_pads.rs
@@ -1,10 +1,11 @@
 use rustc_index::bit_set::DenseBitSet;
-use rustc_middle::mir::patch::MirPatch;
 use rustc_middle::mir::*;
 use rustc_middle::ty::TyCtxt;
 use rustc_target::spec::PanicStrategy;
 use tracing::debug;
 
+use crate::patch::MirPatch;
+
 /// A pass that removes noop landing pads and replaces jumps to them with
 /// `UnwindAction::Continue`. This is important because otherwise LLVM generates
 /// terrible code for these.
diff --git a/compiler/rustc_mir_transform/src/shim.rs b/compiler/rustc_mir_transform/src/shim.rs
index f7ec0f740d3..34074a84e28 100644
--- a/compiler/rustc_mir_transform/src/shim.rs
+++ b/compiler/rustc_mir_transform/src/shim.rs
@@ -6,7 +6,6 @@ use rustc_hir as hir;
 use rustc_hir::def_id::DefId;
 use rustc_hir::lang_items::LangItem;
 use rustc_index::{Idx, IndexVec};
-use rustc_middle::mir::patch::MirPatch;
 use rustc_middle::mir::*;
 use rustc_middle::query::Providers;
 use rustc_middle::ty::adjustment::PointerCoercion;
@@ -14,11 +13,12 @@ use rustc_middle::ty::{
     self, CoroutineArgs, CoroutineArgsExt, EarlyBinder, GenericArgs, Ty, TyCtxt,
 };
 use rustc_middle::{bug, span_bug};
-use rustc_mir_dataflow::elaborate_drops::{self, DropElaborator, DropFlagMode, DropStyle};
 use rustc_span::source_map::Spanned;
 use rustc_span::{DUMMY_SP, Span};
 use tracing::{debug, instrument};
 
+use crate::elaborate_drop::{DropElaborator, DropFlagMode, DropStyle, Unwind, elaborate_drop};
+use crate::patch::MirPatch;
 use crate::{
     abort_unwinding_calls, add_call_guards, add_moves_for_packed_drops, deref_separator, inline,
     instsimplify, mentioned_items, pass_manager as pm, remove_noop_landing_pads, simplify,
@@ -283,13 +283,13 @@ fn build_drop_shim<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId, ty: Option<Ty<'tcx>>)
                 DropShimElaborator { body: &body, patch: MirPatch::new(&body), tcx, typing_env };
             let dropee = tcx.mk_place_deref(dropee_ptr);
             let resume_block = elaborator.patch.resume_block();
-            elaborate_drops::elaborate_drop(
+            elaborate_drop(
                 &mut elaborator,
                 source_info,
                 dropee,
                 (),
                 return_block,
-                elaborate_drops::Unwind::To(resume_block),
+                Unwind::To(resume_block),
                 START_BLOCK,
             );
             elaborator.patch
@@ -350,6 +350,9 @@ impl fmt::Debug for DropShimElaborator<'_, '_> {
 impl<'a, 'tcx> DropElaborator<'a, 'tcx> for DropShimElaborator<'a, 'tcx> {
     type Path = ();
 
+    fn patch_ref(&self) -> &MirPatch<'tcx> {
+        &self.patch
+    }
     fn patch(&mut self) -> &mut MirPatch<'tcx> {
         &mut self.patch
     }
diff --git a/compiler/rustc_mir_transform/src/simplify.rs b/compiler/rustc_mir_transform/src/simplify.rs
index 67070f03ded..84905f4a400 100644
--- a/compiler/rustc_mir_transform/src/simplify.rs
+++ b/compiler/rustc_mir_transform/src/simplify.rs
@@ -568,9 +568,9 @@ fn remove_unused_definitions_helper(used_locals: &mut UsedLocals, body: &mut Bod
                     }
                     StatementKind::Assign(box (place, _)) => used_locals.is_used(place.local),
 
-                    StatementKind::SetDiscriminant { ref place, .. }
-                    | StatementKind::BackwardIncompatibleDropHint { ref place, reason: _ }
-                    | StatementKind::Deinit(ref place) => used_locals.is_used(place.local),
+                    StatementKind::SetDiscriminant { place, .. }
+                    | StatementKind::BackwardIncompatibleDropHint { place, reason: _ }
+                    | StatementKind::Deinit(place) => used_locals.is_used(place.local),
                     StatementKind::Nop => false,
                     _ => true,
                 };
diff --git a/compiler/rustc_mir_transform/src/simplify_branches.rs b/compiler/rustc_mir_transform/src/simplify_branches.rs
index 12c3503879f..886f4d6e509 100644
--- a/compiler/rustc_mir_transform/src/simplify_branches.rs
+++ b/compiler/rustc_mir_transform/src/simplify_branches.rs
@@ -24,7 +24,7 @@ impl<'tcx> crate::MirPass<'tcx> for SimplifyConstCondition {
                 // Simplify `assume` of a known value: either a NOP or unreachable.
                 if let StatementKind::Intrinsic(box ref intrinsic) = stmt.kind
                     && let NonDivergingIntrinsic::Assume(discr) = intrinsic
-                    && let Operand::Constant(ref c) = discr
+                    && let Operand::Constant(c) = discr
                     && let Some(constant) = c.const_.try_eval_bool(tcx, typing_env)
                 {
                     if constant {
diff --git a/compiler/rustc_mir_transform/src/simplify_comparison_integral.rs b/compiler/rustc_mir_transform/src/simplify_comparison_integral.rs
index 21bc51ecca1..bd008230731 100644
--- a/compiler/rustc_mir_transform/src/simplify_comparison_integral.rs
+++ b/compiler/rustc_mir_transform/src/simplify_comparison_integral.rs
@@ -89,10 +89,10 @@ impl<'tcx> crate::MirPass<'tcx> for SimplifyComparisonIntegral {
 
                 use Operand::*;
                 match rhs {
-                    Rvalue::BinaryOp(_, box (ref mut left @ Move(_), Constant(_))) => {
+                    Rvalue::BinaryOp(_, box (left @ Move(_), Constant(_))) => {
                         *left = Copy(opt.to_switch_on);
                     }
-                    Rvalue::BinaryOp(_, box (Constant(_), ref mut right @ Move(_))) => {
+                    Rvalue::BinaryOp(_, box (Constant(_), right @ Move(_))) => {
                         *right = Copy(opt.to_switch_on);
                     }
                     _ => (),
diff --git a/compiler/rustc_mir_transform/src/single_use_consts.rs b/compiler/rustc_mir_transform/src/single_use_consts.rs
index c5e951eb8b2..02caa92ad3f 100644
--- a/compiler/rustc_mir_transform/src/single_use_consts.rs
+++ b/compiler/rustc_mir_transform/src/single_use_consts.rs
@@ -48,9 +48,11 @@ impl<'tcx> crate::MirPass<'tcx> for SingleUseConsts {
 
             // We're only changing an operand, not the terminator kinds or successors
             let basic_blocks = body.basic_blocks.as_mut_preserves_cfg();
-            let init_statement =
-                basic_blocks[init_loc.block].statements[init_loc.statement_index].replace_nop();
-            let StatementKind::Assign(place_and_rvalue) = init_statement.kind else {
+            let init_statement_kind = std::mem::replace(
+                &mut basic_blocks[init_loc.block].statements[init_loc.statement_index].kind,
+                StatementKind::Nop,
+            );
+            let StatementKind::Assign(place_and_rvalue) = init_statement_kind else {
                 bug!("No longer an assign?");
             };
             let (place, rvalue) = *place_and_rvalue;
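The `replace_nop` helper on `Statement` is traded here for plain `std::mem::replace`, which swaps a new value in and hands the old one back:

fn main() {
    let mut kinds = vec!["assign", "goto"];
    // Swap a nop in and get the original statement kind back out.
    let old = std::mem::replace(&mut kinds[0], "nop");
    assert_eq!(old, "assign");
    assert_eq!(kinds, ["nop", "goto"]);
}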
diff --git a/compiler/rustc_mir_transform/src/sroa.rs b/compiler/rustc_mir_transform/src/sroa.rs
index 92e5c8a9ca4..7c6ccc89c4f 100644
--- a/compiler/rustc_mir_transform/src/sroa.rs
+++ b/compiler/rustc_mir_transform/src/sroa.rs
@@ -4,13 +4,14 @@ use rustc_hir::LangItem;
 use rustc_index::IndexVec;
 use rustc_index::bit_set::{DenseBitSet, GrowableBitSet};
 use rustc_middle::bug;
-use rustc_middle::mir::patch::MirPatch;
 use rustc_middle::mir::visit::*;
 use rustc_middle::mir::*;
 use rustc_middle::ty::{self, Ty, TyCtxt};
 use rustc_mir_dataflow::value_analysis::{excluded_locals, iter_fields};
 use tracing::{debug, instrument};
 
+use crate::patch::MirPatch;
+
 pub(super) struct ScalarReplacementOfAggregates;
 
 impl<'tcx> crate::MirPass<'tcx> for ScalarReplacementOfAggregates {
@@ -184,7 +185,7 @@ impl<'tcx> ReplacementMap<'tcx> {
     fn place_fragments(
         &self,
         place: Place<'tcx>,
-    ) -> Option<impl Iterator<Item = (FieldIdx, Ty<'tcx>, Local)> + '_> {
+    ) -> Option<impl Iterator<Item = (FieldIdx, Ty<'tcx>, Local)>> {
         let local = place.as_local()?;
         let fields = self.fragments[local].as_ref()?;
         Some(fields.iter_enumerated().filter_map(|(field, &opt_ty_local)| {
diff --git a/compiler/rustc_mir_transform/src/ssa.rs b/compiler/rustc_mir_transform/src/ssa.rs
index 9f769077e77..3d512fb064e 100644
--- a/compiler/rustc_mir_transform/src/ssa.rs
+++ b/compiler/rustc_mir_transform/src/ssa.rs
@@ -138,7 +138,7 @@ impl SsaLocals {
     pub(super) fn assignments<'a, 'tcx>(
         &'a self,
         body: &'a Body<'tcx>,
-    ) -> impl Iterator<Item = (Local, &'a Rvalue<'tcx>, Location)> + 'a {
+    ) -> impl Iterator<Item = (Local, &'a Rvalue<'tcx>, Location)> {
         self.assignment_order.iter().filter_map(|&local| {
             if let Set1::One(DefLocation::Assignment(loc)) = self.assignments[local] {
                 let stmt = body.stmt_at(loc).left()?;
diff --git a/compiler/rustc_mir_transform/src/unreachable_enum_branching.rs b/compiler/rustc_mir_transform/src/unreachable_enum_branching.rs
index 1ff7043ed14..6ccec5b6f21 100644
--- a/compiler/rustc_mir_transform/src/unreachable_enum_branching.rs
+++ b/compiler/rustc_mir_transform/src/unreachable_enum_branching.rs
@@ -3,7 +3,6 @@
 use rustc_abi::Variants;
 use rustc_data_structures::fx::FxHashSet;
 use rustc_middle::bug;
-use rustc_middle::mir::patch::MirPatch;
 use rustc_middle::mir::{
     BasicBlock, BasicBlockData, BasicBlocks, Body, Local, Operand, Rvalue, StatementKind,
     TerminatorKind,
@@ -12,6 +11,8 @@ use rustc_middle::ty::layout::TyAndLayout;
 use rustc_middle::ty::{Ty, TyCtxt};
 use tracing::trace;
 
+use crate::patch::MirPatch;
+
 pub(super) struct UnreachableEnumBranching;
 
 fn get_discriminant_local(terminator: &TerminatorKind<'_>) -> Option<Local> {
diff --git a/compiler/rustc_mir_transform/src/unreachable_prop.rs b/compiler/rustc_mir_transform/src/unreachable_prop.rs
index 1e5d9469c03..13fb5b3e56f 100644
--- a/compiler/rustc_mir_transform/src/unreachable_prop.rs
+++ b/compiler/rustc_mir_transform/src/unreachable_prop.rs
@@ -6,10 +6,11 @@ use rustc_abi::Size;
 use rustc_data_structures::fx::FxHashSet;
 use rustc_middle::bug;
 use rustc_middle::mir::interpret::Scalar;
-use rustc_middle::mir::patch::MirPatch;
 use rustc_middle::mir::*;
 use rustc_middle::ty::{self, TyCtxt};
 
+use crate::patch::MirPatch;
+
 pub(super) struct UnreachablePropagation;
 
 impl crate::MirPass<'_> for UnreachablePropagation {