Diffstat (limited to 'compiler/rustc_mir_transform/src')
-rw-r--r--  compiler/rustc_mir_transform/src/add_retag.rs | 3
-rw-r--r--  compiler/rustc_mir_transform/src/const_debuginfo.rs | 102
-rw-r--r--  compiler/rustc_mir_transform/src/coroutine.rs | 6
-rw-r--r--  compiler/rustc_mir_transform/src/coroutine/by_move_body.rs | 6
-rw-r--r--  compiler/rustc_mir_transform/src/coverage/counters.rs | 5
-rw-r--r--  compiler/rustc_mir_transform/src/coverage/graph.rs | 89
-rw-r--r--  compiler/rustc_mir_transform/src/coverage/mappings.rs | 33
-rw-r--r--  compiler/rustc_mir_transform/src/coverage/mod.rs | 8
-rw-r--r--  compiler/rustc_mir_transform/src/coverage/query.rs | 2
-rw-r--r--  compiler/rustc_mir_transform/src/coverage/spans.rs | 473
-rw-r--r--  compiler/rustc_mir_transform/src/coverage/spans/from_mir.rs | 190
-rw-r--r--  compiler/rustc_mir_transform/src/coverage/tests.rs | 106
-rw-r--r--  compiler/rustc_mir_transform/src/dataflow_const_prop.rs | 21
-rw-r--r--  compiler/rustc_mir_transform/src/dump_mir.rs | 3
-rw-r--r--  compiler/rustc_mir_transform/src/elaborate_drops.rs | 4
-rw-r--r--  compiler/rustc_mir_transform/src/errors.rs | 2
-rw-r--r--  compiler/rustc_mir_transform/src/gvn.rs | 200
-rw-r--r--  compiler/rustc_mir_transform/src/inline.rs | 65
-rw-r--r--  compiler/rustc_mir_transform/src/inline/cycle.rs | 32
-rw-r--r--  compiler/rustc_mir_transform/src/instsimplify.rs | 2
-rw-r--r--  compiler/rustc_mir_transform/src/known_panics_lint.rs | 31
-rw-r--r--  compiler/rustc_mir_transform/src/lib.rs | 22
-rw-r--r--  compiler/rustc_mir_transform/src/match_branches.rs | 2
-rw-r--r--  compiler/rustc_mir_transform/src/normalize_array_len.rs | 2
-rw-r--r--  compiler/rustc_mir_transform/src/promote_consts.rs | 8
-rw-r--r--  compiler/rustc_mir_transform/src/required_consts.rs | 2
-rw-r--r--  compiler/rustc_mir_transform/src/reveal_all.rs | 4
-rw-r--r--  compiler/rustc_mir_transform/src/shim.rs | 55
-rw-r--r--  compiler/rustc_mir_transform/src/shim/async_destructor_ctor.rs | 4
-rw-r--r--  compiler/rustc_mir_transform/src/simplify_comparison_integral.rs | 2
-rw-r--r--  compiler/rustc_mir_transform/src/single_use_consts.rs | 199
-rw-r--r--  compiler/rustc_mir_transform/src/sroa.rs | 3
-rw-r--r--  compiler/rustc_mir_transform/src/validate.rs | 13
33 files changed, 837 insertions, 862 deletions
diff --git a/compiler/rustc_mir_transform/src/add_retag.rs b/compiler/rustc_mir_transform/src/add_retag.rs
index f880476cec2..16977a63c59 100644
--- a/compiler/rustc_mir_transform/src/add_retag.rs
+++ b/compiler/rustc_mir_transform/src/add_retag.rs
@@ -4,6 +4,7 @@
 //! of MIR building, and only after this pass we think of the program as having the
 //! normal MIR semantics.
 
+use rustc_hir::LangItem;
 use rustc_middle::mir::*;
 use rustc_middle::ty::{self, Ty, TyCtxt};
 
@@ -27,7 +28,7 @@ fn may_contain_reference<'tcx>(ty: Ty<'tcx>, depth: u32, tcx: TyCtxt<'tcx>) -> b
         // References and Boxes (`noalias` sources)
         ty::Ref(..) => true,
         ty::Adt(..) if ty.is_box() => true,
-        ty::Adt(adt, _) if Some(adt.did()) == tcx.lang_items().ptr_unique() => true,
+        ty::Adt(adt, _) if tcx.is_lang_item(adt.did(), LangItem::PtrUnique) => true,
         // Compound types: recurse
         ty::Array(ty, _) | ty::Slice(ty) => {
             // This does not branch so we keep the depth the same.
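
For readers unfamiliar with the helper introduced above: `TyCtxt::is_lang_item` replaces the inline `Option` comparison with a single call. A minimal standalone model of the equivalence, using hypothetical stand-in types rather than rustc's real `TyCtxt`/`DefId`:

    #[derive(Clone, Copy, PartialEq, Debug)]
    struct DefId(u32);

    #[derive(Clone, Copy)]
    enum LangItem { PtrUnique }

    struct Tcx { ptr_unique: Option<DefId> }

    impl Tcx {
        fn lang_item_def_id(&self, item: LangItem) -> Option<DefId> {
            match item { LangItem::PtrUnique => self.ptr_unique }
        }
        // Assumed shape of the helper: the match guard no longer builds an `Option` inline.
        fn is_lang_item(&self, did: DefId, item: LangItem) -> bool {
            self.lang_item_def_id(item) == Some(did)
        }
    }

    fn main() {
        let tcx = Tcx { ptr_unique: Some(DefId(7)) };
        assert!(tcx.is_lang_item(DefId(7), LangItem::PtrUnique));
        assert!(!tcx.is_lang_item(DefId(8), LangItem::PtrUnique));
    }
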
diff --git a/compiler/rustc_mir_transform/src/const_debuginfo.rs b/compiler/rustc_mir_transform/src/const_debuginfo.rs
deleted file mode 100644
index e4e4270c499..00000000000
--- a/compiler/rustc_mir_transform/src/const_debuginfo.rs
+++ /dev/null
@@ -1,102 +0,0 @@
-//! Finds locals which are assigned once to a const and unused except for debuginfo and converts
-//! their debuginfo to use the const directly, allowing the local to be removed.
-
-use rustc_middle::{
-    mir::{
-        visit::{PlaceContext, Visitor},
-        Body, ConstOperand, Local, Location, Operand, Rvalue, StatementKind, VarDebugInfoContents,
-    },
-    ty::TyCtxt,
-};
-
-use crate::MirPass;
-use rustc_index::{bit_set::BitSet, IndexVec};
-
-pub struct ConstDebugInfo;
-
-impl<'tcx> MirPass<'tcx> for ConstDebugInfo {
-    fn is_enabled(&self, sess: &rustc_session::Session) -> bool {
-        sess.mir_opt_level() > 0
-    }
-
-    fn run_pass(&self, _tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
-        trace!("running ConstDebugInfo on {:?}", body.source);
-
-        for (local, constant) in find_optimization_opportunities(body) {
-            for debuginfo in &mut body.var_debug_info {
-                if let VarDebugInfoContents::Place(p) = debuginfo.value {
-                    if p.local == local && p.projection.is_empty() {
-                        trace!(
-                            "changing debug info for {:?} from place {:?} to constant {:?}",
-                            debuginfo.name,
-                            p,
-                            constant
-                        );
-                        debuginfo.value = VarDebugInfoContents::Const(constant);
-                    }
-                }
-            }
-        }
-    }
-}
-
-struct LocalUseVisitor {
-    local_mutating_uses: IndexVec<Local, u8>,
-    local_assignment_locations: IndexVec<Local, Option<Location>>,
-}
-
-fn find_optimization_opportunities<'tcx>(body: &Body<'tcx>) -> Vec<(Local, ConstOperand<'tcx>)> {
-    let mut visitor = LocalUseVisitor {
-        local_mutating_uses: IndexVec::from_elem(0, &body.local_decls),
-        local_assignment_locations: IndexVec::from_elem(None, &body.local_decls),
-    };
-
-    visitor.visit_body(body);
-
-    let mut locals_to_debuginfo = BitSet::new_empty(body.local_decls.len());
-    for debuginfo in &body.var_debug_info {
-        if let VarDebugInfoContents::Place(p) = debuginfo.value
-            && let Some(l) = p.as_local()
-        {
-            locals_to_debuginfo.insert(l);
-        }
-    }
-
-    let mut eligible_locals = Vec::new();
-    for (local, mutating_uses) in visitor.local_mutating_uses.drain_enumerated(..) {
-        if mutating_uses != 1 || !locals_to_debuginfo.contains(local) {
-            continue;
-        }
-
-        if let Some(location) = visitor.local_assignment_locations[local] {
-            let bb = &body[location.block];
-
-            // The value is assigned as the result of a call, not a constant
-            if bb.statements.len() == location.statement_index {
-                continue;
-            }
-
-            if let StatementKind::Assign(box (p, Rvalue::Use(Operand::Constant(box c)))) =
-                &bb.statements[location.statement_index].kind
-            {
-                if let Some(local) = p.as_local() {
-                    eligible_locals.push((local, *c));
-                }
-            }
-        }
-    }
-
-    eligible_locals
-}
-
-impl Visitor<'_> for LocalUseVisitor {
-    fn visit_local(&mut self, local: Local, context: PlaceContext, location: Location) {
-        if context.is_mutating_use() {
-            self.local_mutating_uses[local] = self.local_mutating_uses[local].saturating_add(1);
-
-            if context.is_place_assignment() {
-                self.local_assignment_locations[local] = Some(location);
-            }
-        }
-    }
-}
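
Per the diffstat, the deleted `ConstDebugInfo` pass is succeeded by the new `single_use_consts.rs` (199 lines), which appears to absorb this job. For reference, a standalone sketch of the deleted pass's core idea, with hypothetical types in place of rustc's MIR: a local assigned exactly once, directly from a constant, and otherwise used only by debuginfo can have its debuginfo retargeted at the constant, freeing the local for removal.

    #[derive(Clone, Copy, PartialEq, Debug)]
    enum DebugValue { Local(usize), Const(i64) }

    // Rewrite every debuginfo entry that watches one of the given locals so it
    // watches the constant instead. Callers are assumed to have verified that
    // each local has exactly one (constant) assignment and no other mutating use.
    fn promote_single_const_locals(
        debuginfo: &mut [DebugValue],
        single_const_assignments: &[(usize, i64)],
    ) {
        for &(local, constant) in single_const_assignments {
            for value in debuginfo.iter_mut() {
                if *value == DebugValue::Local(local) {
                    *value = DebugValue::Const(constant);
                }
            }
        }
    }

    fn main() {
        let mut dbg = vec![DebugValue::Local(0), DebugValue::Local(1)];
        promote_single_const_locals(&mut dbg, &[(0, 42)]);
        assert_eq!(dbg, vec![DebugValue::Const(42), DebugValue::Local(1)]);
    }
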
diff --git a/compiler/rustc_mir_transform/src/coroutine.rs b/compiler/rustc_mir_transform/src/coroutine.rs
index ade2ac0080e..bf79b4e133a 100644
--- a/compiler/rustc_mir_transform/src/coroutine.rs
+++ b/compiler/rustc_mir_transform/src/coroutine.rs
@@ -68,7 +68,7 @@ use rustc_index::{Idx, IndexVec};
 use rustc_middle::mir::visit::{MutVisitor, PlaceContext, Visitor};
 use rustc_middle::mir::*;
 use rustc_middle::ty::CoroutineArgs;
-use rustc_middle::ty::InstanceDef;
+use rustc_middle::ty::InstanceKind;
 use rustc_middle::ty::{self, CoroutineArgsExt, Ty, TyCtxt};
 use rustc_middle::{bug, span_bug};
 use rustc_mir_dataflow::impls::{
@@ -1276,7 +1276,7 @@ fn create_coroutine_drop_shim<'tcx>(
     // Update the body's def to become the drop glue.
     let coroutine_instance = body.source.instance;
     let drop_in_place = tcx.require_lang_item(LangItem::DropInPlace, None);
-    let drop_instance = InstanceDef::DropGlue(drop_in_place, Some(coroutine_ty));
+    let drop_instance = InstanceKind::DropGlue(drop_in_place, Some(coroutine_ty));
 
     // Temporarily change MirSource to coroutine's instance so that dump_mir produces a more sensible
     // filename.
@@ -1608,7 +1608,7 @@ fn check_field_tys_sized<'tcx>(
     let infcx = tcx.infer_ctxt().ignoring_regions().build();
     let param_env = tcx.param_env(def_id);
 
-    let ocx = ObligationCtxt::new(&infcx);
+    let ocx = ObligationCtxt::new_with_diagnostics(&infcx);
     for field_ty in &coroutine_layout.field_tys {
         ocx.register_bound(
             ObligationCause::new(
diff --git a/compiler/rustc_mir_transform/src/coroutine/by_move_body.rs b/compiler/rustc_mir_transform/src/coroutine/by_move_body.rs
index 10c0567eb4b..69d21a63f55 100644
--- a/compiler/rustc_mir_transform/src/coroutine/by_move_body.rs
+++ b/compiler/rustc_mir_transform/src/coroutine/by_move_body.rs
@@ -75,7 +75,7 @@ use rustc_middle::bug;
 use rustc_middle::hir::place::{Projection, ProjectionKind};
 use rustc_middle::mir::visit::MutVisitor;
 use rustc_middle::mir::{self, dump_mir, MirPass};
-use rustc_middle::ty::{self, InstanceDef, Ty, TyCtxt, TypeVisitableExt};
+use rustc_middle::ty::{self, InstanceKind, Ty, TyCtxt, TypeVisitableExt};
 use rustc_target::abi::{FieldIdx, VariantIdx};
 
 pub struct ByMoveBody;
@@ -102,7 +102,7 @@ impl<'tcx> MirPass<'tcx> for ByMoveBody {
 
         // We don't need to generate a by-move coroutine if the coroutine body was
         // produced by the `CoroutineKindShim`, since it's already by-move.
-        if matches!(body.source.instance, ty::InstanceDef::CoroutineKindShim { .. }) {
+        if matches!(body.source.instance, ty::InstanceKind::CoroutineKindShim { .. }) {
             return;
         }
 
@@ -193,7 +193,7 @@ impl<'tcx> MirPass<'tcx> for ByMoveBody {
         MakeByMoveBody { tcx, field_remapping, by_move_coroutine_ty }.visit_body(&mut by_move_body);
         dump_mir(tcx, false, "coroutine_by_move", &0, &by_move_body, |_, _| Ok(()));
         // FIXME: use query feeding to generate the body right here and then only store the `DefId` of the new body.
-        by_move_body.source = mir::MirSource::from_instance(InstanceDef::CoroutineKindShim {
+        by_move_body.source = mir::MirSource::from_instance(InstanceKind::CoroutineKindShim {
             coroutine_def_id: coroutine_def_id.to_def_id(),
         });
         body.coroutine.as_mut().unwrap().by_move_body = Some(by_move_body);
diff --git a/compiler/rustc_mir_transform/src/coverage/counters.rs b/compiler/rustc_mir_transform/src/coverage/counters.rs
index b5968517d77..a8b0f4a8d6d 100644
--- a/compiler/rustc_mir_transform/src/coverage/counters.rs
+++ b/compiler/rustc_mir_transform/src/coverage/counters.rs
@@ -168,11 +168,6 @@ impl CoverageCounters {
         self.counter_increment_sites.len()
     }
 
-    #[cfg(test)]
-    pub(super) fn num_expressions(&self) -> usize {
-        self.expressions.len()
-    }
-
     fn set_bcb_counter(&mut self, bcb: BasicCoverageBlock, counter_kind: BcbCounter) -> BcbCounter {
         if let Some(replaced) = self.bcb_counters[bcb].replace(counter_kind) {
             bug!(
diff --git a/compiler/rustc_mir_transform/src/coverage/graph.rs b/compiler/rustc_mir_transform/src/coverage/graph.rs
index fd74a2a97e2..360dccb240d 100644
--- a/compiler/rustc_mir_transform/src/coverage/graph.rs
+++ b/compiler/rustc_mir_transform/src/coverage/graph.rs
@@ -14,16 +14,16 @@ use std::ops::{Index, IndexMut};
 /// A coverage-specific simplification of the MIR control flow graph (CFG). The `CoverageGraph`s
 /// nodes are `BasicCoverageBlock`s, which encompass one or more MIR `BasicBlock`s.
 #[derive(Debug)]
-pub(super) struct CoverageGraph {
+pub(crate) struct CoverageGraph {
     bcbs: IndexVec<BasicCoverageBlock, BasicCoverageBlockData>,
     bb_to_bcb: IndexVec<BasicBlock, Option<BasicCoverageBlock>>,
-    pub successors: IndexVec<BasicCoverageBlock, Vec<BasicCoverageBlock>>,
-    pub predecessors: IndexVec<BasicCoverageBlock, Vec<BasicCoverageBlock>>,
+    pub(crate) successors: IndexVec<BasicCoverageBlock, Vec<BasicCoverageBlock>>,
+    pub(crate) predecessors: IndexVec<BasicCoverageBlock, Vec<BasicCoverageBlock>>,
     dominators: Option<Dominators<BasicCoverageBlock>>,
 }
 
 impl CoverageGraph {
-    pub fn from_mir(mir_body: &mir::Body<'_>) -> Self {
+    pub(crate) fn from_mir(mir_body: &mir::Body<'_>) -> Self {
         let (bcbs, bb_to_bcb) = Self::compute_basic_coverage_blocks(mir_body);
 
         // Pre-transform MIR `BasicBlock` successors and predecessors into the BasicCoverageBlock
@@ -135,24 +135,28 @@ impl CoverageGraph {
     }
 
     #[inline(always)]
-    pub fn iter_enumerated(
+    pub(crate) fn iter_enumerated(
         &self,
     ) -> impl Iterator<Item = (BasicCoverageBlock, &BasicCoverageBlockData)> {
         self.bcbs.iter_enumerated()
     }
 
     #[inline(always)]
-    pub fn bcb_from_bb(&self, bb: BasicBlock) -> Option<BasicCoverageBlock> {
+    pub(crate) fn bcb_from_bb(&self, bb: BasicBlock) -> Option<BasicCoverageBlock> {
         if bb.index() < self.bb_to_bcb.len() { self.bb_to_bcb[bb] } else { None }
     }
 
     #[inline(always)]
-    pub fn dominates(&self, dom: BasicCoverageBlock, node: BasicCoverageBlock) -> bool {
+    pub(crate) fn dominates(&self, dom: BasicCoverageBlock, node: BasicCoverageBlock) -> bool {
         self.dominators.as_ref().unwrap().dominates(dom, node)
     }
 
     #[inline(always)]
-    pub fn cmp_in_dominator_order(&self, a: BasicCoverageBlock, b: BasicCoverageBlock) -> Ordering {
+    pub(crate) fn cmp_in_dominator_order(
+        &self,
+        a: BasicCoverageBlock,
+        b: BasicCoverageBlock,
+    ) -> Ordering {
         self.dominators.as_ref().unwrap().cmp_in_dominator_order(a, b)
     }
 
@@ -166,7 +170,7 @@ impl CoverageGraph {
     ///
     /// FIXME: That assumption might not be true for [`TerminatorKind::Yield`]?
     #[inline(always)]
-    pub(super) fn bcb_has_multiple_in_edges(&self, bcb: BasicCoverageBlock) -> bool {
+    pub(crate) fn bcb_has_multiple_in_edges(&self, bcb: BasicCoverageBlock) -> bool {
         // Even though bcb0 conceptually has an extra virtual in-edge due to
         // being the entry point, we've already asserted that it has no _other_
         // in-edges, so there's no possibility of it having _multiple_ in-edges.
@@ -212,7 +216,7 @@ impl graph::StartNode for CoverageGraph {
 impl graph::Successors for CoverageGraph {
     #[inline]
     fn successors(&self, node: Self::Node) -> impl Iterator<Item = Self::Node> {
-        self.successors[node].iter().cloned()
+        self.successors[node].iter().copied()
     }
 }
 
@@ -227,7 +231,7 @@ rustc_index::newtype_index! {
     /// A node in the control-flow graph of CoverageGraph.
     #[orderable]
     #[debug_format = "bcb{}"]
-    pub(super) struct BasicCoverageBlock {
+    pub(crate) struct BasicCoverageBlock {
         const START_BCB = 0;
     }
 }
@@ -259,23 +263,23 @@ rustc_index::newtype_index! {
 /// queries (`dominates()`, `predecessors`, `successors`, etc.) have branch (control flow)
 /// significance.
 #[derive(Debug, Clone)]
-pub(super) struct BasicCoverageBlockData {
-    pub basic_blocks: Vec<BasicBlock>,
+pub(crate) struct BasicCoverageBlockData {
+    pub(crate) basic_blocks: Vec<BasicBlock>,
 }
 
 impl BasicCoverageBlockData {
-    pub fn from(basic_blocks: Vec<BasicBlock>) -> Self {
+    fn from(basic_blocks: Vec<BasicBlock>) -> Self {
         assert!(basic_blocks.len() > 0);
         Self { basic_blocks }
     }
 
     #[inline(always)]
-    pub fn leader_bb(&self) -> BasicBlock {
+    pub(crate) fn leader_bb(&self) -> BasicBlock {
         self.basic_blocks[0]
     }
 
     #[inline(always)]
-    pub fn last_bb(&self) -> BasicBlock {
+    pub(crate) fn last_bb(&self) -> BasicBlock {
         *self.basic_blocks.last().unwrap()
     }
 }
@@ -364,7 +368,7 @@ fn bcb_filtered_successors<'a, 'tcx>(terminator: &'a Terminator<'tcx>) -> Covera
 /// CoverageGraph outside all loops. This supports traversing the BCB CFG in a way that
 /// ensures a loop is completely traversed before processing Blocks after the end of the loop.
 #[derive(Debug)]
-pub(super) struct TraversalContext {
+struct TraversalContext {
     /// BCB with one or more incoming loop backedges, indicating which loop
     /// this context is for.
     ///
@@ -375,7 +379,7 @@ pub(super) struct TraversalContext {
     worklist: VecDeque<BasicCoverageBlock>,
 }
 
-pub(super) struct TraverseCoverageGraphWithLoops<'a> {
+pub(crate) struct TraverseCoverageGraphWithLoops<'a> {
     basic_coverage_blocks: &'a CoverageGraph,
 
     backedges: IndexVec<BasicCoverageBlock, Vec<BasicCoverageBlock>>,
@@ -384,7 +388,7 @@ pub(super) struct TraverseCoverageGraphWithLoops<'a> {
 }
 
 impl<'a> TraverseCoverageGraphWithLoops<'a> {
-    pub(super) fn new(basic_coverage_blocks: &'a CoverageGraph) -> Self {
+    pub(crate) fn new(basic_coverage_blocks: &'a CoverageGraph) -> Self {
         let backedges = find_loop_backedges(basic_coverage_blocks);
 
         let worklist = VecDeque::from([basic_coverage_blocks.start_node()]);
@@ -400,7 +404,7 @@ impl<'a> TraverseCoverageGraphWithLoops<'a> {
 
     /// For each loop on the loop context stack (top-down), yields a list of BCBs
     /// within that loop that have an outgoing edge back to the loop header.
-    pub(super) fn reloop_bcbs_per_loop(&self) -> impl Iterator<Item = &[BasicCoverageBlock]> {
+    pub(crate) fn reloop_bcbs_per_loop(&self) -> impl Iterator<Item = &[BasicCoverageBlock]> {
         self.context_stack
             .iter()
             .rev()
@@ -408,39 +412,38 @@ impl<'a> TraverseCoverageGraphWithLoops<'a> {
             .map(|header_bcb| self.backedges[header_bcb].as_slice())
     }
 
-    pub(super) fn next(&mut self) -> Option<BasicCoverageBlock> {
+    pub(crate) fn next(&mut self) -> Option<BasicCoverageBlock> {
         debug!(
             "TraverseCoverageGraphWithLoops::next - context_stack: {:?}",
             self.context_stack.iter().rev().collect::<Vec<_>>()
         );
 
         while let Some(context) = self.context_stack.last_mut() {
-            if let Some(bcb) = context.worklist.pop_front() {
-                if !self.visited.insert(bcb) {
-                    debug!("Already visited: {bcb:?}");
-                    continue;
-                }
-                debug!("Visiting {bcb:?}");
-
-                if self.backedges[bcb].len() > 0 {
-                    debug!("{bcb:?} is a loop header! Start a new TraversalContext...");
-                    self.context_stack.push(TraversalContext {
-                        loop_header: Some(bcb),
-                        worklist: VecDeque::new(),
-                    });
-                }
-                self.add_successors_to_worklists(bcb);
-                return Some(bcb);
-            } else {
-                // Strip contexts with empty worklists from the top of the stack
+            let Some(bcb) = context.worklist.pop_front() else {
+                // This stack level is exhausted; pop it and try the next one.
                 self.context_stack.pop();
+                continue;
+            };
+
+            if !self.visited.insert(bcb) {
+                debug!("Already visited: {bcb:?}");
+                continue;
+            }
+            debug!("Visiting {bcb:?}");
+
+            if self.backedges[bcb].len() > 0 {
+                debug!("{bcb:?} is a loop header! Start a new TraversalContext...");
+                self.context_stack
+                    .push(TraversalContext { loop_header: Some(bcb), worklist: VecDeque::new() });
             }
+            self.add_successors_to_worklists(bcb);
+            return Some(bcb);
         }
 
         None
     }
 
-    pub fn add_successors_to_worklists(&mut self, bcb: BasicCoverageBlock) {
+    fn add_successors_to_worklists(&mut self, bcb: BasicCoverageBlock) {
         let successors = &self.basic_coverage_blocks.successors[bcb];
         debug!("{:?} has {} successors:", bcb, successors.len());
 
@@ -494,11 +497,11 @@ impl<'a> TraverseCoverageGraphWithLoops<'a> {
         }
     }
 
-    pub fn is_complete(&self) -> bool {
+    pub(crate) fn is_complete(&self) -> bool {
         self.visited.count() == self.visited.domain_size()
     }
 
-    pub fn unvisited(&self) -> Vec<BasicCoverageBlock> {
+    pub(crate) fn unvisited(&self) -> Vec<BasicCoverageBlock> {
         let mut unvisited_set: BitSet<BasicCoverageBlock> =
             BitSet::new_filled(self.visited.domain_size());
         unvisited_set.subtract(&self.visited);
@@ -506,7 +509,7 @@ impl<'a> TraverseCoverageGraphWithLoops<'a> {
     }
 }
 
-pub(super) fn find_loop_backedges(
+fn find_loop_backedges(
     basic_coverage_blocks: &CoverageGraph,
 ) -> IndexVec<BasicCoverageBlock, Vec<BasicCoverageBlock>> {
     let num_bcbs = basic_coverage_blocks.num_nodes();
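
The `next()` rewrite above is a pure control-flow cleanup: `let .. else` replaces the old `if let`/`else` nesting, so the "worklist exhausted" case reads as an early bail-out. A self-contained sketch of the same pattern (toy `u32` nodes instead of `BasicCoverageBlock`; pushing new contexts at loop headers is omitted):

    use std::collections::VecDeque;

    struct Context { worklist: VecDeque<u32> }

    fn next(stack: &mut Vec<Context>, visited: &mut Vec<u32>) -> Option<u32> {
        while let Some(context) = stack.last_mut() {
            let Some(node) = context.worklist.pop_front() else {
                // This stack level is exhausted; pop it and try the next one.
                stack.pop();
                continue;
            };
            if visited.contains(&node) {
                continue; // skip nodes we have already yielded
            }
            visited.push(node);
            return Some(node);
        }
        None
    }

    fn main() {
        let mut stack = vec![Context { worklist: VecDeque::from([1, 1, 2]) }];
        let mut visited = vec![];
        while let Some(node) = next(&mut stack, &mut visited) {
            println!("visiting {node}");
        }
        assert_eq!(visited, vec![1, 2]);
    }
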
diff --git a/compiler/rustc_mir_transform/src/coverage/mappings.rs b/compiler/rustc_mir_transform/src/coverage/mappings.rs
index 0e209757100..759bb7c1f9d 100644
--- a/compiler/rustc_mir_transform/src/coverage/mappings.rs
+++ b/compiler/rustc_mir_transform/src/coverage/mappings.rs
@@ -5,6 +5,7 @@ use rustc_index::bit_set::BitSet;
 use rustc_index::IndexVec;
 use rustc_middle::mir::coverage::{BlockMarkerId, BranchSpan, ConditionInfo, CoverageKind};
 use rustc_middle::mir::{self, BasicBlock, StatementKind};
+use rustc_middle::ty::TyCtxt;
 use rustc_span::Span;
 
 use crate::coverage::graph::{BasicCoverageBlock, CoverageGraph, START_BCB};
@@ -63,31 +64,35 @@ pub(super) struct ExtractedMappings {
 
 /// Extracts coverage-relevant spans from MIR, and associates them with
 /// their corresponding BCBs.
-pub(super) fn extract_all_mapping_info_from_mir(
-    mir_body: &mir::Body<'_>,
+pub(super) fn extract_all_mapping_info_from_mir<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    mir_body: &mir::Body<'tcx>,
     hir_info: &ExtractedHirInfo,
     basic_coverage_blocks: &CoverageGraph,
 ) -> ExtractedMappings {
-    if hir_info.is_async_fn {
+    let mut code_mappings = vec![];
+    let mut branch_pairs = vec![];
+    let mut mcdc_bitmap_bytes = 0;
+    let mut mcdc_branches = vec![];
+    let mut mcdc_decisions = vec![];
+
+    if hir_info.is_async_fn || tcx.sess.coverage_no_mir_spans() {
         // An async function desugars into a function that returns a future,
         // with the user code wrapped in a closure. Any spans in the desugared
         // outer function will be unhelpful, so just keep the signature span
         // and ignore all of the spans in the MIR body.
-        let mut mappings = ExtractedMappings::default();
+        //
+        // When debugging flag `-Zcoverage-options=no-mir-spans` is set, we need
+        // to give the same treatment to _all_ functions, because `llvm-cov`
+        // seems to ignore functions that don't have any ordinary code spans.
         if let Some(span) = hir_info.fn_sig_span_extended {
-            mappings.code_mappings.push(CodeMapping { span, bcb: START_BCB });
+            code_mappings.push(CodeMapping { span, bcb: START_BCB });
         }
-        return mappings;
+    } else {
+        // Extract coverage spans from MIR statements/terminators as normal.
+        extract_refined_covspans(mir_body, hir_info, basic_coverage_blocks, &mut code_mappings);
     }
 
-    let mut code_mappings = vec![];
-    let mut branch_pairs = vec![];
-    let mut mcdc_bitmap_bytes = 0;
-    let mut mcdc_branches = vec![];
-    let mut mcdc_decisions = vec![];
-
-    extract_refined_covspans(mir_body, hir_info, basic_coverage_blocks, &mut code_mappings);
-
     branch_pairs.extend(extract_branch_pairs(mir_body, hir_info, basic_coverage_blocks));
 
     extract_mcdc_mappings(
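
The restructuring above changes more than the early return: as the diff reads, only code-span extraction is now gated on `is_async_fn`/`-Zcoverage-options=no-mir-spans`, while branch-pair and MC/DC extraction run on both paths (the old code returned early for async fns and skipped them). A minimal sketch of the new shape, with hypothetical stand-in types:

    struct Extracted { code_spans: Vec<&'static str>, branch_pairs: Vec<&'static str> }

    fn extract(skip_body_spans: bool) -> Extracted {
        let mut code_spans = vec![];
        let mut branch_pairs = vec![];

        if skip_body_spans {
            // Async fns (or `-Zcoverage-options=no-mir-spans`): signature span only.
            code_spans.push("fn-signature");
        } else {
            code_spans.push("refined-body-spans");
        }

        // Runs unconditionally now; the old early `return` skipped it.
        branch_pairs.push("extracted-branch-pairs");

        Extracted { code_spans, branch_pairs }
    }

    fn main() {
        assert_eq!(extract(true).branch_pairs.len(), 1);
    }
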
diff --git a/compiler/rustc_mir_transform/src/coverage/mod.rs b/compiler/rustc_mir_transform/src/coverage/mod.rs
index 419e39bc386..4a64d21f3d1 100644
--- a/compiler/rustc_mir_transform/src/coverage/mod.rs
+++ b/compiler/rustc_mir_transform/src/coverage/mod.rs
@@ -71,8 +71,12 @@ fn instrument_function_for_coverage<'tcx>(tcx: TyCtxt<'tcx>, mir_body: &mut mir:
 
     ////////////////////////////////////////////////////
     // Extract coverage spans and other mapping info from MIR.
-    let extracted_mappings =
-        mappings::extract_all_mapping_info_from_mir(mir_body, &hir_info, &basic_coverage_blocks);
+    let extracted_mappings = mappings::extract_all_mapping_info_from_mir(
+        tcx,
+        mir_body,
+        &hir_info,
+        &basic_coverage_blocks,
+    );
 
     ////////////////////////////////////////////////////
     // Create an optimized mix of `Counter`s and `Expression`s for the `CoverageGraph`. Ensure
diff --git a/compiler/rustc_mir_transform/src/coverage/query.rs b/compiler/rustc_mir_transform/src/coverage/query.rs
index 65715253647..25744009be8 100644
--- a/compiler/rustc_mir_transform/src/coverage/query.rs
+++ b/compiler/rustc_mir_transform/src/coverage/query.rs
@@ -49,7 +49,7 @@ fn is_eligible_for_coverage(tcx: TyCtxt<'_>, def_id: LocalDefId) -> bool {
 /// Query implementation for `coverage_ids_info`.
 fn coverage_ids_info<'tcx>(
     tcx: TyCtxt<'tcx>,
-    instance_def: ty::InstanceDef<'tcx>,
+    instance_def: ty::InstanceKind<'tcx>,
 ) -> CoverageIdsInfo {
     let mir_body = tcx.instance_mir(instance_def);
 
diff --git a/compiler/rustc_mir_transform/src/coverage/spans.rs b/compiler/rustc_mir_transform/src/coverage/spans.rs
index f2f76ac70c2..84a70d1f02d 100644
--- a/compiler/rustc_mir_transform/src/coverage/spans.rs
+++ b/compiler/rustc_mir_transform/src/coverage/spans.rs
@@ -1,10 +1,15 @@
-use rustc_middle::bug;
+use std::collections::VecDeque;
+
+use rustc_data_structures::captures::Captures;
+use rustc_data_structures::fx::FxHashSet;
 use rustc_middle::mir;
-use rustc_span::{BytePos, Span};
+use rustc_span::Span;
 
 use crate::coverage::graph::{BasicCoverageBlock, CoverageGraph};
 use crate::coverage::mappings;
-use crate::coverage::spans::from_mir::SpanFromMir;
+use crate::coverage::spans::from_mir::{
+    extract_covspans_and_holes_from_mir, ExtractedCovspans, Hole, SpanFromMir,
+};
 use crate::coverage::ExtractedHirInfo;
 
 mod from_mir;
@@ -20,302 +25,244 @@ pub(super) fn extract_refined_covspans(
     basic_coverage_blocks: &CoverageGraph,
     code_mappings: &mut impl Extend<mappings::CodeMapping>,
 ) {
-    let sorted_spans =
-        from_mir::mir_to_initial_sorted_coverage_spans(mir_body, hir_info, basic_coverage_blocks);
-    let coverage_spans = SpansRefiner::refine_sorted_spans(sorted_spans);
-    code_mappings.extend(coverage_spans.into_iter().map(|RefinedCovspan { bcb, span, .. }| {
-        // Each span produced by the generator represents an ordinary code region.
-        mappings::CodeMapping { span, bcb }
-    }));
-}
+    let ExtractedCovspans { mut covspans, mut holes } =
+        extract_covspans_and_holes_from_mir(mir_body, hir_info, basic_coverage_blocks);
 
-#[derive(Debug)]
-struct CurrCovspan {
-    span: Span,
-    bcb: BasicCoverageBlock,
-    is_hole: bool,
-}
+    // First, perform the passes that need macro information.
+    covspans.sort_by(|a, b| basic_coverage_blocks.cmp_in_dominator_order(a.bcb, b.bcb));
+    remove_unwanted_macro_spans(&mut covspans);
+    split_visible_macro_spans(&mut covspans);
 
-impl CurrCovspan {
-    fn new(span: Span, bcb: BasicCoverageBlock, is_hole: bool) -> Self {
-        Self { span, bcb, is_hole }
-    }
-
-    fn into_prev(self) -> PrevCovspan {
-        let Self { span, bcb, is_hole } = self;
-        PrevCovspan { span, bcb, merged_spans: vec![span], is_hole }
-    }
+    // We no longer need the extra information in `SpanFromMir`, so convert to `Covspan`.
+    let mut covspans = covspans.into_iter().map(SpanFromMir::into_covspan).collect::<Vec<_>>();
 
-    fn into_refined(self) -> RefinedCovspan {
-        // This is only called in cases where `curr` is a hole span that has
-        // been carved out of `prev`.
-        debug_assert!(self.is_hole);
-        self.into_prev().into_refined()
-    }
-}
+    let compare_covspans = |a: &Covspan, b: &Covspan| {
+        compare_spans(a.span, b.span)
+            // After deduplication, we want to keep only the most-dominated BCB.
+            .then_with(|| basic_coverage_blocks.cmp_in_dominator_order(a.bcb, b.bcb).reverse())
+    };
+    covspans.sort_by(compare_covspans);
 
-#[derive(Debug)]
-struct PrevCovspan {
-    span: Span,
-    bcb: BasicCoverageBlock,
-    /// List of all the original spans from MIR that have been merged into this
-    /// span. Mainly used to precisely skip over gaps when truncating a span.
-    merged_spans: Vec<Span>,
-    is_hole: bool,
-}
+    // Among covspans with the same span, keep only one,
+    // preferring the one with the most-dominated BCB.
+    // (Ideally we should try to preserve _all_ non-dominating BCBs, but that
+    // requires a lot more complexity in the span refiner, for little benefit.)
+    covspans.dedup_by(|b, a| a.span.source_equal(b.span));
 
-impl PrevCovspan {
-    fn is_mergeable(&self, other: &CurrCovspan) -> bool {
-        self.bcb == other.bcb && !self.is_hole && !other.is_hole
-    }
+    // Sort the holes, and merge overlapping/adjacent holes.
+    holes.sort_by(|a, b| compare_spans(a.span, b.span));
+    holes.dedup_by(|b, a| a.merge_if_overlapping_or_adjacent(b));
 
-    fn merge_from(&mut self, other: &CurrCovspan) {
-        debug_assert!(self.is_mergeable(other));
-        self.span = self.span.to(other.span);
-        self.merged_spans.push(other.span);
-    }
+    // Split the covspans into separate buckets that don't overlap any holes.
+    let buckets = divide_spans_into_buckets(covspans, &holes);
 
-    fn cutoff_statements_at(mut self, cutoff_pos: BytePos) -> Option<RefinedCovspan> {
-        self.merged_spans.retain(|span| span.hi() <= cutoff_pos);
-        if let Some(max_hi) = self.merged_spans.iter().map(|span| span.hi()).max() {
-            self.span = self.span.with_hi(max_hi);
-        }
+    for mut covspans in buckets {
+        // Make sure each individual bucket is internally sorted.
+        covspans.sort_by(compare_covspans);
+        let _span = debug_span!("processing bucket", ?covspans).entered();
 
-        if self.merged_spans.is_empty() { None } else { Some(self.into_refined()) }
-    }
+        let mut covspans = remove_unwanted_overlapping_spans(covspans);
+        debug!(?covspans, "after removing overlaps");
 
-    fn refined_copy(&self) -> RefinedCovspan {
-        let &Self { span, bcb, merged_spans: _, is_hole } = self;
-        RefinedCovspan { span, bcb, is_hole }
-    }
+        // Do one last merge pass, to simplify the output.
+        covspans.dedup_by(|b, a| a.merge_if_eligible(b));
+        debug!(?covspans, "after merge");
 
-    fn into_refined(self) -> RefinedCovspan {
-        // Even though we consume self, we can just reuse the copying impl.
-        self.refined_copy()
+        code_mappings.extend(covspans.into_iter().map(|Covspan { span, bcb }| {
+            // Each span produced by the refiner represents an ordinary code region.
+            mappings::CodeMapping { span, bcb }
+        }));
     }
 }
 
-#[derive(Debug)]
-struct RefinedCovspan {
-    span: Span,
-    bcb: BasicCoverageBlock,
-    is_hole: bool,
+/// Macros that expand into branches (e.g. `assert!`, `trace!`) tend to generate
+/// multiple condition/consequent blocks that have the span of the whole macro
+/// invocation, which is unhelpful. Keeping only the first such span seems to
+/// give better mappings, so remove the others.
+///
+/// (The input spans should be sorted in BCB dominator order, so that the
+/// retained "first" span is likely to dominate the others.)
+fn remove_unwanted_macro_spans(covspans: &mut Vec<SpanFromMir>) {
+    let mut seen_macro_spans = FxHashSet::default();
+    covspans.retain(|covspan| {
+        // Ignore (retain) non-macro-expansion spans.
+        if covspan.visible_macro.is_none() {
+            return true;
+        }
+
+        // Retain only the first macro-expanded covspan with this span.
+        seen_macro_spans.insert(covspan.span)
+    });
 }
 
-impl RefinedCovspan {
-    fn is_mergeable(&self, other: &Self) -> bool {
-        self.bcb == other.bcb && !self.is_hole && !other.is_hole
-    }
+/// When a span corresponds to a macro invocation that is visible from the
+/// function body, split it into two parts. The first part covers just the
+/// macro name plus `!`, and the second part covers the rest of the macro
+/// invocation. This seems to give better results for code that uses macros.
+fn split_visible_macro_spans(covspans: &mut Vec<SpanFromMir>) {
+    let mut extra_spans = vec![];
+
+    covspans.retain(|covspan| {
+        let Some(visible_macro) = covspan.visible_macro else { return true };
+
+        let split_len = visible_macro.as_str().len() as u32 + 1;
+        let (before, after) = covspan.span.split_at(split_len);
+        if !covspan.span.contains(before) || !covspan.span.contains(after) {
+            // Something is unexpectedly wrong with the split point.
+            // The debug assertion in `split_at` will have already caught this,
+            // but in release builds it's safer to do nothing and maybe get a
+            // bug report for unexpected coverage, rather than risk an ICE.
+            return true;
+        }
 
-    fn merge_from(&mut self, other: &Self) {
-        debug_assert!(self.is_mergeable(other));
-        self.span = self.span.to(other.span);
-    }
-}
+        extra_spans.push(SpanFromMir::new(before, covspan.visible_macro, covspan.bcb));
+        extra_spans.push(SpanFromMir::new(after, covspan.visible_macro, covspan.bcb));
+        false // Discard the original covspan that we just split.
+    });
 
-/// Converts the initial set of coverage spans (one per MIR `Statement` or `Terminator`) into a
-/// minimal set of coverage spans, using the BCB CFG to determine where it is safe and useful to:
-///
-///  * Remove duplicate source code coverage regions
-///  * Merge spans that represent continuous (both in source code and control flow), non-branching
-///    execution
-///  * Carve out (leave uncovered) any "hole" spans that need to be left blank
-///    (e.g. closures that will be counted by their own MIR body)
-struct SpansRefiner {
-    /// The initial set of coverage spans, sorted by `Span` (`lo` and `hi`) and by relative
-    /// dominance between the `BasicCoverageBlock`s of equal `Span`s.
-    sorted_spans_iter: std::vec::IntoIter<SpanFromMir>,
-
-    /// The current coverage span to compare to its `prev`, to possibly merge, discard,
-    /// or cause `prev` to be modified or discarded.
-    /// If `curr` is not discarded or merged, it becomes `prev` for the next iteration.
-    some_curr: Option<CurrCovspan>,
-
-    /// The coverage span from a prior iteration; typically assigned from that iteration's `curr`.
-    /// If that `curr` was discarded, `prev` retains its value from the previous iteration.
-    some_prev: Option<PrevCovspan>,
-
-    /// The final coverage spans to add to the coverage map. A `Counter` or `Expression`
-    /// will also be injected into the MIR for each BCB that has associated spans.
-    refined_spans: Vec<RefinedCovspan>,
+    // The newly-split spans are added at the end, so any previous sorting
+    // is not preserved.
+    covspans.extend(extra_spans);
 }
 
-impl SpansRefiner {
-    /// Takes the initial list of (sorted) spans extracted from MIR, and "refines"
-    /// them by merging compatible adjacent spans, removing redundant spans,
-    /// and carving holes in spans when they overlap in unwanted ways.
-    fn refine_sorted_spans(sorted_spans: Vec<SpanFromMir>) -> Vec<RefinedCovspan> {
-        let sorted_spans_len = sorted_spans.len();
-        let this = Self {
-            sorted_spans_iter: sorted_spans.into_iter(),
-            some_curr: None,
-            some_prev: None,
-            refined_spans: Vec::with_capacity(sorted_spans_len),
-        };
-
-        this.to_refined_spans()
+/// Uses the holes to divide the given covspans into buckets, such that:
+/// - No span in any hole overlaps a bucket (truncating the spans if necessary).
+/// - The spans in each bucket are strictly after all spans in previous buckets,
+///   and strictly before all spans in subsequent buckets.
+///
+/// The resulting buckets are sorted relative to each other, but might not be
+/// internally sorted.
+#[instrument(level = "debug")]
+fn divide_spans_into_buckets(input_covspans: Vec<Covspan>, holes: &[Hole]) -> Vec<Vec<Covspan>> {
+    debug_assert!(input_covspans.is_sorted_by(|a, b| compare_spans(a.span, b.span).is_le()));
+    debug_assert!(holes.is_sorted_by(|a, b| compare_spans(a.span, b.span).is_le()));
+
+    // Now we're ready to start carving holes out of the initial coverage spans,
+    // and grouping them in buckets separated by the holes.
+
+    let mut input_covspans = VecDeque::from(input_covspans);
+    let mut fragments = vec![];
+
+    // For each hole:
+    // - Identify the spans that are entirely or partly before the hole.
+    // - Put those spans in a corresponding bucket, truncated to the start of the hole.
+    // - If one of those spans also extends after the hole, put the rest of it
+    //   in a "fragments" vector that is processed by the next hole.
+    let mut buckets = (0..holes.len()).map(|_| vec![]).collect::<Vec<_>>();
+    for (hole, bucket) in holes.iter().zip(&mut buckets) {
+        let fragments_from_prev = std::mem::take(&mut fragments);
+
+        // Only inspect spans that precede or overlap this hole,
+        // leaving the rest to be inspected by later holes.
+        // (This relies on the spans and holes both being sorted.)
+        let relevant_input_covspans =
+            drain_front_while(&mut input_covspans, |c| c.span.lo() < hole.span.hi());
+
+        for covspan in fragments_from_prev.into_iter().chain(relevant_input_covspans) {
+            let (before, after) = covspan.split_around_hole_span(hole.span);
+            bucket.extend(before);
+            fragments.extend(after);
+        }
     }
 
-    /// Iterate through the sorted coverage spans, and return the refined list of merged and
-    /// de-duplicated spans.
-    fn to_refined_spans(mut self) -> Vec<RefinedCovspan> {
-        while self.next_coverage_span() {
-            // For the first span we don't have `prev` set, so most of the
-            // span-processing steps don't make sense yet.
-            if self.some_prev.is_none() {
-                debug!("  initial span");
-                continue;
-            }
+    // After finding the spans before each hole, any remaining fragments/spans
+    // form their own final bucket, after the final hole.
+    // (If there were no holes, this will just be all of the initial spans.)
+    fragments.extend(input_covspans);
+    buckets.push(fragments);
 
-            // The remaining cases assume that `prev` and `curr` are set.
-            let prev = self.prev();
-            let curr = self.curr();
-
-            if prev.is_mergeable(curr) {
-                debug!(?prev, "curr will be merged into prev");
-                let curr = self.take_curr();
-                self.prev_mut().merge_from(&curr);
-            } else if prev.span.hi() <= curr.span.lo() {
-                debug!(
-                    "  different bcbs and disjoint spans, so keep curr for next iter, and add prev={prev:?}",
-                );
-                let prev = self.take_prev().into_refined();
-                self.refined_spans.push(prev);
-            } else if prev.is_hole {
-                // drop any equal or overlapping span (`curr`) and keep `prev` to test again in the
-                // next iter
-                debug!(?prev, "prev (a hole) overlaps curr, so discarding curr");
-                self.take_curr(); // Discards curr.
-            } else if curr.is_hole {
-                self.carve_out_span_for_hole();
-            } else {
-                self.cutoff_prev_at_overlapping_curr();
-            }
-        }
+    buckets
+}
 
-        // There is usually a final span remaining in `prev` after the loop ends,
-        // so add it to the output as well.
-        if let Some(prev) = self.some_prev.take() {
-            debug!("    AT END, adding last prev={prev:?}");
-            self.refined_spans.push(prev.into_refined());
-        }
+/// Similar to `.drain(..)`, but stops just before it would remove an item not
+/// satisfying the predicate.
+fn drain_front_while<'a, T>(
+    queue: &'a mut VecDeque<T>,
+    mut pred_fn: impl FnMut(&T) -> bool,
+) -> impl Iterator<Item = T> + Captures<'a> {
+    std::iter::from_fn(move || if pred_fn(queue.front()?) { queue.pop_front() } else { None })
+}
 
-        // Do one last merge pass, to simplify the output.
-        self.refined_spans.dedup_by(|b, a| {
-            if a.is_mergeable(b) {
-                debug!(?a, ?b, "merging list-adjacent refined spans");
-                a.merge_from(b);
-                true
-            } else {
+/// Takes one of the buckets of (sorted) spans extracted from MIR, and "refines"
+/// those spans by removing spans that overlap in unwanted ways.
+#[instrument(level = "debug")]
+fn remove_unwanted_overlapping_spans(sorted_spans: Vec<Covspan>) -> Vec<Covspan> {
+    debug_assert!(sorted_spans.is_sorted_by(|a, b| compare_spans(a.span, b.span).is_le()));
+
+    // Holds spans that have been read from the input vector, but haven't yet
+    // been committed to the output vector.
+    let mut pending = vec![];
+    let mut refined = vec![];
+
+    for curr in sorted_spans {
+        pending.retain(|prev: &Covspan| {
+            if prev.span.hi() <= curr.span.lo() {
+                // There's no overlap between the previous/current covspans,
+                // so move the previous one into the refined list.
+                refined.push(prev.clone());
                 false
+            } else {
+                // Otherwise, retain the previous covspan only if it has the
+                // same BCB. This tends to discard long outer spans that enclose
+                // smaller inner spans with different control flow.
+                prev.bcb == curr.bcb
             }
         });
-
-        // Discard hole spans, since their purpose was to carve out chunks from
-        // other spans, but we don't want the holes themselves in the final mappings.
-        self.refined_spans.retain(|covspan| !covspan.is_hole);
-        self.refined_spans
-    }
-
-    #[track_caller]
-    fn curr(&self) -> &CurrCovspan {
-        self.some_curr.as_ref().unwrap_or_else(|| bug!("some_curr is None (curr)"))
+        pending.push(curr);
     }
 
-    /// If called, then the next call to `next_coverage_span()` will *not* update `prev` with the
-    /// `curr` coverage span.
-    #[track_caller]
-    fn take_curr(&mut self) -> CurrCovspan {
-        self.some_curr.take().unwrap_or_else(|| bug!("some_curr is None (take_curr)"))
-    }
-
-    #[track_caller]
-    fn prev(&self) -> &PrevCovspan {
-        self.some_prev.as_ref().unwrap_or_else(|| bug!("some_prev is None (prev)"))
-    }
+    // Drain the rest of the pending list into the refined list.
+    refined.extend(pending);
+    refined
+}
 
-    #[track_caller]
-    fn prev_mut(&mut self) -> &mut PrevCovspan {
-        self.some_prev.as_mut().unwrap_or_else(|| bug!("some_prev is None (prev_mut)"))
-    }
+#[derive(Clone, Debug)]
+struct Covspan {
+    span: Span,
+    bcb: BasicCoverageBlock,
+}
 
-    #[track_caller]
-    fn take_prev(&mut self) -> PrevCovspan {
-        self.some_prev.take().unwrap_or_else(|| bug!("some_prev is None (take_prev)"))
-    }
+impl Covspan {
+    /// Splits this covspan into 0-2 parts:
+    /// - The part that is strictly before the hole span, if any.
+    /// - The part that is strictly after the hole span, if any.
+    fn split_around_hole_span(&self, hole_span: Span) -> (Option<Self>, Option<Self>) {
+        let before = try {
+            let span = self.span.trim_end(hole_span)?;
+            Self { span, ..*self }
+        };
+        let after = try {
+            let span = self.span.trim_start(hole_span)?;
+            Self { span, ..*self }
+        };
 
-    /// Advance `prev` to `curr` (if any), and `curr` to the next coverage span in sorted order.
-    fn next_coverage_span(&mut self) -> bool {
-        if let Some(curr) = self.some_curr.take() {
-            self.some_prev = Some(curr.into_prev());
-        }
-        while let Some(curr) = self.sorted_spans_iter.next() {
-            debug!("FOR curr={:?}", curr);
-            if let Some(prev) = &self.some_prev
-                && prev.span.lo() > curr.span.lo()
-            {
-                // Skip curr because prev has already advanced beyond the end of curr.
-                // This can only happen if a prior iteration updated `prev` to skip past
-                // a region of code, such as skipping past a hole.
-                debug!(?prev, "prev.span starts after curr.span, so curr will be dropped");
-            } else {
-                self.some_curr = Some(CurrCovspan::new(curr.span, curr.bcb, curr.is_hole));
-                return true;
-            }
-        }
-        false
+        (before, after)
     }
 
-    /// If `prev`s span extends left of the hole (`curr`), carve out the hole's span from
-    /// `prev`'s span. Add the portion of the span to the left of the hole; and if the span
-    /// extends to the right of the hole, update `prev` to that portion of the span.
-    fn carve_out_span_for_hole(&mut self) {
-        let prev = self.prev();
-        let curr = self.curr();
-
-        let left_cutoff = curr.span.lo();
-        let right_cutoff = curr.span.hi();
-        let has_pre_hole_span = prev.span.lo() < right_cutoff;
-        let has_post_hole_span = prev.span.hi() > right_cutoff;
-
-        if has_pre_hole_span {
-            let mut pre_hole = prev.refined_copy();
-            pre_hole.span = pre_hole.span.with_hi(left_cutoff);
-            debug!(?pre_hole, "prev overlaps a hole; adding pre-hole span");
-            self.refined_spans.push(pre_hole);
+    /// If `self` and `other` can be merged (i.e. they have the same BCB),
+    /// mutates `self.span` to also include `other.span` and returns true.
+    ///
+    /// Note that compatible covspans can be merged even if their underlying
+    /// spans are not overlapping/adjacent; any space between them will also be
+    /// part of the merged covspan.
+    fn merge_if_eligible(&mut self, other: &Self) -> bool {
+        if self.bcb != other.bcb {
+            return false;
         }
 
-        if has_post_hole_span {
-            // Mutate `prev.span` to start after the hole (and discard curr).
-            self.prev_mut().span = self.prev().span.with_lo(right_cutoff);
-            debug!(prev=?self.prev(), "mutated prev to start after the hole");
-
-            // Prevent this curr from becoming prev.
-            let hole_covspan = self.take_curr().into_refined();
-            self.refined_spans.push(hole_covspan); // since self.prev() was already updated
-        }
+        self.span = self.span.to(other.span);
+        true
     }
+}
 
-    /// `curr` overlaps `prev`. If `prev`s span extends left of `curr`s span, keep _only_
-    /// statements that end before `curr.lo()` (if any), and add the portion of the
-    /// combined span for those statements. Any other statements have overlapping spans
-    /// that can be ignored because `curr` and/or other upcoming statements/spans inside
-    /// the overlap area will produce their own counters. This disambiguation process
-    /// avoids injecting multiple counters for overlapping spans, and the potential for
-    /// double-counting.
-    fn cutoff_prev_at_overlapping_curr(&mut self) {
-        debug!(
-            "  different bcbs, overlapping spans, so ignore/drop pending and only add prev \
-            if it has statements that end before curr; prev={:?}",
-            self.prev()
-        );
-
-        let curr_span = self.curr().span;
-        if let Some(prev) = self.take_prev().cutoff_statements_at(curr_span.lo()) {
-            debug!("after cutoff, adding {prev:?}");
-            self.refined_spans.push(prev);
-        } else {
-            debug!("prev was eliminated by cutoff");
-        }
-    }
+/// Compares two spans in (lo ascending, hi descending) order.
+fn compare_spans(a: Span, b: Span) -> std::cmp::Ordering {
+    // First sort by span start.
+    Ord::cmp(&a.lo(), &b.lo())
+        // If span starts are the same, sort by span end in reverse order.
+        // This ensures that if spans A and B are adjacent in the list,
+        // and they overlap but are not equal, then either:
+        // - Span A extends further left, or
+        // - Both have the same start and span A extends further right
+        .then_with(|| Ord::cmp(&a.hi(), &b.hi()).reverse())
 }
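
Two of the pieces above are worth seeing in isolation. First, the ordering and merging rules: spans sort by (lo ascending, hi descending), so an enclosing span sorts before the spans it contains, and `Vec::dedup_by` hands its closure the *later* element first, which is what lets `merge_if_eligible` grow the earlier covspan in place. A standalone model with `(lo, hi)` integer pairs standing in for `Span` and a plain integer for the BCB:

    use std::cmp::Ordering;

    #[derive(Clone, Copy, Debug, PartialEq)]
    struct Covspan { lo: u32, hi: u32, bcb: u32 }

    fn compare_spans(a: &Covspan, b: &Covspan) -> Ordering {
        a.lo.cmp(&b.lo).then_with(|| a.hi.cmp(&b.hi).reverse())
    }

    fn merge_if_eligible(a: &mut Covspan, b: &Covspan) -> bool {
        if a.bcb != b.bcb {
            return false;
        }
        // Same BCB: widen `a` to cover both, including any gap between them.
        a.lo = a.lo.min(b.lo);
        a.hi = a.hi.max(b.hi);
        true
    }

    fn main() {
        let mut spans = vec![
            Covspan { lo: 0, hi: 10, bcb: 0 },
            Covspan { lo: 0, hi: 4, bcb: 0 },
            Covspan { lo: 12, hi: 20, bcb: 1 },
        ];
        spans.sort_by(compare_spans);
        // `dedup_by` passes (later, earlier) and drops the later element on `true`.
        spans.dedup_by(|b, a| merge_if_eligible(a, b));
        assert_eq!(
            spans,
            vec![Covspan { lo: 0, hi: 10, bcb: 0 }, Covspan { lo: 12, hi: 20, bcb: 1 }]
        );
    }

Second, `drain_front_while`: unlike a retain-style filter, it stops in front of the first element that fails the predicate, leaving the rest of the queue untouched. Outside rustc, the `Captures<'a>` workaround can be replaced by a plain `+ 'a` bound:

    use std::collections::VecDeque;

    fn drain_front_while<'a, T>(
        queue: &'a mut VecDeque<T>,
        mut pred_fn: impl FnMut(&T) -> bool + 'a,
    ) -> impl Iterator<Item = T> + 'a {
        std::iter::from_fn(move || if pred_fn(queue.front()?) { queue.pop_front() } else { None })
    }

    fn main() {
        let mut queue: VecDeque<u32> = [1, 2, 7, 3].into_iter().collect();
        let drained: Vec<u32> = drain_front_while(&mut queue, |&x| x < 5).collect();
        assert_eq!(drained, vec![1, 2]); // stops at 7, even though 3 would also match
        assert_eq!(Vec::from(queue), vec![7, 3]);
    }
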
diff --git a/compiler/rustc_mir_transform/src/coverage/spans/from_mir.rs b/compiler/rustc_mir_transform/src/coverage/spans/from_mir.rs
index d1727a94a35..09deb7534bf 100644
--- a/compiler/rustc_mir_transform/src/coverage/spans/from_mir.rs
+++ b/compiler/rustc_mir_transform/src/coverage/spans/from_mir.rs
@@ -1,5 +1,3 @@
-use rustc_data_structures::captures::Captures;
-use rustc_data_structures::fx::FxHashSet;
 use rustc_middle::bug;
 use rustc_middle::mir::coverage::CoverageKind;
 use rustc_middle::mir::{
@@ -11,118 +9,48 @@ use rustc_span::{ExpnKind, MacroKind, Span, Symbol};
 use crate::coverage::graph::{
     BasicCoverageBlock, BasicCoverageBlockData, CoverageGraph, START_BCB,
 };
+use crate::coverage::spans::Covspan;
 use crate::coverage::ExtractedHirInfo;
 
+pub(crate) struct ExtractedCovspans {
+    pub(crate) covspans: Vec<SpanFromMir>,
+    pub(crate) holes: Vec<Hole>,
+}
+
 /// Traverses the MIR body to produce an initial collection of coverage-relevant
 /// spans, each associated with a node in the coverage graph (BCB) and possibly
 /// other metadata.
-///
-/// The returned spans are sorted in a specific order that is expected by the
-/// subsequent span-refinement step.
-pub(super) fn mir_to_initial_sorted_coverage_spans(
+pub(crate) fn extract_covspans_and_holes_from_mir(
     mir_body: &mir::Body<'_>,
     hir_info: &ExtractedHirInfo,
     basic_coverage_blocks: &CoverageGraph,
-) -> Vec<SpanFromMir> {
+) -> ExtractedCovspans {
     let &ExtractedHirInfo { body_span, .. } = hir_info;
 
-    let mut initial_spans = vec![];
+    let mut covspans = vec![];
+    let mut holes = vec![];
 
     for (bcb, bcb_data) in basic_coverage_blocks.iter_enumerated() {
-        initial_spans.extend(bcb_to_initial_coverage_spans(mir_body, body_span, bcb, bcb_data));
+        bcb_to_initial_coverage_spans(
+            mir_body,
+            body_span,
+            bcb,
+            bcb_data,
+            &mut covspans,
+            &mut holes,
+        );
     }
 
     // Only add the signature span if we found at least one span in the body.
-    if !initial_spans.is_empty() {
+    if !covspans.is_empty() || !holes.is_empty() {
         // If there is no usable signature span, add a fake one (before refinement)
         // to avoid an ugly gap between the body start and the first real span.
         // FIXME: Find a more principled way to solve this problem.
         let fn_sig_span = hir_info.fn_sig_span_extended.unwrap_or_else(|| body_span.shrink_to_lo());
-        initial_spans.push(SpanFromMir::for_fn_sig(fn_sig_span));
+        covspans.push(SpanFromMir::for_fn_sig(fn_sig_span));
     }
 
-    initial_spans.sort_by(|a, b| basic_coverage_blocks.cmp_in_dominator_order(a.bcb, b.bcb));
-    remove_unwanted_macro_spans(&mut initial_spans);
-    split_visible_macro_spans(&mut initial_spans);
-
-    initial_spans.sort_by(|a, b| {
-        // First sort by span start.
-        Ord::cmp(&a.span.lo(), &b.span.lo())
-            // If span starts are the same, sort by span end in reverse order.
-            // This ensures that if spans A and B are adjacent in the list,
-            // and they overlap but are not equal, then either:
-            // - Span A extends further left, or
-            // - Both have the same start and span A extends further right
-            .then_with(|| Ord::cmp(&a.span.hi(), &b.span.hi()).reverse())
-            // If two spans have the same lo & hi, put hole spans first,
-            // as they take precedence over non-hole spans.
-            .then_with(|| Ord::cmp(&a.is_hole, &b.is_hole).reverse())
-            // After deduplication, we want to keep only the most-dominated BCB.
-            .then_with(|| basic_coverage_blocks.cmp_in_dominator_order(a.bcb, b.bcb).reverse())
-    });
-
-    // Among covspans with the same span, keep only one. Hole spans take
-    // precedence, otherwise keep the one with the most-dominated BCB.
-    // (Ideally we should try to preserve _all_ non-dominating BCBs, but that
-    // requires a lot more complexity in the span refiner, for little benefit.)
-    initial_spans.dedup_by(|b, a| a.span.source_equal(b.span));
-
-    initial_spans
-}
-
-/// Macros that expand into branches (e.g. `assert!`, `trace!`) tend to generate
-/// multiple condition/consequent blocks that have the span of the whole macro
-/// invocation, which is unhelpful. Keeping only the first such span seems to
-/// give better mappings, so remove the others.
-///
-/// (The input spans should be sorted in BCB dominator order, so that the
-/// retained "first" span is likely to dominate the others.)
-fn remove_unwanted_macro_spans(initial_spans: &mut Vec<SpanFromMir>) {
-    let mut seen_macro_spans = FxHashSet::default();
-    initial_spans.retain(|covspan| {
-        // Ignore (retain) hole spans and non-macro-expansion spans.
-        if covspan.is_hole || covspan.visible_macro.is_none() {
-            return true;
-        }
-
-        // Retain only the first macro-expanded covspan with this span.
-        seen_macro_spans.insert(covspan.span)
-    });
-}
-
-/// When a span corresponds to a macro invocation that is visible from the
-/// function body, split it into two parts. The first part covers just the
-/// macro name plus `!`, and the second part covers the rest of the macro
-/// invocation. This seems to give better results for code that uses macros.
-fn split_visible_macro_spans(initial_spans: &mut Vec<SpanFromMir>) {
-    let mut extra_spans = vec![];
-
-    initial_spans.retain(|covspan| {
-        if covspan.is_hole {
-            return true;
-        }
-
-        let Some(visible_macro) = covspan.visible_macro else { return true };
-
-        let split_len = visible_macro.as_str().len() as u32 + 1;
-        let (before, after) = covspan.span.split_at(split_len);
-        if !covspan.span.contains(before) || !covspan.span.contains(after) {
-            // Something is unexpectedly wrong with the split point.
-            // The debug assertion in `split_at` will have already caught this,
-            // but in release builds it's safer to do nothing and maybe get a
-            // bug report for unexpected coverage, rather than risk an ICE.
-            return true;
-        }
-
-        assert!(!covspan.is_hole);
-        extra_spans.push(SpanFromMir::new(before, covspan.visible_macro, covspan.bcb, false));
-        extra_spans.push(SpanFromMir::new(after, covspan.visible_macro, covspan.bcb, false));
-        false // Discard the original covspan that we just split.
-    });
-
-    // The newly-split spans are added at the end, so any previous sorting
-    // is not preserved.
-    initial_spans.extend(extra_spans);
+    ExtractedCovspans { covspans, holes }
 }
 
 // Generate a set of coverage spans from the filtered set of `Statement`s and `Terminator`s of
@@ -135,8 +63,10 @@ fn bcb_to_initial_coverage_spans<'a, 'tcx>(
     body_span: Span,
     bcb: BasicCoverageBlock,
     bcb_data: &'a BasicCoverageBlockData,
-) -> impl Iterator<Item = SpanFromMir> + Captures<'a> + Captures<'tcx> {
-    bcb_data.basic_blocks.iter().flat_map(move |&bb| {
+    initial_covspans: &mut Vec<SpanFromMir>,
+    holes: &mut Vec<Hole>,
+) {
+    for &bb in &bcb_data.basic_blocks {
         let data = &mir_body[bb];
 
         let unexpand = move |expn_span| {
@@ -146,24 +76,32 @@ fn bcb_to_initial_coverage_spans<'a, 'tcx>(
                 .filter(|(span, _)| !span.source_equal(body_span))
         };
 
-        let statement_spans = data.statements.iter().filter_map(move |statement| {
+        let mut extract_statement_span = |statement| {
             let expn_span = filtered_statement_span(statement)?;
             let (span, visible_macro) = unexpand(expn_span)?;
 
             // A statement that looks like the assignment of a closure expression
             // is treated as a "hole" span, to be carved out of other spans.
-            Some(SpanFromMir::new(span, visible_macro, bcb, is_closure_like(statement)))
-        });
+            if is_closure_like(statement) {
+                holes.push(Hole { span });
+            } else {
+                initial_covspans.push(SpanFromMir::new(span, visible_macro, bcb));
+            }
+            Some(())
+        };
+        for statement in data.statements.iter() {
+            extract_statement_span(statement);
+        }
 
-        let terminator_span = Some(data.terminator()).into_iter().filter_map(move |terminator| {
+        let mut extract_terminator_span = |terminator| {
             let expn_span = filtered_terminator_span(terminator)?;
             let (span, visible_macro) = unexpand(expn_span)?;
 
-            Some(SpanFromMir::new(span, visible_macro, bcb, false))
-        });
-
-        statement_spans.chain(terminator_span)
-    })
+            initial_covspans.push(SpanFromMir::new(span, visible_macro, bcb));
+            Some(())
+        };
+        extract_terminator_span(data.terminator());
+    }
 }
 
 fn is_closure_like(statement: &Statement<'_>) -> bool {
@@ -331,7 +269,23 @@ fn unexpand_into_body_span_with_prev(
 }
 
 #[derive(Debug)]
-pub(super) struct SpanFromMir {
+pub(crate) struct Hole {
+    pub(crate) span: Span,
+}
+
+impl Hole {
+    pub(crate) fn merge_if_overlapping_or_adjacent(&mut self, other: &mut Self) -> bool {
+        if !self.span.overlaps_or_adjacent(other.span) {
+            return false;
+        }
+
+        self.span = self.span.to(other.span);
+        true
+    }
+}
+
+#[derive(Debug)]
+pub(crate) struct SpanFromMir {
     /// A span that has been extracted from MIR and then "un-expanded" back to
     /// within the current function's `body_span`. After various intermediate
     /// processing steps, this span is emitted as part of the final coverage
@@ -339,26 +293,22 @@ pub(super) struct SpanFromMir {
     ///
     /// With the exception of `fn_sig_span`, this should always be contained
     /// within `body_span`.
-    pub(super) span: Span,
-    visible_macro: Option<Symbol>,
-    pub(super) bcb: BasicCoverageBlock,
-    /// If true, this covspan represents a "hole" that should be carved out
-    /// from other spans, e.g. because it represents a closure expression that
-    /// will be instrumented separately as its own function.
-    pub(super) is_hole: bool,
+    pub(crate) span: Span,
+    pub(crate) visible_macro: Option<Symbol>,
+    pub(crate) bcb: BasicCoverageBlock,
 }
 
 impl SpanFromMir {
     fn for_fn_sig(fn_sig_span: Span) -> Self {
-        Self::new(fn_sig_span, None, START_BCB, false)
+        Self::new(fn_sig_span, None, START_BCB)
+    }
+
+    pub(crate) fn new(span: Span, visible_macro: Option<Symbol>, bcb: BasicCoverageBlock) -> Self {
+        Self { span, visible_macro, bcb }
     }
 
-    fn new(
-        span: Span,
-        visible_macro: Option<Symbol>,
-        bcb: BasicCoverageBlock,
-        is_hole: bool,
-    ) -> Self {
-        Self { span, visible_macro, bcb, is_hole }
+    pub(crate) fn into_covspan(self) -> Covspan {
+        let Self { span, visible_macro: _, bcb } = self;
+        Covspan { span, bcb }
     }
 }
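
The `Hole::merge_if_overlapping_or_adjacent` helper added above is easiest to see on plain ranges. Below is a minimal standalone sketch of the same merge loop, using a toy `Span` type (half-open byte ranges) instead of rustc's interned spans; only the shape of the logic is taken from the patch:

```rust
/// Toy stand-in for rustc's `Span`: a half-open byte range.
#[derive(Copy, Clone, Debug, PartialEq)]
struct Span { lo: u32, hi: u32 }

impl Span {
    fn overlaps_or_adjacent(self, other: Span) -> bool {
        self.lo <= other.hi && other.lo <= self.hi
    }
    /// Smallest span covering both, like rustc's `Span::to`.
    fn to(self, other: Span) -> Span {
        Span { lo: self.lo.min(other.lo), hi: self.hi.max(other.hi) }
    }
}

struct Hole { span: Span }

impl Hole {
    fn merge_if_overlapping_or_adjacent(&mut self, other: &mut Self) -> bool {
        if !self.span.overlaps_or_adjacent(other.span) {
            return false;
        }
        self.span = self.span.to(other.span);
        true
    }
}

fn main() {
    // Sorted holes; overlapping or adjacent ones collapse into a single
    // carved-out region, disjoint ones are kept separate.
    let holes = vec![
        Hole { span: Span { lo: 0, hi: 4 } },
        Hole { span: Span { lo: 4, hi: 9 } },   // adjacent to the first
        Hole { span: Span { lo: 20, hi: 25 } }, // disjoint
    ];
    let mut merged: Vec<Hole> = vec![];
    for mut hole in holes {
        if let Some(last) = merged.last_mut() {
            if last.merge_if_overlapping_or_adjacent(&mut hole) {
                continue;
            }
        }
        merged.push(hole);
    }
    assert_eq!(merged.len(), 2);
    assert_eq!(merged[0].span, Span { lo: 0, hi: 9 });
}
```
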
diff --git a/compiler/rustc_mir_transform/src/coverage/tests.rs b/compiler/rustc_mir_transform/src/coverage/tests.rs
index ca64688e6b8..048547dc9f5 100644
--- a/compiler/rustc_mir_transform/src/coverage/tests.rs
+++ b/compiler/rustc_mir_transform/src/coverage/tests.rs
@@ -24,7 +24,6 @@
 //! globals is comparatively simpler. The easiest way is to wrap the test in a closure argument
 //! to: `rustc_span::create_default_session_globals_then(|| { test_here(); })`.
 
-use super::counters;
 use super::graph::{self, BasicCoverageBlock};
 
 use itertools::Itertools;
@@ -551,108 +550,3 @@ fn test_covgraph_switchint_loop_then_inner_loop_else_break() {
     assert_successors(&basic_coverage_blocks, bcb(5), &[bcb(1)]);
     assert_successors(&basic_coverage_blocks, bcb(6), &[bcb(4)]);
 }
-
-#[test]
-fn test_find_loop_backedges_none() {
-    let mir_body = goto_switchint();
-    let basic_coverage_blocks = graph::CoverageGraph::from_mir(&mir_body);
-    if false {
-        eprintln!(
-            "basic_coverage_blocks = {:?}",
-            basic_coverage_blocks.iter_enumerated().collect::<Vec<_>>()
-        );
-        eprintln!("successors = {:?}", basic_coverage_blocks.successors);
-    }
-    let backedges = graph::find_loop_backedges(&basic_coverage_blocks);
-    assert_eq!(
-        backedges.iter_enumerated().map(|(_bcb, backedges)| backedges.len()).sum::<usize>(),
-        0,
-        "backedges: {:?}",
-        backedges
-    );
-}
-
-#[test]
-fn test_find_loop_backedges_one() {
-    let mir_body = switchint_then_loop_else_return();
-    let basic_coverage_blocks = graph::CoverageGraph::from_mir(&mir_body);
-    let backedges = graph::find_loop_backedges(&basic_coverage_blocks);
-    assert_eq!(
-        backedges.iter_enumerated().map(|(_bcb, backedges)| backedges.len()).sum::<usize>(),
-        1,
-        "backedges: {:?}",
-        backedges
-    );
-
-    assert_eq!(backedges[bcb(1)], &[bcb(3)]);
-}
-
-#[test]
-fn test_find_loop_backedges_two() {
-    let mir_body = switchint_loop_then_inner_loop_else_break();
-    let basic_coverage_blocks = graph::CoverageGraph::from_mir(&mir_body);
-    let backedges = graph::find_loop_backedges(&basic_coverage_blocks);
-    assert_eq!(
-        backedges.iter_enumerated().map(|(_bcb, backedges)| backedges.len()).sum::<usize>(),
-        2,
-        "backedges: {:?}",
-        backedges
-    );
-
-    assert_eq!(backedges[bcb(1)], &[bcb(5)]);
-    assert_eq!(backedges[bcb(4)], &[bcb(6)]);
-}
-
-#[test]
-fn test_traverse_coverage_with_loops() {
-    let mir_body = switchint_loop_then_inner_loop_else_break();
-    let basic_coverage_blocks = graph::CoverageGraph::from_mir(&mir_body);
-    let mut traversed_in_order = Vec::new();
-    let mut traversal = graph::TraverseCoverageGraphWithLoops::new(&basic_coverage_blocks);
-    while let Some(bcb) = traversal.next() {
-        traversed_in_order.push(bcb);
-    }
-
-    // bcb0 is visited first. Then bcb1 starts the first loop, and all remaining nodes, *except*
-    // bcb6 are inside the first loop.
-    assert_eq!(
-        *traversed_in_order.last().expect("should have elements"),
-        bcb(6),
-        "bcb6 should not be visited until all nodes inside the first loop have been visited"
-    );
-}
-
-#[test]
-fn test_make_bcb_counters() {
-    rustc_span::create_default_session_globals_then(|| {
-        let mir_body = goto_switchint();
-        let basic_coverage_blocks = graph::CoverageGraph::from_mir(&mir_body);
-        // Historically this test would use `spans` internals to set up fake
-        // coverage spans for BCBs 1 and 2. Now we skip that step and just tell
-        // BCB counter construction that those BCBs have spans.
-        let bcb_has_coverage_spans = |bcb: BasicCoverageBlock| (1..=2).contains(&bcb.as_usize());
-        let coverage_counters = counters::CoverageCounters::make_bcb_counters(
-            &basic_coverage_blocks,
-            bcb_has_coverage_spans,
-        );
-        assert_eq!(coverage_counters.num_expressions(), 0);
-
-        assert_eq!(
-            0, // bcb1 has a `Counter` with id = 0
-            match coverage_counters.bcb_counter(bcb(1)).expect("should have a counter") {
-                counters::BcbCounter::Counter { id, .. } => id,
-                _ => panic!("expected a Counter"),
-            }
-            .as_u32()
-        );
-
-        assert_eq!(
-            1, // bcb2 has a `Counter` with id = 1
-            match coverage_counters.bcb_counter(bcb(2)).expect("should have a counter") {
-                counters::BcbCounter::Counter { id, .. } => id,
-                _ => panic!("expected a Counter"),
-            }
-            .as_u32()
-        );
-    });
-}
diff --git a/compiler/rustc_mir_transform/src/dataflow_const_prop.rs b/compiler/rustc_mir_transform/src/dataflow_const_prop.rs
index e88b727a21e..0fd85eb345d 100644
--- a/compiler/rustc_mir_transform/src/dataflow_const_prop.rs
+++ b/compiler/rustc_mir_transform/src/dataflow_const_prop.rs
@@ -10,7 +10,7 @@ use rustc_middle::bug;
 use rustc_middle::mir::interpret::{InterpResult, Scalar};
 use rustc_middle::mir::visit::{MutVisitor, PlaceContext, Visitor};
 use rustc_middle::mir::*;
-use rustc_middle::ty::layout::LayoutOf;
+use rustc_middle::ty::layout::{HasParamEnv, LayoutOf};
 use rustc_middle::ty::{self, Ty, TyCtxt};
 use rustc_mir_dataflow::value_analysis::{
     Map, PlaceIndex, State, TrackElem, ValueAnalysis, ValueAnalysisWrapper, ValueOrPlace,
@@ -203,7 +203,8 @@ impl<'tcx> ValueAnalysis<'tcx> for ConstAnalysis<'_, 'tcx> {
                     && let operand_ty = operand.ty(self.local_decls, self.tcx)
                     && let Some(operand_ty) = operand_ty.builtin_deref(true)
                     && let ty::Array(_, len) = operand_ty.kind()
-                    && let Some(len) = Const::Ty(*len).try_eval_scalar_int(self.tcx, self.param_env)
+                    && let Some(len) = Const::Ty(self.tcx.types.usize, *len)
+                        .try_eval_scalar_int(self.tcx, self.param_env)
                 {
                     state.insert_value_idx(target_len, FlatSet::Elem(len.into()), self.map());
                 }
@@ -221,7 +222,7 @@ impl<'tcx> ValueAnalysis<'tcx> for ConstAnalysis<'_, 'tcx> {
             Rvalue::Len(place) => {
                 let place_ty = place.ty(self.local_decls, self.tcx);
                 if let ty::Array(_, len) = place_ty.ty.kind() {
-                    Const::Ty(*len)
+                    Const::Ty(self.tcx.types.usize, *len)
                         .try_eval_scalar(self.tcx, self.param_env)
                         .map_or(FlatSet::Top, FlatSet::Elem)
                 } else if let [ProjectionElem::Deref] = place.projection[..] {
@@ -284,9 +285,11 @@ impl<'tcx> ValueAnalysis<'tcx> for ConstAnalysis<'_, 'tcx> {
                 let val = match null_op {
                     NullOp::SizeOf if layout.is_sized() => layout.size.bytes(),
                     NullOp::AlignOf if layout.is_sized() => layout.align.abi.bytes(),
-                    NullOp::OffsetOf(fields) => {
-                        layout.offset_of_subfield(&self.ecx, fields.iter()).bytes()
-                    }
+                    NullOp::OffsetOf(fields) => self
+                        .ecx
+                        .tcx
+                        .offset_of_subfield(self.ecx.param_env(), layout, fields.iter())
+                        .bytes(),
                     _ => return ValueOrPlace::Value(FlatSet::Top),
                 };
                 FlatSet::Elem(Scalar::from_target_usize(val, &self.tcx))
@@ -323,7 +326,7 @@ impl<'tcx> ValueAnalysis<'tcx> for ConstAnalysis<'_, 'tcx> {
             // This allows the set of visited edges to grow monotonically with the lattice.
             FlatSet::Bottom => TerminatorEdges::None,
             FlatSet::Elem(scalar) => {
-                let choice = scalar.assert_bits(scalar.size());
+                let choice = scalar.assert_scalar_int().to_bits_unchecked();
                 TerminatorEdges::Single(targets.target_for_value(choice))
             }
             FlatSet::Top => TerminatorEdges::SwitchInt { discr, targets },
@@ -606,7 +609,7 @@ fn propagatable_scalar(
     map: &Map,
 ) -> Option<Scalar> {
     if let FlatSet::Elem(value) = state.get_idx(place, map)
-        && value.try_to_int().is_ok()
+        && value.try_to_scalar_int().is_ok()
     {
         // Do not attempt to propagate pointers, as we may fail to preserve their identity.
         Some(value)
@@ -667,7 +670,7 @@ fn try_write_constant<'tcx>(
                 let FlatSet::Elem(Scalar::Int(discr)) = state.get_idx(discr, map) else {
                     throw_machine_stop_str!("discriminant with provenance")
                 };
-                let discr_bits = discr.assert_bits(discr.size());
+                let discr_bits = discr.to_bits(discr.size());
                 let Some((variant, _)) = def.discriminants(*ecx.tcx).find(|(_, var)| discr_bits == var.val) else {
                     throw_machine_stop_str!("illegal discriminant for enum")
                 };
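
At the source level, the array-length handling these hunks adjust (threading `usize` through `Const::Ty`) targets patterns like the following. This is a behavioral illustration of what the analysis can fold, not compiler code; whether a given build actually folds each case depends on the pass pipeline:

```rust
fn main() {
    let arr = [0u8; 16];

    // `Rvalue::Len` of an array: the length is a type-level constant
    // (a `Const::Ty` of type `usize` in MIR), so it can fold to 16.
    let n = arr.len();

    // Unsizing `&[T; N]` to `&[T]`: the analysis can recover the same
    // constant length through the cast (the `target_len` case above).
    let slice: &[u8] = &arr;
    let m = slice.len();

    assert_eq!(n, 16);
    assert_eq!(m, 16);
}
```
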
diff --git a/compiler/rustc_mir_transform/src/dump_mir.rs b/compiler/rustc_mir_transform/src/dump_mir.rs
index 13841be494c..3b71cf02c1a 100644
--- a/compiler/rustc_mir_transform/src/dump_mir.rs
+++ b/compiler/rustc_mir_transform/src/dump_mir.rs
@@ -28,6 +28,9 @@ pub fn emit_mir(tcx: TyCtxt<'_>) -> io::Result<()> {
         OutFileName::Real(path) => {
             let mut f = io::BufWriter::new(File::create(&path)?);
             write_mir_pretty(tcx, None, &mut f)?;
+            if tcx.sess.opts.json_artifact_notifications {
+                tcx.dcx().emit_artifact_notification(&path, "mir");
+            }
         }
     }
     Ok(())
diff --git a/compiler/rustc_mir_transform/src/elaborate_drops.rs b/compiler/rustc_mir_transform/src/elaborate_drops.rs
index 03d952abad1..665b2260294 100644
--- a/compiler/rustc_mir_transform/src/elaborate_drops.rs
+++ b/compiler/rustc_mir_transform/src/elaborate_drops.rs
@@ -34,14 +34,14 @@ use std::fmt;
 ///
 /// ```text
 // fn drop_term<T>(t: &mut T) {
-//     mir!(
+//     mir! {
 //         {
 //             Drop(*t, exit)
 //         }
 //         exit = {
 //             Return()
 //         }
-//     )
+//     }
 // }
 /// ```
 pub struct ElaborateDrops;
diff --git a/compiler/rustc_mir_transform/src/errors.rs b/compiler/rustc_mir_transform/src/errors.rs
index b28dcb38cb6..dc7648d27b5 100644
--- a/compiler/rustc_mir_transform/src/errors.rs
+++ b/compiler/rustc_mir_transform/src/errors.rs
@@ -104,7 +104,7 @@ impl<'a> LintDiagnostic<'a, ()> for MustNotSupend<'_, '_> {
         diag.primary_message(fluent::mir_transform_must_not_suspend);
         diag.span_label(self.yield_sp, fluent::_subdiag::label);
         if let Some(reason) = self.reason {
-            diag.subdiagnostic(diag.dcx, reason);
+            diag.subdiagnostic(reason);
         }
         diag.span_help(self.src_sp, fluent::_subdiag::help);
         diag.arg("pre", self.pre);
diff --git a/compiler/rustc_mir_transform/src/gvn.rs b/compiler/rustc_mir_transform/src/gvn.rs
index fadb5edefdf..0f8f28e3462 100644
--- a/compiler/rustc_mir_transform/src/gvn.rs
+++ b/compiler/rustc_mir_transform/src/gvn.rs
@@ -83,8 +83,8 @@
 //! that contain `AllocId`s.
 
 use rustc_const_eval::const_eval::DummyMachine;
-use rustc_const_eval::interpret::{intern_const_alloc_for_constprop, MemoryKind};
-use rustc_const_eval::interpret::{ImmTy, InterpCx, OpTy, Projectable, Scalar};
+use rustc_const_eval::interpret::{intern_const_alloc_for_constprop, MemPlaceMeta, MemoryKind};
+use rustc_const_eval::interpret::{ImmTy, Immediate, InterpCx, OpTy, Projectable, Scalar};
 use rustc_data_structures::fx::FxIndexSet;
 use rustc_data_structures::graph::dominators::Dominators;
 use rustc_hir::def::DefKind;
@@ -95,11 +95,11 @@ use rustc_middle::bug;
 use rustc_middle::mir::interpret::GlobalAlloc;
 use rustc_middle::mir::visit::*;
 use rustc_middle::mir::*;
-use rustc_middle::ty::layout::LayoutOf;
+use rustc_middle::ty::layout::{HasParamEnv, LayoutOf};
 use rustc_middle::ty::{self, Ty, TyCtxt};
 use rustc_span::def_id::DefId;
 use rustc_span::DUMMY_SP;
-use rustc_target::abi::{self, Abi, Size, VariantIdx, FIRST_VARIANT};
+use rustc_target::abi::{self, Abi, FieldIdx, Size, VariantIdx, FIRST_VARIANT};
 use smallvec::SmallVec;
 use std::borrow::Cow;
 
@@ -177,6 +177,12 @@ enum AggregateTy<'tcx> {
     Array,
     Tuple,
     Def(DefId, ty::GenericArgsRef<'tcx>),
+    RawPtr {
+        /// Needed for cast propagation.
+        data_pointer_ty: Ty<'tcx>,
+        /// The data pointer can be anything thin, so it doesn't determine the output type.
+        output_pointer_ty: Ty<'tcx>,
+    },
 }
 
 #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
@@ -223,7 +229,6 @@ enum Value<'tcx> {
     NullaryOp(NullOp<'tcx>, Ty<'tcx>),
     UnaryOp(UnOp, VnIndex),
     BinaryOp(BinOp, VnIndex, VnIndex),
-    CheckedBinaryOp(BinOp, VnIndex, VnIndex), // FIXME get rid of this, work like MIR instead
     Cast {
         kind: CastKind,
         value: VnIndex,
@@ -325,8 +330,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
         let is_sized = !self.feature_unsized_locals
             || self.local_decls[local].ty.is_sized(self.tcx, self.param_env);
         if is_sized {
-            self.rev_locals.ensure_contains_elem(value, SmallVec::new);
-            self.rev_locals[value].push(local);
+            self.rev_locals.ensure_contains_elem(value, SmallVec::new).push(local);
         }
     }
 
@@ -386,11 +390,22 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
                     AggregateTy::Def(def_id, args) => {
                         self.tcx.type_of(def_id).instantiate(self.tcx, args)
                     }
+                    AggregateTy::RawPtr { output_pointer_ty, .. } => output_pointer_ty,
                 };
                 let variant = if ty.is_enum() { Some(variant) } else { None };
                 let ty = self.ecx.layout_of(ty).ok()?;
                 if ty.is_zst() {
                     ImmTy::uninit(ty).into()
+                } else if matches!(kind, AggregateTy::RawPtr { .. }) {
+                    // Pointers don't have fields, so don't `project_field` them.
+                    let data = self.ecx.read_pointer(fields[0]).ok()?;
+                    let meta = if fields[1].layout.is_zst() {
+                        MemPlaceMeta::None
+                    } else {
+                        MemPlaceMeta::Meta(self.ecx.read_scalar(fields[1]).ok()?)
+                    };
+                    let ptr_imm = Immediate::new_pointer_with_meta(data, meta, &self.ecx);
+                    ImmTy::from_immediate(ptr_imm, ty).into()
                 } else if matches!(ty.abi, Abi::Scalar(..) | Abi::ScalarPair(..)) {
                     let dest = self.ecx.allocate(ty, MemoryKind::Stack).ok()?;
                     let variant_dest = if let Some(variant) = variant {
@@ -472,7 +487,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
                 let slice = self.evaluated[slice].as_ref()?;
                 let usize_layout = self.ecx.layout_of(self.tcx.types.usize).unwrap();
                 let len = slice.len(&self.ecx).ok()?;
-                let imm = ImmTy::try_from_uint(len, usize_layout)?;
+                let imm = ImmTy::from_uint(len, usize_layout);
                 imm.into()
             }
             NullaryOp(null_op, ty) => {
@@ -485,13 +500,15 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
                 let val = match null_op {
                     NullOp::SizeOf => layout.size.bytes(),
                     NullOp::AlignOf => layout.align.abi.bytes(),
-                    NullOp::OffsetOf(fields) => {
-                        layout.offset_of_subfield(&self.ecx, fields.iter()).bytes()
-                    }
+                    NullOp::OffsetOf(fields) => self
+                        .ecx
+                        .tcx
+                        .offset_of_subfield(self.ecx.param_env(), layout, fields.iter())
+                        .bytes(),
                     NullOp::UbChecks => return None,
                 };
                 let usize_layout = self.ecx.layout_of(self.tcx.types.usize).unwrap();
-                let imm = ImmTy::try_from_uint(val, usize_layout)?;
+                let imm = ImmTy::from_uint(val, usize_layout);
                 imm.into()
             }
             UnaryOp(un_op, operand) => {
@@ -508,17 +525,6 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
                 let val = self.ecx.binary_op(bin_op, &lhs, &rhs).ok()?;
                 val.into()
             }
-            CheckedBinaryOp(bin_op, lhs, rhs) => {
-                let lhs = self.evaluated[lhs].as_ref()?;
-                let lhs = self.ecx.read_immediate(lhs).ok()?;
-                let rhs = self.evaluated[rhs].as_ref()?;
-                let rhs = self.ecx.read_immediate(rhs).ok()?;
-                let val = self
-                    .ecx
-                    .binary_op(bin_op.wrapping_to_overflowing().unwrap(), &lhs, &rhs)
-                    .ok()?;
-                val.into()
-            }
             Cast { kind, value, from: _, to } => match kind {
                 CastKind::IntToInt | CastKind::IntToFloat => {
                     let value = self.evaluated[value].as_ref()?;
@@ -829,17 +835,10 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
                 let lhs = lhs?;
                 let rhs = rhs?;
 
-                if let Some(op) = op.overflowing_to_wrapping() {
-                    if let Some(value) = self.simplify_binary(op, true, ty, lhs, rhs) {
-                        return Some(value);
-                    }
-                    Value::CheckedBinaryOp(op, lhs, rhs)
-                } else {
-                    if let Some(value) = self.simplify_binary(op, false, ty, lhs, rhs) {
-                        return Some(value);
-                    }
-                    Value::BinaryOp(op, lhs, rhs)
+                if let Some(value) = self.simplify_binary(op, ty, lhs, rhs) {
+                    return Some(value);
                 }
+                Value::BinaryOp(op, lhs, rhs)
             }
             Rvalue::UnaryOp(op, ref mut arg) => {
                 let arg = self.simplify_operand(arg, location)?;
@@ -881,10 +880,10 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
         rvalue: &mut Rvalue<'tcx>,
         location: Location,
     ) -> Option<VnIndex> {
-        let Rvalue::Aggregate(box ref kind, ref mut fields) = *rvalue else { bug!() };
+        let Rvalue::Aggregate(box ref kind, ref mut field_ops) = *rvalue else { bug!() };
 
         let tcx = self.tcx;
-        if fields.is_empty() {
+        if field_ops.is_empty() {
             let is_zst = match *kind {
                 AggregateKind::Array(..)
                 | AggregateKind::Tuple
@@ -903,13 +902,13 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
             }
         }
 
-        let (ty, variant_index) = match *kind {
+        let (mut ty, variant_index) = match *kind {
             AggregateKind::Array(..) => {
-                assert!(!fields.is_empty());
+                assert!(!field_ops.is_empty());
                 (AggregateTy::Array, FIRST_VARIANT)
             }
             AggregateKind::Tuple => {
-                assert!(!fields.is_empty());
+                assert!(!field_ops.is_empty());
                 (AggregateTy::Tuple, FIRST_VARIANT)
             }
             AggregateKind::Closure(did, args)
@@ -920,15 +919,49 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
             }
             // Do not track unions.
             AggregateKind::Adt(_, _, _, _, Some(_)) => return None,
-            // FIXME: Do the extra work to GVN `from_raw_parts`
-            AggregateKind::RawPtr(..) => return None,
+            AggregateKind::RawPtr(pointee_ty, mtbl) => {
+                assert_eq!(field_ops.len(), 2);
+                let data_pointer_ty = field_ops[FieldIdx::ZERO].ty(self.local_decls, self.tcx);
+                let output_pointer_ty = Ty::new_ptr(self.tcx, pointee_ty, mtbl);
+                (AggregateTy::RawPtr { data_pointer_ty, output_pointer_ty }, FIRST_VARIANT)
+            }
         };
 
-        let fields: Option<Vec<_>> = fields
+        let fields: Option<Vec<_>> = field_ops
             .iter_mut()
             .map(|op| self.simplify_operand(op, location).or_else(|| self.new_opaque()))
             .collect();
-        let fields = fields?;
+        let mut fields = fields?;
+
+        if let AggregateTy::RawPtr { data_pointer_ty, output_pointer_ty } = &mut ty {
+            let mut was_updated = false;
+
+            // Any thin pointer of matching mutability is fine as the data pointer.
+            while let Value::Cast {
+                kind: CastKind::PtrToPtr,
+                value: cast_value,
+                from: cast_from,
+                to: _,
+            } = self.get(fields[0])
+                && let ty::RawPtr(from_pointee_ty, from_mtbl) = cast_from.kind()
+                && let ty::RawPtr(_, output_mtbl) = output_pointer_ty.kind()
+                && from_mtbl == output_mtbl
+                && from_pointee_ty.is_sized(self.tcx, self.param_env)
+            {
+                fields[0] = *cast_value;
+                *data_pointer_ty = *cast_from;
+                was_updated = true;
+            }
+
+            if was_updated {
+                if let Some(const_) = self.try_as_constant(fields[0]) {
+                    field_ops[FieldIdx::ZERO] = Operand::Constant(Box::new(const_));
+                } else if let Some(local) = self.try_as_local(fields[0], location) {
+                    field_ops[FieldIdx::ZERO] = Operand::Copy(Place::from(local));
+                    self.reused_locals.insert(local);
+                }
+            }
+        }
 
         if let AggregateTy::Array = ty
             && fields.len() > 4
@@ -960,6 +993,9 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
             (UnOp::Not, Value::BinaryOp(BinOp::Ne, lhs, rhs)) => {
                 Value::BinaryOp(BinOp::Eq, *lhs, *rhs)
             }
+            (UnOp::PtrMetadata, Value::Aggregate(AggregateTy::RawPtr { .. }, _, fields)) => {
+                return Some(fields[1]);
+            }
             _ => return None,
         };
 
@@ -970,7 +1006,6 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
     fn simplify_binary(
         &mut self,
         op: BinOp,
-        checked: bool,
         lhs_ty: Ty<'tcx>,
         lhs: VnIndex,
         rhs: VnIndex,
@@ -999,22 +1034,39 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
         use Either::{Left, Right};
         let a = as_bits(lhs).map_or(Right(lhs), Left);
         let b = as_bits(rhs).map_or(Right(rhs), Left);
+
         let result = match (op, a, b) {
             // Neutral elements.
-            (BinOp::Add | BinOp::BitOr | BinOp::BitXor, Left(0), Right(p))
+            (
+                BinOp::Add
+                | BinOp::AddWithOverflow
+                | BinOp::AddUnchecked
+                | BinOp::BitOr
+                | BinOp::BitXor,
+                Left(0),
+                Right(p),
+            )
             | (
                 BinOp::Add
+                | BinOp::AddWithOverflow
+                | BinOp::AddUnchecked
                 | BinOp::BitOr
                 | BinOp::BitXor
                 | BinOp::Sub
+                | BinOp::SubWithOverflow
+                | BinOp::SubUnchecked
                 | BinOp::Offset
                 | BinOp::Shl
                 | BinOp::Shr,
                 Right(p),
                 Left(0),
             )
-            | (BinOp::Mul, Left(1), Right(p))
-            | (BinOp::Mul | BinOp::Div, Right(p), Left(1)) => p,
+            | (BinOp::Mul | BinOp::MulWithOverflow | BinOp::MulUnchecked, Left(1), Right(p))
+            | (
+                BinOp::Mul | BinOp::MulWithOverflow | BinOp::MulUnchecked | BinOp::Div,
+                Right(p),
+                Left(1),
+            ) => p,
             // Attempt to simplify `x & ALL_ONES` to `x`, with `ALL_ONES` depending on type size.
             (BinOp::BitAnd, Right(p), Left(ones)) | (BinOp::BitAnd, Left(ones), Right(p))
                 if ones == layout.size.truncate(u128::MAX)
@@ -1023,10 +1075,21 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
                 p
             }
             // Absorbing elements.
-            (BinOp::Mul | BinOp::BitAnd, _, Left(0))
+            (
+                BinOp::Mul | BinOp::MulWithOverflow | BinOp::MulUnchecked | BinOp::BitAnd,
+                _,
+                Left(0),
+            )
             | (BinOp::Rem, _, Left(1))
             | (
-                BinOp::Mul | BinOp::Div | BinOp::Rem | BinOp::BitAnd | BinOp::Shl | BinOp::Shr,
+                BinOp::Mul
+                | BinOp::MulWithOverflow
+                | BinOp::MulUnchecked
+                | BinOp::Div
+                | BinOp::Rem
+                | BinOp::BitAnd
+                | BinOp::Shl
+                | BinOp::Shr,
                 Left(0),
                 _,
             ) => self.insert_scalar(Scalar::from_uint(0u128, layout.size), lhs_ty),
@@ -1038,7 +1101,9 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
                 self.insert_scalar(Scalar::from_uint(ones, layout.size), lhs_ty)
             }
             // Sub/Xor with itself.
-            (BinOp::Sub | BinOp::BitXor, a, b) if a == b => {
+            (BinOp::Sub | BinOp::SubWithOverflow | BinOp::SubUnchecked | BinOp::BitXor, a, b)
+                if a == b =>
+            {
                 self.insert_scalar(Scalar::from_uint(0u128, layout.size), lhs_ty)
             }
             // Comparison:
@@ -1052,7 +1117,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
             _ => return None,
         };
 
-        if checked {
+        if op.is_overflowing() {
             let false_val = self.insert_bool(false);
             Some(self.insert_tuple(vec![result, false_val]))
         } else {
@@ -1082,6 +1147,23 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
             return self.new_opaque();
         }
 
+        let mut was_updated = false;
+
+        // If this cast just casts away the metadata again, use the data pointer directly.
+        if let PtrToPtr = kind
+            && let Value::Aggregate(AggregateTy::RawPtr { data_pointer_ty, .. }, _, fields) =
+                self.get(value)
+            && let ty::RawPtr(to_pointee, _) = to.kind()
+            && to_pointee.is_sized(self.tcx, self.param_env)
+        {
+            from = *data_pointer_ty;
+            value = fields[0];
+            was_updated = true;
+            if *data_pointer_ty == to {
+                return Some(fields[0]);
+            }
+        }
+
         if let PtrToPtr | PointerCoercion(MutToConstPointer) = kind
             && let Value::Cast { kind: inner_kind, value: inner_value, from: inner_from, to: _ } =
                 *self.get(value)
@@ -1090,9 +1172,13 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
             from = inner_from;
             value = inner_value;
             *kind = PtrToPtr;
+            was_updated = true;
             if inner_from == to {
                 return Some(inner_value);
             }
+        }
+
+        if was_updated {
             if let Some(const_) = self.try_as_constant(value) {
                 *operand = Operand::Constant(Box::new(const_));
             } else if let Some(local) = self.try_as_local(value, location) {
@@ -1108,7 +1194,11 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
         // Trivial case: we are fetching a statically known length.
         let place_ty = place.ty(self.local_decls, self.tcx).ty;
         if let ty::Array(_, len) = place_ty.kind() {
-            return self.insert_constant(Const::from_ty_const(*len, self.tcx));
+            return self.insert_constant(Const::from_ty_const(
+                *len,
+                self.tcx.types.usize,
+                self.tcx,
+            ));
         }
 
         let mut inner = self.simplify_place_value(place, location)?;
@@ -1130,7 +1220,11 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
             && let Some(to) = to.builtin_deref(true)
             && let ty::Slice(..) = to.kind()
         {
-            return self.insert_constant(Const::from_ty_const(*len, self.tcx));
+            return self.insert_constant(Const::from_ty_const(
+                *len,
+                self.tcx.types.usize,
+                self.tcx,
+            ));
         }
 
         // Fallback: a symbolic `Len`.
@@ -1160,7 +1254,7 @@ fn op_to_prop_const<'tcx>(
     // If this constant has scalar ABI, return it as a `ConstValue::Scalar`.
     if let Abi::Scalar(abi::Scalar::Initialized { .. }) = op.layout.abi
         && let Ok(scalar) = ecx.read_scalar(op)
-        && scalar.try_to_int().is_ok()
+        && scalar.try_to_scalar_int().is_ok()
     {
         return Some(ConstValue::Scalar(scalar));
     }
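
A behavioral sketch of the algebra behind these GVN changes, not the pass itself: the binary-op identities in `simplify_binary` now also cover the overflow-aware operators (whose overflow flag is statically `false` in these cases), and the new raw-pointer aggregate handling corresponds to metadata round-trip identities:

```rust
fn main() {
    let x: u32 = 12345;

    // Neutral elements, now matched for the overflow-aware operators too:
    // the result is the operand and the overflow flag is statically false.
    assert_eq!(x.overflowing_add(0), (x, false));
    assert_eq!(x.overflowing_sub(0), (x, false));
    assert_eq!(x.overflowing_mul(1), (x, false));

    // Absorbing element, and sub-with-itself.
    assert_eq!(x.overflowing_mul(0), (0, false));
    assert_eq!(x.overflowing_sub(x), (0, false));

    // The new RawPtr-aggregate handling corresponds to identities like:
    // the metadata of a pointer built by `slice_from_raw_parts(p, len)`
    // is `len` again, which is what `UnOp::PtrMetadata` extracts.
    let data = [1u8, 2, 3, 4];
    let p: *const [u8] = core::ptr::slice_from_raw_parts(data.as_ptr(), 3);
    assert_eq!(p.len(), 3);
}
```
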
diff --git a/compiler/rustc_mir_transform/src/inline.rs b/compiler/rustc_mir_transform/src/inline.rs
index fe2237dd2e9..d04bb8d302e 100644
--- a/compiler/rustc_mir_transform/src/inline.rs
+++ b/compiler/rustc_mir_transform/src/inline.rs
@@ -10,7 +10,7 @@ use rustc_middle::middle::codegen_fn_attrs::{CodegenFnAttrFlags, CodegenFnAttrs}
 use rustc_middle::mir::visit::*;
 use rustc_middle::mir::*;
 use rustc_middle::ty::TypeVisitableExt;
-use rustc_middle::ty::{self, Instance, InstanceDef, ParamEnv, Ty, TyCtxt};
+use rustc_middle::ty::{self, Instance, InstanceKind, ParamEnv, Ty, TyCtxt};
 use rustc_session::config::{DebugInfo, OptLevel};
 use rustc_span::source_map::Spanned;
 use rustc_span::sym;
@@ -225,13 +225,8 @@ impl<'tcx> Inliner<'tcx> {
         // Normally, this shouldn't be required, but trait normalization failure can create a
         // validation ICE.
         let output_type = callee_body.return_ty();
-        if !util::relate_types(
-            self.tcx,
-            self.param_env,
-            ty::Variance::Covariant,
-            output_type,
-            destination_ty,
-        ) {
+        if !util::relate_types(self.tcx, self.param_env, ty::Covariant, output_type, destination_ty)
+        {
             trace!(?output_type, ?destination_ty);
             return Err("failed to normalize return type");
         }
@@ -261,13 +256,8 @@ impl<'tcx> Inliner<'tcx> {
                 self_arg_ty.into_iter().chain(arg_tuple_tys).zip(callee_body.args_iter())
             {
                 let input_type = callee_body.local_decls[input].ty;
-                if !util::relate_types(
-                    self.tcx,
-                    self.param_env,
-                    ty::Variance::Covariant,
-                    input_type,
-                    arg_ty,
-                ) {
+                if !util::relate_types(self.tcx, self.param_env, ty::Covariant, input_type, arg_ty)
+                {
                     trace!(?arg_ty, ?input_type);
                     return Err("failed to normalize tuple argument type");
                 }
@@ -276,13 +266,8 @@ impl<'tcx> Inliner<'tcx> {
             for (arg, input) in args.iter().zip(callee_body.args_iter()) {
                 let input_type = callee_body.local_decls[input].ty;
                 let arg_ty = arg.node.ty(&caller_body.local_decls, self.tcx);
-                if !util::relate_types(
-                    self.tcx,
-                    self.param_env,
-                    ty::Variance::Covariant,
-                    input_type,
-                    arg_ty,
-                ) {
+                if !util::relate_types(self.tcx, self.param_env, ty::Covariant, input_type, arg_ty)
+                {
                     trace!(?arg_ty, ?input_type);
                     return Err("failed to normalize argument type");
                 }
@@ -308,7 +293,7 @@ impl<'tcx> Inliner<'tcx> {
         }
 
         match callee.def {
-            InstanceDef::Item(_) => {
+            InstanceKind::Item(_) => {
                 // If there is no MIR available (either because it was not in metadata or
                 // because it has no MIR because it's an extern function), then the inliner
                 // won't cause cycles on this.
@@ -317,24 +302,24 @@ impl<'tcx> Inliner<'tcx> {
                 }
             }
             // These have no own callable MIR.
-            InstanceDef::Intrinsic(_) | InstanceDef::Virtual(..) => {
+            InstanceKind::Intrinsic(_) | InstanceKind::Virtual(..) => {
                 return Err("instance without MIR (intrinsic / virtual)");
             }
             // This cannot result in an immediate cycle since the callee MIR is a shim, which does
             // not get any optimizations run on it. Any subsequent inlining may cause cycles, but we
             // do not need to catch this here, we can wait until the inliner decides to continue
             // inlining a second time.
-            InstanceDef::VTableShim(_)
-            | InstanceDef::ReifyShim(..)
-            | InstanceDef::FnPtrShim(..)
-            | InstanceDef::ClosureOnceShim { .. }
-            | InstanceDef::ConstructCoroutineInClosureShim { .. }
-            | InstanceDef::CoroutineKindShim { .. }
-            | InstanceDef::DropGlue(..)
-            | InstanceDef::CloneShim(..)
-            | InstanceDef::ThreadLocalShim(..)
-            | InstanceDef::FnPtrAddrShim(..)
-            | InstanceDef::AsyncDropGlueCtorShim(..) => return Ok(()),
+            InstanceKind::VTableShim(_)
+            | InstanceKind::ReifyShim(..)
+            | InstanceKind::FnPtrShim(..)
+            | InstanceKind::ClosureOnceShim { .. }
+            | InstanceKind::ConstructCoroutineInClosureShim { .. }
+            | InstanceKind::CoroutineKindShim { .. }
+            | InstanceKind::DropGlue(..)
+            | InstanceKind::CloneShim(..)
+            | InstanceKind::ThreadLocalShim(..)
+            | InstanceKind::FnPtrAddrShim(..)
+            | InstanceKind::AsyncDropGlueCtorShim(..) => return Ok(()),
         }
 
         if self.tcx.is_constructor(callee_def_id) {
@@ -387,7 +372,7 @@ impl<'tcx> Inliner<'tcx> {
                 let callee =
                     Instance::resolve(self.tcx, self.param_env, def_id, args).ok().flatten()?;
 
-                if let InstanceDef::Virtual(..) | InstanceDef::Intrinsic(_) = callee.def {
+                if let InstanceKind::Virtual(..) | InstanceKind::Intrinsic(_) = callee.def {
                     return None;
                 }
 
@@ -399,7 +384,7 @@ impl<'tcx> Inliner<'tcx> {
 
                 // Additionally, check that the body that we're inlining actually agrees
                 // with the ABI of the trait that the item comes from.
-                if let InstanceDef::Item(instance_def_id) = callee.def
+                if let InstanceKind::Item(instance_def_id) = callee.def
                     && self.tcx.def_kind(instance_def_id) == DefKind::AssocFn
                     && let instance_fn_sig = self.tcx.fn_sig(instance_def_id).skip_binder()
                     && instance_fn_sig.abi() != fn_sig.abi()
@@ -1078,10 +1063,10 @@ impl<'tcx> MutVisitor<'tcx> for Integrator<'_, 'tcx> {
 #[instrument(skip(tcx), level = "debug")]
 fn try_instance_mir<'tcx>(
     tcx: TyCtxt<'tcx>,
-    instance: InstanceDef<'tcx>,
+    instance: InstanceKind<'tcx>,
 ) -> Result<&'tcx Body<'tcx>, &'static str> {
-    if let ty::InstanceDef::DropGlue(_, Some(ty))
-    | ty::InstanceDef::AsyncDropGlueCtorShim(_, Some(ty)) = instance
+    if let ty::InstanceKind::DropGlue(_, Some(ty))
+    | ty::InstanceKind::AsyncDropGlueCtorShim(_, Some(ty)) = instance
         && let ty::Adt(def, args) = ty.kind()
     {
         let fields = def.all_fields();
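
A surface-level illustration of the return-type compatibility the inliner checks with `relate_types` (here spelled with the shorter `ty::Covariant` path). This is ordinary subtyping shown in plain Rust, not compiler code:

```rust
fn callee() -> &'static str {
    "inlined"
}

// The call site expects `&'a str`; `&'static str` is a subtype of it,
// so relating the two types covariantly succeeds and inlining the body
// of `callee` here is type-correct.
fn caller<'a>(fallback: &'a str) -> &'a str {
    if fallback.is_empty() { callee() } else { fallback }
}

fn main() {
    assert_eq!(caller(""), "inlined");
    assert_eq!(caller("x"), "x");
}
```
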
diff --git a/compiler/rustc_mir_transform/src/inline/cycle.rs b/compiler/rustc_mir_transform/src/inline/cycle.rs
index 8c5f965108b..35bcd24ce95 100644
--- a/compiler/rustc_mir_transform/src/inline/cycle.rs
+++ b/compiler/rustc_mir_transform/src/inline/cycle.rs
@@ -3,7 +3,7 @@ use rustc_data_structures::stack::ensure_sufficient_stack;
 use rustc_hir::def_id::{DefId, LocalDefId};
 use rustc_middle::mir::TerminatorKind;
 use rustc_middle::ty::TypeVisitableExt;
-use rustc_middle::ty::{self, GenericArgsRef, InstanceDef, TyCtxt};
+use rustc_middle::ty::{self, GenericArgsRef, InstanceKind, TyCtxt};
 use rustc_session::Limit;
 use rustc_span::sym;
 
@@ -22,7 +22,7 @@ pub(crate) fn mir_callgraph_reachable<'tcx>(
         "you should not call `mir_callgraph_reachable` on immediate self recursion"
     );
     assert!(
-        matches!(root.def, InstanceDef::Item(_)),
+        matches!(root.def, InstanceKind::Item(_)),
         "you should not call `mir_callgraph_reachable` on shims"
     );
     assert!(
@@ -70,7 +70,7 @@ pub(crate) fn mir_callgraph_reachable<'tcx>(
             }
 
             match callee.def {
-                InstanceDef::Item(_) => {
+                InstanceKind::Item(_) => {
                     // If there is no MIR available (either because it was not in metadata or
                     // because it has no MIR because it's an extern function), then the inliner
                     // won't cause cycles on this.
@@ -80,24 +80,24 @@ pub(crate) fn mir_callgraph_reachable<'tcx>(
                     }
                 }
                 // These have no own callable MIR.
-                InstanceDef::Intrinsic(_) | InstanceDef::Virtual(..) => continue,
+                InstanceKind::Intrinsic(_) | InstanceKind::Virtual(..) => continue,
                 // These have MIR, and if that MIR is inlined, instantiated, and then inlining is
                 // run again, a function item can end up getting inlined. Thus we'll be able to
                 // cause a cycle that way.
-                InstanceDef::VTableShim(_)
-                | InstanceDef::ReifyShim(..)
-                | InstanceDef::FnPtrShim(..)
-                | InstanceDef::ClosureOnceShim { .. }
-                | InstanceDef::ConstructCoroutineInClosureShim { .. }
-                | InstanceDef::CoroutineKindShim { .. }
-                | InstanceDef::ThreadLocalShim { .. }
-                | InstanceDef::CloneShim(..) => {}
+                InstanceKind::VTableShim(_)
+                | InstanceKind::ReifyShim(..)
+                | InstanceKind::FnPtrShim(..)
+                | InstanceKind::ClosureOnceShim { .. }
+                | InstanceKind::ConstructCoroutineInClosureShim { .. }
+                | InstanceKind::CoroutineKindShim { .. }
+                | InstanceKind::ThreadLocalShim { .. }
+                | InstanceKind::CloneShim(..) => {}
 
                 // This shim does not call any other functions, thus there can be no recursion.
-                InstanceDef::FnPtrAddrShim(..) => {
+                InstanceKind::FnPtrAddrShim(..) => {
                     continue;
                 }
-                InstanceDef::DropGlue(..) | InstanceDef::AsyncDropGlueCtorShim(..) => {
+                InstanceKind::DropGlue(..) | InstanceKind::AsyncDropGlueCtorShim(..) => {
                     // FIXME: A drop shim that is not fully instantiated can cause ICEs if one
                     // attempts to have its MIR built. Likely oli-obk just screwed up the
                     // `ParamEnv`s, so this needs some more analysis.
@@ -151,12 +151,12 @@ pub(crate) fn mir_callgraph_reachable<'tcx>(
 
 pub(crate) fn mir_inliner_callees<'tcx>(
     tcx: TyCtxt<'tcx>,
-    instance: ty::InstanceDef<'tcx>,
+    instance: ty::InstanceKind<'tcx>,
 ) -> &'tcx [(DefId, GenericArgsRef<'tcx>)] {
     let steal;
     let guard;
     let body = match (instance, instance.def_id().as_local()) {
-        (InstanceDef::Item(_), Some(def_id)) => {
+        (InstanceKind::Item(_), Some(def_id)) => {
             steal = tcx.mir_promoted(def_id).0;
             guard = steal.borrow();
             &*guard
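
`mir_callgraph_reachable` exists because inlining a body that can reach the caller again would never terminate. The simplest shape it guards against is plain mutual recursion; an illustrative source-level example (not compiler code):

```rust
// Inlining `b` into `a` exposes a call back to `a`; inlining that again
// exposes `b`, and so on forever. The reachability query detects that
// `a` is reachable from its own callee and refuses to inline.
fn a(n: u32) -> u32 {
    if n == 0 { 0 } else { b(n - 1) }
}

fn b(n: u32) -> u32 {
    if n == 0 { 1 } else { a(n - 1) }
}

fn main() {
    assert_eq!(a(4), 0); // even depth bottoms out in `a`
    assert_eq!(a(3), 1); // odd depth bottoms out in `b`
}
```
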
diff --git a/compiler/rustc_mir_transform/src/instsimplify.rs b/compiler/rustc_mir_transform/src/instsimplify.rs
index 40db3e38fd3..6806c517c17 100644
--- a/compiler/rustc_mir_transform/src/instsimplify.rs
+++ b/compiler/rustc_mir_transform/src/instsimplify.rs
@@ -150,7 +150,7 @@ impl<'tcx> InstSimplifyContext<'tcx, '_> {
                     return;
                 }
 
-                let const_ = Const::from_ty_const(len, self.tcx);
+                let const_ = Const::from_ty_const(len, self.tcx.types.usize, self.tcx);
                 let constant = ConstOperand { span: source_info.span, const_, user_ty: None };
                 *rvalue = Rvalue::Use(Operand::Constant(Box::new(constant)));
             }
diff --git a/compiler/rustc_mir_transform/src/known_panics_lint.rs b/compiler/rustc_mir_transform/src/known_panics_lint.rs
index 8b46658b322..8d6c00bbedb 100644
--- a/compiler/rustc_mir_transform/src/known_panics_lint.rs
+++ b/compiler/rustc_mir_transform/src/known_panics_lint.rs
@@ -356,15 +356,12 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
                 debug!("check_binary_op: reporting assert for {:?}", location);
                 let panic = AssertKind::Overflow(
                     op,
-                    match l {
-                        Some(l) => l.to_const_int(),
-                        // Invent a dummy value, the diagnostic ignores it anyway
-                        None => ConstInt::new(
-                            ScalarInt::try_from_uint(1_u8, left_size).unwrap(),
-                            left_ty.is_signed(),
-                            left_ty.is_ptr_sized_integral(),
-                        ),
-                    },
+                    // Invent a dummy value, the diagnostic ignores it anyway
+                    ConstInt::new(
+                        ScalarInt::try_from_uint(1_u8, left_size).unwrap(),
+                        left_ty.is_signed(),
+                        left_ty.is_ptr_sized_integral(),
+                    ),
                     r.to_const_int(),
                 );
                 self.report_assert_as_lint(location, AssertLintKind::ArithmeticOverflow, panic);
@@ -625,9 +622,10 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
                 let val = match null_op {
                     NullOp::SizeOf => op_layout.size.bytes(),
                     NullOp::AlignOf => op_layout.align.abi.bytes(),
-                    NullOp::OffsetOf(fields) => {
-                        op_layout.offset_of_subfield(self, fields.iter()).bytes()
-                    }
+                    NullOp::OffsetOf(fields) => self
+                        .tcx
+                        .offset_of_subfield(self.param_env, op_layout, fields.iter())
+                        .bytes(),
                     NullOp::UbChecks => return None,
                 };
                 ImmTy::from_scalar(Scalar::from_target_usize(val, self), layout).into()
@@ -708,9 +706,9 @@ impl<'tcx> Visitor<'tcx> for ConstPropagator<'_, 'tcx> {
         self.super_operand(operand, location);
     }
 
-    fn visit_constant(&mut self, constant: &ConstOperand<'tcx>, location: Location) {
-        trace!("visit_constant: {:?}", constant);
-        self.super_constant(constant, location);
+    fn visit_const_operand(&mut self, constant: &ConstOperand<'tcx>, location: Location) {
+        trace!("visit_const_operand: {:?}", constant);
+        self.super_const_operand(constant, location);
         self.eval_constant(constant);
     }
 
@@ -786,8 +784,7 @@ impl<'tcx> Visitor<'tcx> for ConstPropagator<'_, 'tcx> {
             TerminatorKind::SwitchInt { ref discr, ref targets } => {
                 if let Some(ref value) = self.eval_operand(discr)
                     && let Some(value_const) = self.use_ecx(|this| this.ecx.read_scalar(value))
-                    && let Ok(constant) = value_const.try_to_int()
-                    && let Ok(constant) = constant.try_to_bits(constant.size())
+                    && let Ok(constant) = value_const.to_bits(value_const.size())
                 {
                     // We managed to evaluate the discriminant, so we know we only need to visit
                     // one target.
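
For context, the overflow assertion this hunk reformulates (now always passing a dummy left operand, since the diagnostic ignores it) backs lints like the one triggered below. A minimal trigger, under the assumption that overflow checks are enabled as in default debug builds; the lint name is `arithmetic_overflow` and is deny-by-default, so the `allow` keeps the illustration compiling:

```rust
#[allow(arithmetic_overflow)]
fn main() {
    let x: u8 = 255;
    // The pass can propagate `x = 255` and prove this addition overflows,
    // reporting it at compile time; at runtime it panics in debug builds.
    let y = x + 1;
    println!("{y}");
}
```
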
diff --git a/compiler/rustc_mir_transform/src/lib.rs b/compiler/rustc_mir_transform/src/lib.rs
index a8741254ffb..afba6781a70 100644
--- a/compiler/rustc_mir_transform/src/lib.rs
+++ b/compiler/rustc_mir_transform/src/lib.rs
@@ -1,8 +1,10 @@
+// tidy-alphabetical-start
 #![feature(assert_matches)]
 #![feature(box_patterns)]
 #![feature(const_type_name)]
 #![feature(cow_is_borrowed)]
 #![feature(decl_macro)]
+#![feature(if_let_guard)]
 #![feature(impl_trait_in_assoc_type)]
 #![feature(is_sorted)]
 #![feature(let_chains)]
@@ -12,7 +14,7 @@
 #![feature(round_char_boundary)]
 #![feature(try_blocks)]
 #![feature(yeet_expr)]
-#![feature(if_let_guard)]
+// tidy-alphabetical-end
 
 #[macro_use]
 extern crate tracing;
@@ -55,7 +57,6 @@ mod remove_place_mention;
 // This pass is public to allow external drivers to perform MIR cleanup
 mod add_subtyping_projections;
 pub mod cleanup_post_borrowck;
-mod const_debuginfo;
 mod copy_prop;
 mod coroutine;
 mod cost_checker;
@@ -106,6 +107,7 @@ mod check_alignment;
 pub mod simplify;
 mod simplify_branches;
 mod simplify_comparison_integral;
+mod single_use_consts;
 mod sroa;
 mod unreachable_enum_branching;
 mod unreachable_prop;
@@ -211,7 +213,7 @@ fn remap_mir_for_const_eval_select<'tcx>(
 }
 
 fn is_mir_available(tcx: TyCtxt<'_>, def_id: LocalDefId) -> bool {
-    tcx.hir().maybe_body_owned_by(def_id).is_some()
+    tcx.mir_keys(()).contains(&def_id)
 }
 
 /// Finds the full set of `DefId`s within the current crate that have
@@ -222,16 +224,6 @@ fn mir_keys(tcx: TyCtxt<'_>, (): ()) -> FxIndexSet<LocalDefId> {
     // All body-owners have MIR associated with them.
     set.extend(tcx.hir().body_owners());
 
-    // Inline consts' bodies are created in
-    // typeck instead of during ast lowering, like all other bodies so far.
-    for def_id in tcx.hir().body_owners() {
-        // Incremental performance optimization: only load typeck results for things that actually have inline consts
-        if tcx.hir_owner_nodes(tcx.hir().body_owned_by(def_id).id().hir_id.owner).has_inline_consts
-        {
-            set.extend(tcx.typeck(def_id).inline_consts.values())
-        }
-    }
-
     // Additionally, tuple struct/variant constructors have MIR, but
     // they don't have a BodyId, so we need to build them separately.
     struct GatherCtors<'a> {
@@ -406,7 +398,7 @@ fn mir_drops_elaborated_and_const_checked(tcx: TyCtxt<'_>, def: LocalDefId) -> &
     if is_fn_like {
         // Do not compute the mir call graph without said call graph actually being used.
         if pm::should_run_pass(tcx, &inline::Inline) {
-            tcx.ensure_with_value().mir_inliner_callees(ty::InstanceDef::Item(def.to_def_id()));
+            tcx.ensure_with_value().mir_inliner_callees(ty::InstanceKind::Item(def.to_def_id()));
         }
     }
 
@@ -603,7 +595,7 @@ fn run_optimization_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
             &gvn::GVN,
             &simplify::SimplifyLocals::AfterGVN,
             &dataflow_const_prop::DataflowConstProp,
-            &const_debuginfo::ConstDebugInfo,
+            &single_use_consts::SingleUseConsts,
             &o1(simplify_branches::SimplifyConstCondition::AfterConstProp),
             &jump_threading::JumpThreading,
             &early_otherwise_branch::EarlyOtherwiseBranch,
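
The new `SingleUseConsts` pass slotted in above (replacing `ConstDebugInfo`) is built around a simple idea: a local that is assigned a constant exactly once and read exactly once can have the constant used directly at the read. A self-contained toy version over a made-up mini-IR; the IR and names are hypothetical, for illustration only, and it assumes the single def precedes the use (no control flow):

```rust
#[derive(Clone, Debug, PartialEq)]
enum Operand {
    Local(usize),
    Const(i64),
}

#[derive(Clone, Debug, PartialEq)]
enum Stmt {
    // `_dst = const val`
    AssignConst { dst: usize, val: i64 },
    // Some use of an operand, e.g. a call argument.
    Use(Operand),
}

fn single_use_consts(body: &mut Vec<Stmt>, num_locals: usize) {
    let mut defs = vec![0usize; num_locals];
    let mut uses = vec![0usize; num_locals];
    let mut value: Vec<Option<i64>> = vec![None; num_locals];

    // Count defs and uses, remembering the assigned constant.
    for stmt in body.iter() {
        match stmt {
            Stmt::AssignConst { dst, val } => {
                defs[*dst] += 1;
                value[*dst] = Some(*val);
            }
            Stmt::Use(Operand::Local(l)) => uses[*l] += 1,
            Stmt::Use(Operand::Const(_)) => {}
        }
    }

    let single_use = |l: usize| defs[l] == 1 && uses[l] == 1 && value[l].is_some();

    // Inline the constant into the single use...
    for stmt in body.iter_mut() {
        let replacement = match stmt {
            Stmt::Use(Operand::Local(l)) if single_use(*l) => value[*l],
            _ => None,
        };
        if let Some(v) = replacement {
            *stmt = Stmt::Use(Operand::Const(v));
        }
    }
    // ...and drop the now-dead assignment.
    body.retain(|s| !matches!(s, Stmt::AssignConst { dst, .. } if single_use(*dst)));
}

fn main() {
    let mut body = vec![
        Stmt::AssignConst { dst: 0, val: 42 },
        Stmt::Use(Operand::Local(0)),
    ];
    single_use_consts(&mut body, 1);
    assert_eq!(body, vec![Stmt::Use(Operand::Const(42))]);
}
```
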
diff --git a/compiler/rustc_mir_transform/src/match_branches.rs b/compiler/rustc_mir_transform/src/match_branches.rs
index 1411d9be223..6ab4ec6fe7e 100644
--- a/compiler/rustc_mir_transform/src/match_branches.rs
+++ b/compiler/rustc_mir_transform/src/match_branches.rs
@@ -372,7 +372,7 @@ impl<'tcx> SimplifyMatch<'tcx> for SimplifyToExp {
         }
 
         fn int_equal(l: ScalarInt, r: impl Into<u128>, size: Size) -> bool {
-            l.assert_int(l.size()) == ScalarInt::try_from_uint(r, size).unwrap().assert_int(size)
+            l.to_bits_unchecked() == ScalarInt::try_from_uint(r, size).unwrap().to_bits_unchecked()
         }
 
         // We first compare the two branches, and then the other branches need to fulfill the same conditions.
diff --git a/compiler/rustc_mir_transform/src/normalize_array_len.rs b/compiler/rustc_mir_transform/src/normalize_array_len.rs
index 2070895c900..d5e72706661 100644
--- a/compiler/rustc_mir_transform/src/normalize_array_len.rs
+++ b/compiler/rustc_mir_transform/src/normalize_array_len.rs
@@ -95,7 +95,7 @@ impl<'tcx> MutVisitor<'tcx> for Replacer<'tcx> {
             *rvalue = Rvalue::Use(Operand::Constant(Box::new(ConstOperand {
                 span: rustc_span::DUMMY_SP,
                 user_ty: None,
-                const_: Const::from_ty_const(len, self.tcx),
+                const_: Const::from_ty_const(len, self.tcx.types.usize, self.tcx),
             })));
         }
         self.super_rvalue(rvalue, loc);
diff --git a/compiler/rustc_mir_transform/src/promote_consts.rs b/compiler/rustc_mir_transform/src/promote_consts.rs
index 7ec59cc983f..ecdca8292b4 100644
--- a/compiler/rustc_mir_transform/src/promote_consts.rs
+++ b/compiler/rustc_mir_transform/src/promote_consts.rs
@@ -500,14 +500,14 @@ impl<'tcx> Validator<'_, 'tcx> {
                                 }
                                 _ => None,
                             };
-                            match rhs_val.map(|x| x.assert_uint(sz)) {
+                            match rhs_val.map(|x| x.to_uint(sz)) {
                                 // for the zero test, int vs uint does not matter
                                 Some(x) if x != 0 => {}        // okay
                                 _ => return Err(Unpromotable), // value not known or 0 -- not okay
                             }
                             // Furthermore, for signed division, we also have to exclude `int::MIN / -1`.
                             if lhs_ty.is_signed() {
-                                match rhs_val.map(|x| x.assert_int(sz)) {
+                                match rhs_val.map(|x| x.to_int(sz)) {
                                     Some(-1) | None => {
                                         // The RHS is -1 or unknown, so we have to be careful.
                                         // But is the LHS int::MIN?
@@ -518,7 +518,7 @@ impl<'tcx> Validator<'_, 'tcx> {
                                             _ => None,
                                         };
                                         let lhs_min = sz.signed_int_min();
-                                        match lhs_val.map(|x| x.assert_int(sz)) {
+                                        match lhs_val.map(|x| x.to_int(sz)) {
                                             Some(x) if x != lhs_min => {}  // okay
                                             _ => return Err(Unpromotable), // value not known or int::MIN -- not okay
                                         }
@@ -956,7 +956,7 @@ impl<'a, 'tcx> MutVisitor<'tcx> for Promoter<'a, 'tcx> {
         }
     }
 
-    fn visit_constant(&mut self, constant: &mut ConstOperand<'tcx>, _location: Location) {
+    fn visit_const_operand(&mut self, constant: &mut ConstOperand<'tcx>, _location: Location) {
         if constant.const_.is_required_const() {
             self.promoted.required_consts.push(*constant);
         }
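
The promotion checks around division in this file hinge on two facts: dividing by zero panics, and `int::MIN / -1` overflows because the mathematical result does not fit in the type. Both are easy to confirm at the surface level:

```rust
fn main() {
    // Division by zero can never be promoted: it would panic.
    assert_eq!(1i32.checked_div(0), None);
    assert_eq!(1i32.checked_rem(0), None);

    // For signed division, `int::MIN / -1` must also be excluded:
    // the true result (2147483648 for i32) is unrepresentable.
    assert_eq!(i32::MIN.checked_div(-1), None);
    assert_eq!(i32::MIN.checked_rem(-1), None);

    // Any other known-nonzero divisor is fine.
    assert_eq!(i32::MIN.checked_div(1), Some(i32::MIN));
}
```
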
diff --git a/compiler/rustc_mir_transform/src/required_consts.rs b/compiler/rustc_mir_transform/src/required_consts.rs
index 71ac929d35e..00bfb5e6600 100644
--- a/compiler/rustc_mir_transform/src/required_consts.rs
+++ b/compiler/rustc_mir_transform/src/required_consts.rs
@@ -12,7 +12,7 @@ impl<'a, 'tcx> RequiredConstsVisitor<'a, 'tcx> {
 }
 
 impl<'tcx> Visitor<'tcx> for RequiredConstsVisitor<'_, 'tcx> {
-    fn visit_constant(&mut self, constant: &ConstOperand<'tcx>, _: Location) {
+    fn visit_const_operand(&mut self, constant: &ConstOperand<'tcx>, _: Location) {
         if constant.const_.is_required_const() {
             self.required_consts.push(*constant);
         }
diff --git a/compiler/rustc_mir_transform/src/reveal_all.rs b/compiler/rustc_mir_transform/src/reveal_all.rs
index 4d2eca57840..5eaa024f846 100644
--- a/compiler/rustc_mir_transform/src/reveal_all.rs
+++ b/compiler/rustc_mir_transform/src/reveal_all.rs
@@ -49,14 +49,14 @@ impl<'tcx> MutVisitor<'tcx> for RevealAllVisitor<'tcx> {
     }
 
     #[inline]
-    fn visit_constant(&mut self, constant: &mut ConstOperand<'tcx>, location: Location) {
+    fn visit_const_operand(&mut self, constant: &mut ConstOperand<'tcx>, location: Location) {
         // We have to use `try_normalize_erasing_regions` here, since it's
         // possible that we visit impossible-to-satisfy where clauses here,
         // see #91745
         if let Ok(c) = self.tcx.try_normalize_erasing_regions(self.param_env, constant.const_) {
             constant.const_ = c;
         }
-        self.super_constant(constant, location);
+        self.super_const_operand(constant, location);
     }
 
     #[inline]
diff --git a/compiler/rustc_mir_transform/src/shim.rs b/compiler/rustc_mir_transform/src/shim.rs
index d03c2d18c0c..825f8957187 100644
--- a/compiler/rustc_mir_transform/src/shim.rs
+++ b/compiler/rustc_mir_transform/src/shim.rs
@@ -29,16 +29,16 @@ pub fn provide(providers: &mut Providers) {
     providers.mir_shims = make_shim;
 }
 
-fn make_shim<'tcx>(tcx: TyCtxt<'tcx>, instance: ty::InstanceDef<'tcx>) -> Body<'tcx> {
+fn make_shim<'tcx>(tcx: TyCtxt<'tcx>, instance: ty::InstanceKind<'tcx>) -> Body<'tcx> {
     debug!("make_shim({:?})", instance);
 
     let mut result = match instance {
-        ty::InstanceDef::Item(..) => bug!("item {:?} passed to make_shim", instance),
-        ty::InstanceDef::VTableShim(def_id) => {
+        ty::InstanceKind::Item(..) => bug!("item {:?} passed to make_shim", instance),
+        ty::InstanceKind::VTableShim(def_id) => {
             let adjustment = Adjustment::Deref { source: DerefSource::MutPtr };
             build_call_shim(tcx, instance, Some(adjustment), CallKind::Direct(def_id))
         }
-        ty::InstanceDef::FnPtrShim(def_id, ty) => {
+        ty::InstanceKind::FnPtrShim(def_id, ty) => {
             let trait_ = tcx.trait_of_item(def_id).unwrap();
             // Supports `Fn` or `async Fn` traits.
             let adjustment = match tcx
@@ -58,10 +58,10 @@ fn make_shim<'tcx>(tcx: TyCtxt<'tcx>, instance: ty::InstanceDef<'tcx>) -> Body<'
         // a virtual call, or a direct call to a function for which
         // indirect calls must be codegen'd differently than direct ones
         // (such as `#[track_caller]`).
-        ty::InstanceDef::ReifyShim(def_id, _) => {
+        ty::InstanceKind::ReifyShim(def_id, _) => {
             build_call_shim(tcx, instance, None, CallKind::Direct(def_id))
         }
-        ty::InstanceDef::ClosureOnceShim { call_once: _, track_caller: _ } => {
+        ty::InstanceKind::ClosureOnceShim { call_once: _, track_caller: _ } => {
             let fn_mut = tcx.require_lang_item(LangItem::FnMut, None);
             let call_mut = tcx
                 .associated_items(fn_mut)
@@ -73,16 +73,16 @@ fn make_shim<'tcx>(tcx: TyCtxt<'tcx>, instance: ty::InstanceDef<'tcx>) -> Body<'
             build_call_shim(tcx, instance, Some(Adjustment::RefMut), CallKind::Direct(call_mut))
         }
 
-        ty::InstanceDef::ConstructCoroutineInClosureShim {
+        ty::InstanceKind::ConstructCoroutineInClosureShim {
             coroutine_closure_def_id,
             receiver_by_ref,
         } => build_construct_coroutine_by_move_shim(tcx, coroutine_closure_def_id, receiver_by_ref),
 
-        ty::InstanceDef::CoroutineKindShim { coroutine_def_id } => {
+        ty::InstanceKind::CoroutineKindShim { coroutine_def_id } => {
             return tcx.optimized_mir(coroutine_def_id).coroutine_by_move_body().unwrap().clone();
         }
 
-        ty::InstanceDef::DropGlue(def_id, ty) => {
+        ty::InstanceKind::DropGlue(def_id, ty) => {
             // FIXME(#91576): Drop shims for coroutines aren't subject to the MIR passes at the end
             // of this function. Is this intentional?
             if let Some(ty::Coroutine(coroutine_def_id, args)) = ty.map(Ty::kind) {
@@ -127,16 +127,16 @@ fn make_shim<'tcx>(tcx: TyCtxt<'tcx>, instance: ty::InstanceDef<'tcx>) -> Body<'
 
             build_drop_shim(tcx, def_id, ty)
         }
-        ty::InstanceDef::ThreadLocalShim(..) => build_thread_local_shim(tcx, instance),
-        ty::InstanceDef::CloneShim(def_id, ty) => build_clone_shim(tcx, def_id, ty),
-        ty::InstanceDef::FnPtrAddrShim(def_id, ty) => build_fn_ptr_addr_shim(tcx, def_id, ty),
-        ty::InstanceDef::AsyncDropGlueCtorShim(def_id, ty) => {
+        ty::InstanceKind::ThreadLocalShim(..) => build_thread_local_shim(tcx, instance),
+        ty::InstanceKind::CloneShim(def_id, ty) => build_clone_shim(tcx, def_id, ty),
+        ty::InstanceKind::FnPtrAddrShim(def_id, ty) => build_fn_ptr_addr_shim(tcx, def_id, ty),
+        ty::InstanceKind::AsyncDropGlueCtorShim(def_id, ty) => {
             async_destructor_ctor::build_async_destructor_ctor_shim(tcx, def_id, ty)
         }
-        ty::InstanceDef::Virtual(..) => {
-            bug!("InstanceDef::Virtual ({:?}) is for direct calls only", instance)
+        ty::InstanceKind::Virtual(..) => {
+            bug!("InstanceKind::Virtual ({:?}) is for direct calls only", instance)
         }
-        ty::InstanceDef::Intrinsic(_) => {
+        ty::InstanceKind::Intrinsic(_) => {
             bug!("creating shims from intrinsics ({:?}) is unsupported", instance)
         }
     };
@@ -240,7 +240,7 @@ fn build_drop_shim<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId, ty: Option<Ty<'tcx>>)
     block(&mut blocks, TerminatorKind::Goto { target: return_block });
     block(&mut blocks, TerminatorKind::Return);
 
-    let source = MirSource::from_instance(ty::InstanceDef::DropGlue(def_id, ty));
+    let source = MirSource::from_instance(ty::InstanceKind::DropGlue(def_id, ty));
     let mut body =
         new_body(source, blocks, local_decls_for_sig(&sig, span), sig.inputs().len(), span);
 
@@ -392,7 +392,10 @@ impl<'a, 'tcx> DropElaborator<'a, 'tcx> for DropShimElaborator<'a, 'tcx> {
     }
 }
 
-fn build_thread_local_shim<'tcx>(tcx: TyCtxt<'tcx>, instance: ty::InstanceDef<'tcx>) -> Body<'tcx> {
+fn build_thread_local_shim<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    instance: ty::InstanceKind<'tcx>,
+) -> Body<'tcx> {
     let def_id = instance.def_id();
 
     let span = tcx.def_span(def_id);
@@ -472,7 +475,7 @@ impl<'tcx> CloneShimBuilder<'tcx> {
     }
 
     fn into_mir(self) -> Body<'tcx> {
-        let source = MirSource::from_instance(ty::InstanceDef::CloneShim(
+        let source = MirSource::from_instance(ty::InstanceKind::CloneShim(
             self.def_id,
             self.sig.inputs_and_output[0],
         ));
@@ -682,14 +685,14 @@ impl<'tcx> CloneShimBuilder<'tcx> {
 #[instrument(level = "debug", skip(tcx), ret)]
 fn build_call_shim<'tcx>(
     tcx: TyCtxt<'tcx>,
-    instance: ty::InstanceDef<'tcx>,
+    instance: ty::InstanceKind<'tcx>,
     rcvr_adjustment: Option<Adjustment>,
     call_kind: CallKind<'tcx>,
 ) -> Body<'tcx> {
     // `FnPtrShim` contains the fn pointer type that a call shim is being built for - this is used
     // to instantiate into the signature of the shim. It is not necessary for users of this
-    // MIR body to perform further instantiations (see `InstanceDef::has_polymorphic_mir_body`).
-    let (sig_args, untuple_args) = if let ty::InstanceDef::FnPtrShim(_, ty) = instance {
+    // MIR body to perform further instantiations (see `InstanceKind::has_polymorphic_mir_body`).
+    let (sig_args, untuple_args) = if let ty::InstanceKind::FnPtrShim(_, ty) = instance {
         let sig = tcx.instantiate_bound_regions_with_erased(ty.fn_sig(tcx));
 
         let untuple_args = sig.inputs();
@@ -741,8 +744,8 @@ fn build_call_shim<'tcx>(
     }
 
     // FIXME(eddyb) avoid having this snippet both here and in
-    // `Instance::fn_sig` (introduce `InstanceDef::fn_sig`?).
-    if let ty::InstanceDef::VTableShim(..) = instance {
+    // `Instance::fn_sig` (introduce `InstanceKind::fn_sig`?).
+    if let ty::InstanceKind::VTableShim(..) = instance {
         // Modify fn(self, ...) to fn(self: *mut Self, ...)
         let mut inputs_and_output = sig.inputs_and_output.to_vec();
         let self_arg = &mut inputs_and_output[0];
@@ -1007,7 +1010,7 @@ fn build_fn_ptr_addr_shim<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId, self_ty: Ty<'t
         terminator: Some(Terminator { source_info, kind: TerminatorKind::Return }),
         is_cleanup: false,
     };
-    let source = MirSource::from_instance(ty::InstanceDef::FnPtrAddrShim(def_id, self_ty));
+    let source = MirSource::from_instance(ty::InstanceKind::FnPtrAddrShim(def_id, self_ty));
     new_body(source, IndexVec::from_elem_n(start_block, 1), locals, sig.inputs().len(), span)
 }
 
@@ -1087,7 +1090,7 @@ fn build_construct_coroutine_by_move_shim<'tcx>(
         is_cleanup: false,
     };
 
-    let source = MirSource::from_instance(ty::InstanceDef::ConstructCoroutineInClosureShim {
+    let source = MirSource::from_instance(ty::InstanceKind::ConstructCoroutineInClosureShim {
         coroutine_closure_def_id,
         receiver_by_ref,
     });
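
The `ClosureOnceShim` arm earlier in this file wires `FnOnce` dispatch to `FnMut::call_mut` through an `Adjustment::RefMut` on the receiver. Semantically it is the forwarding that every `FnMut` closure admits; a runnable plain-Rust analogue (not compiler code, and `call_once_via_mut` is a made-up name):

    // The shape the shim builds in MIR: `FnOnce::call_once(f, args)`
    // implemented by taking the receiver by `&mut` and forwarding.
    fn call_once_via_mut<F: FnMut() -> i32>(mut f: F) -> i32 {
        f()
    }

    fn main() {
        let mut count = 0;
        assert_eq!(call_once_via_mut(|| { count += 1; count }), 1);
    }
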
diff --git a/compiler/rustc_mir_transform/src/shim/async_destructor_ctor.rs b/compiler/rustc_mir_transform/src/shim/async_destructor_ctor.rs
index aa9c87d8f80..ea4f5fca59e 100644
--- a/compiler/rustc_mir_transform/src/shim/async_destructor_ctor.rs
+++ b/compiler/rustc_mir_transform/src/shim/async_destructor_ctor.rs
@@ -529,7 +529,7 @@ impl<'tcx> AsyncDestructorCtorShimBuilder<'tcx> {
 
         last_bb.terminator = Some(Terminator { source_info, kind: TerminatorKind::Return });
 
-        let source = MirSource::from_instance(ty::InstanceDef::AsyncDropGlueCtorShim(
+        let source = MirSource::from_instance(ty::InstanceKind::AsyncDropGlueCtorShim(
             self.def_id,
             self.self_ty,
         ));
@@ -561,7 +561,7 @@ impl<'tcx> AsyncDestructorCtorShimBuilder<'tcx> {
 
             // If projection of Discriminant then compare with `Ty::discriminant_ty`
             if let ty::Alias(ty::Projection, ty::AliasTy { args, def_id, .. }) = expected_ty.kind()
-                && Some(*def_id) == self.tcx.lang_items().discriminant_type()
+                && self.tcx.is_lang_item(*def_id, LangItem::Discriminant)
                 && args.first().unwrap().as_type().unwrap().discriminant_ty(self.tcx) == operand_ty
             {
                 return;
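
The lang-item check rewritten above follows the same migration as the `sroa.rs` and `validate.rs` hunks below: comparing a `DefId` against an `Option` pulled from `tcx.lang_items()` becomes a single `TyCtxt::is_lang_item` call. A sketch of the pattern (rustc-internal, shown for the `Discriminant` item used here):

    use rustc_hir::LangItem;

    // Before: fetch the Option<DefId> from the table and compare.
    //     Some(*def_id) == tcx.lang_items().discriminant_type()
    // After: one boolean query, no Option comparison.
    let is_discriminant = tcx.is_lang_item(*def_id, LangItem::Discriminant);
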
diff --git a/compiler/rustc_mir_transform/src/simplify_comparison_integral.rs b/compiler/rustc_mir_transform/src/simplify_comparison_integral.rs
index 03907babf2b..e174cccdad6 100644
--- a/compiler/rustc_mir_transform/src/simplify_comparison_integral.rs
+++ b/compiler/rustc_mir_transform/src/simplify_comparison_integral.rs
@@ -49,7 +49,7 @@ impl<'tcx> MirPass<'tcx> for SimplifyComparisonIntegral {
                     let layout = tcx
                         .layout_of(param_env.and(opt.branch_value_ty))
                         .expect("if we have an evaluated constant we must know the layout");
-                    int.assert_bits(layout.size)
+                    int.to_bits(layout.size)
                 }
                 Scalar::Ptr(..) => continue,
             };
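
The `assert_bits` to `to_bits` rename here matches the `assert_int` to `to_int` change in the `promote_consts.rs` hunk earlier: the `ScalarInt` accessors were renamed, and as far as these call sites are concerned the contract is unchanged, with the call still panicking if the requested size disagrees with the stored one. Sketch (rustc-internal):

    // Same call site, new spelling; the size taken from the layout must
    // still match the size stored in the ScalarInt.
    let bits: u128 = int.to_bits(layout.size);
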
diff --git a/compiler/rustc_mir_transform/src/single_use_consts.rs b/compiler/rustc_mir_transform/src/single_use_consts.rs
new file mode 100644
index 00000000000..93736e55996
--- /dev/null
+++ b/compiler/rustc_mir_transform/src/single_use_consts.rs
@@ -0,0 +1,199 @@
+use rustc_index::{bit_set::BitSet, IndexVec};
+use rustc_middle::bug;
+use rustc_middle::mir::visit::{MutVisitor, PlaceContext, Visitor};
+use rustc_middle::mir::*;
+use rustc_middle::ty::TyCtxt;
+
+/// Various parts of MIR building introduce temporaries that are commonly not needed.
+///
+/// Notably, `if CONST` and `match CONST` end up being used-once temporaries, which
+/// obfuscates the structure for other passes and codegen, which would like to always
+/// be able to just see the constant directly.
+///
+/// At higher optimization levels, fancier passes like GVN will take care of this
+/// in a more general fashion, but this handles the easy cases so it can run in debug.
+///
+/// This only removes constants with a single use, because re-evaluating constants
+/// isn't always an improvement, especially for large ones.
+///
+/// It also removes *never*-used constants, since it has all the information
+/// needed to do that too, including updating the debug info.
+pub struct SingleUseConsts;
+
+impl<'tcx> MirPass<'tcx> for SingleUseConsts {
+    fn is_enabled(&self, sess: &rustc_session::Session) -> bool {
+        sess.mir_opt_level() > 0
+    }
+
+    fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+        let mut finder = SingleUseConstsFinder {
+            ineligible_locals: BitSet::new_empty(body.local_decls.len()),
+            locations: IndexVec::from_elem(LocationPair::new(), &body.local_decls),
+            locals_in_debug_info: BitSet::new_empty(body.local_decls.len()),
+        };
+
+        finder.ineligible_locals.insert_range(..=Local::from_usize(body.arg_count));
+
+        finder.visit_body(body);
+
+        for (local, locations) in finder.locations.iter_enumerated() {
+            if finder.ineligible_locals.contains(local) {
+                continue;
+            }
+
+            let Some(init_loc) = locations.init_loc else {
+                continue;
+            };
+
+            // We're only changing an operand, not the terminator kinds or successors
+            let basic_blocks = body.basic_blocks.as_mut_preserves_cfg();
+            let init_statement =
+                basic_blocks[init_loc.block].statements[init_loc.statement_index].replace_nop();
+            let StatementKind::Assign(place_and_rvalue) = init_statement.kind else {
+                bug!("No longer an assign?");
+            };
+            let (place, rvalue) = *place_and_rvalue;
+            assert_eq!(place.as_local(), Some(local));
+            let Rvalue::Use(operand) = rvalue else { bug!("No longer a use?") };
+
+            let mut replacer = LocalReplacer { tcx, local, operand: Some(operand) };
+
+            if finder.locals_in_debug_info.contains(local) {
+                for var_debug_info in &mut body.var_debug_info {
+                    replacer.visit_var_debug_info(var_debug_info);
+                }
+            }
+
+            let Some(use_loc) = locations.use_loc else { continue };
+
+            let use_block = &mut basic_blocks[use_loc.block];
+            if let Some(use_statement) = use_block.statements.get_mut(use_loc.statement_index) {
+                replacer.visit_statement(use_statement, use_loc);
+            } else {
+                replacer.visit_terminator(use_block.terminator_mut(), use_loc);
+            }
+
+            if replacer.operand.is_some() {
+                bug!(
+                    "operand wasn't used replacing local {local:?} with locations {locations:?} in body {body:#?}"
+                );
+            }
+        }
+    }
+}
+
+#[derive(Copy, Clone, Debug)]
+struct LocationPair {
+    init_loc: Option<Location>,
+    use_loc: Option<Location>,
+}
+
+impl LocationPair {
+    fn new() -> Self {
+        Self { init_loc: None, use_loc: None }
+    }
+}
+
+struct SingleUseConstsFinder {
+    ineligible_locals: BitSet<Local>,
+    locations: IndexVec<Local, LocationPair>,
+    locals_in_debug_info: BitSet<Local>,
+}
+
+impl<'tcx> Visitor<'tcx> for SingleUseConstsFinder {
+    fn visit_assign(&mut self, place: &Place<'tcx>, rvalue: &Rvalue<'tcx>, location: Location) {
+        if let Some(local) = place.as_local()
+            && let Rvalue::Use(operand) = rvalue
+            && let Operand::Constant(_) = operand
+        {
+            let locations = &mut self.locations[local];
+            if locations.init_loc.is_some() {
+                self.ineligible_locals.insert(local);
+            } else {
+                locations.init_loc = Some(location);
+            }
+        } else {
+            self.super_assign(place, rvalue, location);
+        }
+    }
+
+    fn visit_operand(&mut self, operand: &Operand<'tcx>, location: Location) {
+        if let Some(place) = operand.place()
+            && let Some(local) = place.as_local()
+        {
+            let locations = &mut self.locations[local];
+            if locations.use_loc.is_some() {
+                self.ineligible_locals.insert(local);
+            } else {
+                locations.use_loc = Some(location);
+            }
+        } else {
+            self.super_operand(operand, location);
+        }
+    }
+
+    fn visit_statement(&mut self, statement: &Statement<'tcx>, location: Location) {
+        match &statement.kind {
+            // Storage markers are irrelevant to this.
+            StatementKind::StorageLive(_) | StatementKind::StorageDead(_) => {}
+            _ => self.super_statement(statement, location),
+        }
+    }
+
+    fn visit_var_debug_info(&mut self, var_debug_info: &VarDebugInfo<'tcx>) {
+        if let VarDebugInfoContents::Place(place) = &var_debug_info.value
+            && let Some(local) = place.as_local()
+        {
+            self.locals_in_debug_info.insert(local);
+        } else {
+            self.super_var_debug_info(var_debug_info);
+        }
+    }
+
+    fn visit_local(&mut self, local: Local, _context: PlaceContext, _location: Location) {
+        // If there's any path that gets here, rather than being understood elsewhere,
+        // then we'd better not do anything with this local.
+        self.ineligible_locals.insert(local);
+    }
+}
+
+struct LocalReplacer<'tcx> {
+    tcx: TyCtxt<'tcx>,
+    local: Local,
+    operand: Option<Operand<'tcx>>,
+}
+
+impl<'tcx> MutVisitor<'tcx> for LocalReplacer<'tcx> {
+    fn tcx(&self) -> TyCtxt<'tcx> {
+        self.tcx
+    }
+
+    fn visit_operand(&mut self, operand: &mut Operand<'tcx>, _location: Location) {
+        if let Operand::Copy(place) | Operand::Move(place) = operand
+            && let Some(local) = place.as_local()
+            && local == self.local
+        {
+            *operand = self.operand.take().unwrap_or_else(|| {
+                bug!("there was a second use of the operand");
+            });
+        }
+    }
+
+    fn visit_var_debug_info(&mut self, var_debug_info: &mut VarDebugInfo<'tcx>) {
+        if let VarDebugInfoContents::Place(place) = &var_debug_info.value
+            && let Some(local) = place.as_local()
+            && local == self.local
+        {
+            let const_op = self
+                .operand
+                .as_ref()
+                .unwrap_or_else(|| {
+                    bug!("the operand was already stolen");
+                })
+                .constant()
+                .unwrap()
+                .clone();
+            var_debug_info.value = VarDebugInfoContents::Const(const_op);
+        }
+    }
+}
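
To ground the doc comment of this new pass, a runnable surface-level example (ordinary Rust, not compiler code): MIR building lowers the branch below through a used-once temporary, roughly `_1 = const DEBUG; switchInt(move _1)`, and `SingleUseConsts` forwards the constant into the use so later passes and codegen see the constant directly. Per `is_enabled` above, the pass runs whenever `mir_opt_level` is at least 1, which includes ordinary debug builds.

    const DEBUG: bool = true;

    fn main() {
        // Surface form of the `if CONST` pattern named in the doc comment.
        if DEBUG {
            println!("debug path taken");
        }
    }
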
diff --git a/compiler/rustc_mir_transform/src/sroa.rs b/compiler/rustc_mir_transform/src/sroa.rs
index f19c34cae7a..c2108795372 100644
--- a/compiler/rustc_mir_transform/src/sroa.rs
+++ b/compiler/rustc_mir_transform/src/sroa.rs
@@ -1,4 +1,5 @@
 use rustc_data_structures::flat_map_in_place::FlatMapInPlace;
+use rustc_hir::LangItem;
 use rustc_index::bit_set::{BitSet, GrowableBitSet};
 use rustc_index::IndexVec;
 use rustc_middle::bug;
@@ -70,7 +71,7 @@ fn escaping_locals<'tcx>(
                 // Exclude #[repr(simd)] types so that they are not de-optimized into an array
                 return true;
             }
-            if Some(def.did()) == tcx.lang_items().dyn_metadata() {
+            if tcx.is_lang_item(def.did(), LangItem::DynMetadata) {
                 // codegen wants to see the `DynMetadata<T>`,
                 // not the inner reference-to-opaque-type.
                 return true;
diff --git a/compiler/rustc_mir_transform/src/validate.rs b/compiler/rustc_mir_transform/src/validate.rs
index 3b4d4c93877..2cca1a6f507 100644
--- a/compiler/rustc_mir_transform/src/validate.rs
+++ b/compiler/rustc_mir_transform/src/validate.rs
@@ -1,16 +1,17 @@
 //! Validates the MIR to ensure that invariants are upheld.
 
 use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_hir::LangItem;
 use rustc_index::bit_set::BitSet;
 use rustc_index::IndexVec;
 use rustc_infer::traits::Reveal;
 use rustc_middle::mir::coverage::CoverageKind;
-use rustc_middle::mir::interpret::Scalar;
 use rustc_middle::mir::visit::{NonUseContext, PlaceContext, Visitor};
 use rustc_middle::mir::*;
 use rustc_middle::ty::adjustment::PointerCoercion;
 use rustc_middle::ty::{
-    self, CoroutineArgsExt, InstanceDef, ParamEnv, Ty, TyCtxt, TypeVisitableExt, Variance,
+    self, CoroutineArgsExt, InstanceKind, ParamEnv, ScalarInt, Ty, TyCtxt, TypeVisitableExt,
+    Variance,
 };
 use rustc_middle::{bug, span_bug};
 use rustc_target::abi::{Size, FIRST_VARIANT};
@@ -43,7 +44,7 @@ impl<'tcx> MirPass<'tcx> for Validator {
         // terribly important that they pass the validator. However, I think other passes might
         // still see them, in which case they might be surprised. It would probably be better if we
         // didn't put this through the MIR pipeline at all.
-        if matches!(body.source.instance, InstanceDef::Intrinsic(..) | InstanceDef::Virtual(..)) {
+        if matches!(body.source.instance, InstanceKind::Intrinsic(..) | InstanceKind::Virtual(..)) {
             return;
         }
         let def_id = body.source.def_id();
@@ -94,7 +95,7 @@ impl<'tcx> MirPass<'tcx> for Validator {
         }
 
         if let MirPhase::Runtime(_) = body.phase {
-            if let ty::InstanceDef::Item(_) = body.source.instance {
+            if let ty::InstanceKind::Item(_) = body.source.instance {
                 if body.has_free_regions() {
                     cfg_checker.fail(
                         Location::START,
@@ -689,7 +690,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
                     }
                     ty::Adt(adt_def, args) => {
                         // see <https://github.com/rust-lang/rust/blob/7601adcc764d42c9f2984082b49948af652df986/compiler/rustc_middle/src/ty/layout.rs#L861-L864>
-                        if Some(adt_def.did()) == self.tcx.lang_items().dyn_metadata() {
+                        if self.tcx.is_lang_item(adt_def.did(), LangItem::DynMetadata) {
                             self.fail(
                                 location,
                                 format!(
@@ -1478,7 +1479,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
                 });
 
                 for (value, _) in targets.iter() {
-                    if Scalar::<()>::try_from_uint(value, size).is_none() {
+                    if ScalarInt::try_from_uint(value, size).is_none() {
                         self.fail(
                             location,
                             format!("the value {value:#x} is not a proper {switch_ty:?}"),