Diffstat (limited to 'compiler/rustc_mir_transform')
-rw-r--r--  compiler/rustc_mir_transform/src/add_retag.rs                 |   4
-rw-r--r--  compiler/rustc_mir_transform/src/check_unsafety.rs            |   9
-rw-r--r--  compiler/rustc_mir_transform/src/coroutine.rs                 |   4
-rw-r--r--  compiler/rustc_mir_transform/src/coverage/spans.rs            | 281
-rw-r--r--  compiler/rustc_mir_transform/src/coverage/spans/from_mir.rs   |  28
-rw-r--r--  compiler/rustc_mir_transform/src/deduce_param_attrs.rs        |   4
-rw-r--r--  compiler/rustc_mir_transform/src/dest_prop.rs                 |   3
-rw-r--r--  compiler/rustc_mir_transform/src/function_item_references.rs  |   8
-rw-r--r--  compiler/rustc_mir_transform/src/gvn.rs                       |  35
-rw-r--r--  compiler/rustc_mir_transform/src/inline.rs                    |  12
-rw-r--r--  compiler/rustc_mir_transform/src/inline/cycle.rs              |   4
-rw-r--r--  compiler/rustc_mir_transform/src/instsimplify.rs              |  17
-rw-r--r--  compiler/rustc_mir_transform/src/jump_threading.rs            |   6
-rw-r--r--  compiler/rustc_mir_transform/src/lib.rs                       |  10
-rw-r--r--  compiler/rustc_mir_transform/src/ref_prop.rs                  |   8
-rw-r--r--  compiler/rustc_mir_transform/src/remove_noop_landing_pads.rs  |   5
-rw-r--r--  compiler/rustc_mir_transform/src/shim.rs                      |   6
-rw-r--r--  compiler/rustc_mir_transform/src/ssa.rs                       |   2
18 files changed, 232 insertions, 214 deletions
diff --git a/compiler/rustc_mir_transform/src/add_retag.rs b/compiler/rustc_mir_transform/src/add_retag.rs
index 94077c63057..430d9572e75 100644
--- a/compiler/rustc_mir_transform/src/add_retag.rs
+++ b/compiler/rustc_mir_transform/src/add_retag.rs
@@ -36,10 +36,10 @@ fn may_contain_reference<'tcx>(ty: Ty<'tcx>, depth: u32, tcx: TyCtxt<'tcx>) -> b
         ty::Tuple(tys) => {
             depth == 0 || tys.iter().any(|ty| may_contain_reference(ty, depth - 1, tcx))
         }
-        ty::Adt(adt, subst) => {
+        ty::Adt(adt, args) => {
             depth == 0
                 || adt.variants().iter().any(|v| {
-                    v.fields.iter().any(|f| may_contain_reference(f.ty(tcx, subst), depth - 1, tcx))
+                    v.fields.iter().any(|f| may_contain_reference(f.ty(tcx, args), depth - 1, tcx))
                 })
         }
         // Conservative fallback
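
The recursion above is depth-limited: once the budget hits zero, the walk gives up and conservatively reports that a reference may be present. A minimal standalone sketch of the same shape, using a toy enum rather than rustc's `Ty`:

    enum Ty {
        Reference,
        Scalar,
        Tuple(Vec<Ty>),
    }

    fn may_contain_reference(ty: &Ty, depth: u32) -> bool {
        match ty {
            Ty::Reference => true,
            Ty::Scalar => false,
            // Budget exhausted: conservatively assume a reference is possible.
            Ty::Tuple(tys) => {
                depth == 0 || tys.iter().any(|ty| may_contain_reference(ty, depth - 1))
            }
        }
    }
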
diff --git a/compiler/rustc_mir_transform/src/check_unsafety.rs b/compiler/rustc_mir_transform/src/check_unsafety.rs
index fbb62695383..a0c3de3af58 100644
--- a/compiler/rustc_mir_transform/src/check_unsafety.rs
+++ b/compiler/rustc_mir_transform/src/check_unsafety.rs
@@ -243,10 +243,11 @@ impl<'tcx> Visitor<'tcx> for UnsafetyChecker<'_, 'tcx> {
                     // old value is being dropped.
                     let assigned_ty = place.ty(&self.body.local_decls, self.tcx).ty;
                     if assigned_ty.needs_drop(self.tcx, self.param_env) {
-                        // This would be unsafe, but should be outright impossible since we reject such unions.
-                        self.tcx.dcx().span_delayed_bug(
-                            self.source_info.span,
-                            format!("union fields that need dropping should be impossible: {assigned_ty}")
+                        // This would be unsafe, but should be outright impossible since we reject
+                        // such unions.
+                        assert!(
+                            self.tcx.dcx().has_errors().is_some(),
+                            "union fields that need dropping should be impossible: {assigned_ty}"
                         );
                     }
                 } else {
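
The hunk above replaces a delayed bug with an immediate assertion that some error has already been emitted: reaching this branch without a prior error is itself a compiler bug. A simplified sketch of the pattern, with a hypothetical `ErrorTracker` standing in for rustc's `DiagCtxt`:

    struct ErrorTracker {
        error_count: usize,
    }

    impl ErrorTracker {
        // Analogue of DiagCtxt::has_errors(): Some(..) iff an error was reported.
        fn has_errors(&self) -> Option<usize> {
            (self.error_count > 0).then_some(self.error_count)
        }
    }

    fn check_union_field_assignment(tracker: &ErrorTracker, needs_drop: bool) {
        if needs_drop {
            // Such unions are rejected earlier, so an error must already exist.
            assert!(
                tracker.has_errors().is_some(),
                "union fields that need dropping should be impossible"
            );
        }
    }
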
diff --git a/compiler/rustc_mir_transform/src/coroutine.rs b/compiler/rustc_mir_transform/src/coroutine.rs
index 297b2fa143d..a0851aa557b 100644
--- a/compiler/rustc_mir_transform/src/coroutine.rs
+++ b/compiler/rustc_mir_transform/src/coroutine.rs
@@ -726,7 +726,7 @@ fn replace_resume_ty_local<'tcx>(
 /// The async lowering step and the type / lifetime inference / checking are
 /// still using the `resume` argument for the time being. After this transform,
 /// the coroutine body doesn't have the `resume` argument.
-fn transform_gen_context<'tcx>(_tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+fn transform_gen_context<'tcx>(body: &mut Body<'tcx>) {
     // This leaves the local representing the `resume` argument in place,
     // but turns it into a regular local variable. This is cheaper than
     // adjusting all local references in the body after removing it.
@@ -1733,7 +1733,7 @@ impl<'tcx> MirPass<'tcx> for StateTransform {
 
         // Remove the context argument within generator bodies.
         if matches!(coroutine_kind, CoroutineKind::Desugared(CoroutineDesugaring::Gen, _)) {
-            transform_gen_context(tcx, body);
+            transform_gen_context(body);
         }
 
         // The original arguments to the function are no longer arguments, mark them as such.
diff --git a/compiler/rustc_mir_transform/src/coverage/spans.rs b/compiler/rustc_mir_transform/src/coverage/spans.rs
index d3d0c7bcc95..934e77e7deb 100644
--- a/compiler/rustc_mir_transform/src/coverage/spans.rs
+++ b/compiler/rustc_mir_transform/src/coverage/spans.rs
@@ -1,9 +1,10 @@
 use rustc_data_structures::graph::WithNumNodes;
 use rustc_index::bit_set::BitSet;
 use rustc_middle::mir;
-use rustc_span::{BytePos, Span, DUMMY_SP};
+use rustc_span::{BytePos, Span};
 
 use crate::coverage::graph::{BasicCoverageBlock, CoverageGraph, START_BCB};
+use crate::coverage::spans::from_mir::SpanFromMir;
 use crate::coverage::ExtractedHirInfo;
 
 mod from_mir;
@@ -61,7 +62,7 @@ pub(super) fn generate_coverage_spans(
             basic_coverage_blocks,
         );
         let coverage_spans = SpansRefiner::refine_sorted_spans(basic_coverage_blocks, sorted_spans);
-        mappings.extend(coverage_spans.into_iter().map(|CoverageSpan { bcb, span, .. }| {
+        mappings.extend(coverage_spans.into_iter().map(|RefinedCovspan { bcb, span, .. }| {
             // Each span produced by the generator represents an ordinary code region.
             BcbMapping { kind: BcbMappingKind::Code(bcb), span }
         }));
@@ -85,18 +86,36 @@ pub(super) fn generate_coverage_spans(
     Some(CoverageSpans { bcb_has_mappings, mappings })
 }
 
-/// A BCB is deconstructed into one or more `Span`s. Each `Span` maps to a `CoverageSpan` that
-/// references the originating BCB and one or more MIR `Statement`s and/or `Terminator`s.
-/// Initially, the `Span`s come from the `Statement`s and `Terminator`s, but subsequent
-/// transforms can combine adjacent `Span`s and `CoverageSpan` from the same BCB, merging the
-/// `merged_spans` vectors, and the `Span`s to cover the extent of the combined `Span`s.
-///
-/// Note: A span merged into another CoverageSpan may come from a `BasicBlock` that
-/// is not part of the `CoverageSpan` bcb if the statement was included because it's `Span` matches
-/// or is subsumed by the `Span` associated with this `CoverageSpan`, and it's `BasicBlock`
-/// `dominates()` the `BasicBlock`s in this `CoverageSpan`.
-#[derive(Debug, Clone)]
-struct CoverageSpan {
+#[derive(Debug)]
+struct CurrCovspan {
+    /// This is used as the basis for [`PrevCovspan::original_span`], so it must
+    /// not be modified.
+    span: Span,
+    bcb: BasicCoverageBlock,
+    is_closure: bool,
+}
+
+impl CurrCovspan {
+    fn new(span: Span, bcb: BasicCoverageBlock, is_closure: bool) -> Self {
+        Self { span, bcb, is_closure }
+    }
+
+    fn into_prev(self) -> PrevCovspan {
+        let Self { span, bcb, is_closure } = self;
+        PrevCovspan { original_span: span, span, bcb, merged_spans: vec![span], is_closure }
+    }
+
+    fn into_refined(self) -> RefinedCovspan {
+        // This is only called in cases where `curr` is a closure span that has
+        // been carved out of `prev`.
+        debug_assert!(self.is_closure);
+        self.into_prev().into_refined()
+    }
+}
+
+#[derive(Debug)]
+struct PrevCovspan {
+    original_span: Span,
     span: Span,
     bcb: BasicCoverageBlock,
     /// List of all the original spans from MIR that have been merged into this
@@ -105,37 +124,82 @@ struct CoverageSpan {
     is_closure: bool,
 }
 
-impl CoverageSpan {
-    fn new(span: Span, bcb: BasicCoverageBlock, is_closure: bool) -> Self {
-        Self { span, bcb, merged_spans: vec![span], is_closure }
+impl PrevCovspan {
+    fn is_mergeable(&self, other: &CurrCovspan) -> bool {
+        self.bcb == other.bcb && !self.is_closure && !other.is_closure
     }
 
-    pub fn merge_from(&mut self, other: &Self) {
+    fn merge_from(&mut self, other: &CurrCovspan) {
         debug_assert!(self.is_mergeable(other));
         self.span = self.span.to(other.span);
-        self.merged_spans.extend_from_slice(&other.merged_spans);
+        self.merged_spans.push(other.span);
     }
 
-    pub fn cutoff_statements_at(&mut self, cutoff_pos: BytePos) {
+    fn cutoff_statements_at(&mut self, cutoff_pos: BytePos) {
         self.merged_spans.retain(|span| span.hi() <= cutoff_pos);
         if let Some(max_hi) = self.merged_spans.iter().map(|span| span.hi()).max() {
             self.span = self.span.with_hi(max_hi);
         }
     }
 
-    #[inline]
-    pub fn is_mergeable(&self, other: &Self) -> bool {
-        self.is_in_same_bcb(other) && !(self.is_closure || other.is_closure)
+    fn into_dup(self) -> DuplicateCovspan {
+        let Self { original_span, span, bcb, merged_spans: _, is_closure } = self;
+        // Only unmodified spans end up in `pending_dups`.
+        debug_assert_eq!(original_span, span);
+        DuplicateCovspan { span, bcb, is_closure }
     }
 
-    #[inline]
-    pub fn is_in_same_bcb(&self, other: &Self) -> bool {
-        self.bcb == other.bcb
+    fn refined_copy(&self) -> RefinedCovspan {
+        let &Self { original_span: _, span, bcb, merged_spans: _, is_closure } = self;
+        RefinedCovspan { span, bcb, is_closure }
+    }
+
+    fn into_refined(self) -> RefinedCovspan {
+        self.refined_copy()
     }
 }
 
-/// Converts the initial set of `CoverageSpan`s (one per MIR `Statement` or `Terminator`) into a
-/// minimal set of `CoverageSpan`s, using the BCB CFG to determine where it is safe and useful to:
+#[derive(Debug)]
+struct DuplicateCovspan {
+    span: Span,
+    bcb: BasicCoverageBlock,
+    is_closure: bool,
+}
+
+impl DuplicateCovspan {
+    /// Returns a copy of this covspan, as a [`RefinedCovspan`].
+    /// Should only be called in places that would otherwise clone this covspan.
+    fn refined_copy(&self) -> RefinedCovspan {
+        let &Self { span, bcb, is_closure } = self;
+        RefinedCovspan { span, bcb, is_closure }
+    }
+
+    fn into_refined(self) -> RefinedCovspan {
+        // Even though we consume self, we can just reuse the copying impl.
+        self.refined_copy()
+    }
+}
+
+#[derive(Debug)]
+struct RefinedCovspan {
+    span: Span,
+    bcb: BasicCoverageBlock,
+    is_closure: bool,
+}
+
+impl RefinedCovspan {
+    fn is_mergeable(&self, other: &Self) -> bool {
+        self.bcb == other.bcb && !self.is_closure && !other.is_closure
+    }
+
+    fn merge_from(&mut self, other: &Self) {
+        debug_assert!(self.is_mergeable(other));
+        self.span = self.span.to(other.span);
+    }
+}
+
+/// Converts the initial set of coverage spans (one per MIR `Statement` or `Terminator`) into a
+/// minimal set of coverage spans, using the BCB CFG to determine where it is safe and useful to:
 ///
 ///  * Remove duplicate source code coverage regions
 ///  * Merge spans that represent continuous (both in source code and control flow), non-branching
@@ -145,43 +209,33 @@ struct SpansRefiner<'a> {
     /// The BasicCoverageBlock Control Flow Graph (BCB CFG).
     basic_coverage_blocks: &'a CoverageGraph,
 
-    /// The initial set of `CoverageSpan`s, sorted by `Span` (`lo` and `hi`) and by relative
+    /// The initial set of coverage spans, sorted by `Span` (`lo` and `hi`) and by relative
     /// dominance between the `BasicCoverageBlock`s of equal `Span`s.
-    sorted_spans_iter: std::vec::IntoIter<CoverageSpan>,
+    sorted_spans_iter: std::vec::IntoIter<SpanFromMir>,
 
-    /// The current `CoverageSpan` to compare to its `prev`, to possibly merge, discard, force the
+    /// The current coverage span to compare to its `prev`, to possibly merge, discard, force the
     /// discard of the `prev` (and or `pending_dups`), or keep both (with `prev` moved to
     /// `pending_dups`). If `curr` is not discarded or merged, it becomes `prev` for the next
     /// iteration.
-    some_curr: Option<CoverageSpan>,
-
-    /// The original `span` for `curr`, in case `curr.span()` is modified. The `curr_original_span`
-    /// **must not be mutated** (except when advancing to the next `curr`), even if `curr.span()`
-    /// is mutated.
-    curr_original_span: Span,
+    some_curr: Option<CurrCovspan>,
 
-    /// The CoverageSpan from a prior iteration; typically assigned from that iteration's `curr`.
+    /// The coverage span from a prior iteration; typically assigned from that iteration's `curr`.
     /// If that `curr` was discarded, `prev` retains its value from the previous iteration.
-    some_prev: Option<CoverageSpan>,
+    some_prev: Option<PrevCovspan>,
 
-    /// Assigned from `curr_original_span` from the previous iteration. The `prev_original_span`
-    /// **must not be mutated** (except when advancing to the next `prev`), even if `prev.span()`
-    /// is mutated.
-    prev_original_span: Span,
-
-    /// One or more `CoverageSpan`s with the same `Span` but different `BasicCoverageBlock`s, and
+    /// One or more coverage spans with the same `Span` but different `BasicCoverageBlock`s, and
     /// no `BasicCoverageBlock` in this list dominates another `BasicCoverageBlock` in the list.
     /// If a new `curr` span also fits this criteria (compared to an existing list of
-    /// `pending_dups`), that `curr` `CoverageSpan` moves to `prev` before possibly being added to
+    /// `pending_dups`), that `curr` moves to `prev` before possibly being added to
     /// the `pending_dups` list, on the next iteration. As a result, if `prev` and `pending_dups`
     /// have the same `Span`, the criteria for `pending_dups` holds for `prev` as well: a `prev`
     /// with a matching `Span` does not dominate any `pending_dup` and no `pending_dup` dominates a
     /// `prev` with a matching `Span`)
-    pending_dups: Vec<CoverageSpan>,
+    pending_dups: Vec<DuplicateCovspan>,
 
-    /// The final `CoverageSpan`s to add to the coverage map. A `Counter` or `Expression`
-    /// will also be injected into the MIR for each `CoverageSpan`.
-    refined_spans: Vec<CoverageSpan>,
+    /// The final coverage spans to add to the coverage map. A `Counter` or `Expression`
+    /// will also be injected into the MIR for each BCB that has associated spans.
+    refined_spans: Vec<RefinedCovspan>,
 }
 
 impl<'a> SpansRefiner<'a> {
@@ -190,15 +244,13 @@ impl<'a> SpansRefiner<'a> {
     /// and carving holes in spans when they overlap in unwanted ways.
     fn refine_sorted_spans(
         basic_coverage_blocks: &'a CoverageGraph,
-        sorted_spans: Vec<CoverageSpan>,
-    ) -> Vec<CoverageSpan> {
+        sorted_spans: Vec<SpanFromMir>,
+    ) -> Vec<RefinedCovspan> {
         let this = Self {
             basic_coverage_blocks,
             sorted_spans_iter: sorted_spans.into_iter(),
             some_curr: None,
-            curr_original_span: DUMMY_SP,
             some_prev: None,
-            prev_original_span: DUMMY_SP,
             pending_dups: Vec::new(),
             refined_spans: Vec::with_capacity(basic_coverage_blocks.num_nodes() * 2),
         };
@@ -206,9 +258,9 @@ impl<'a> SpansRefiner<'a> {
         this.to_refined_spans()
     }
 
-    /// Iterate through the sorted `CoverageSpan`s, and return the refined list of merged and
-    /// de-duplicated `CoverageSpan`s.
-    fn to_refined_spans(mut self) -> Vec<CoverageSpan> {
+    /// Iterate through the sorted coverage spans, and return the refined list of merged and
+    /// de-duplicated spans.
+    fn to_refined_spans(mut self) -> Vec<RefinedCovspan> {
         while self.next_coverage_span() {
             // For the first span we don't have `prev` set, so most of the
             // span-processing steps don't make sense yet.
@@ -221,16 +273,15 @@ impl<'a> SpansRefiner<'a> {
             let prev = self.prev();
             let curr = self.curr();
 
-            if curr.is_mergeable(prev) {
+            if prev.is_mergeable(curr) {
                 debug!("  same bcb (and neither is a closure), merge with prev={prev:?}");
-                let prev = self.take_prev();
-                self.curr_mut().merge_from(&prev);
-            // Note that curr.span may now differ from curr_original_span
+                let curr = self.take_curr();
+                self.prev_mut().merge_from(&curr);
             } else if prev.span.hi() <= curr.span.lo() {
                 debug!(
                     "  different bcbs and disjoint spans, so keep curr for next iter, and add prev={prev:?}",
                 );
-                let prev = self.take_prev();
+                let prev = self.take_prev().into_refined();
                 self.refined_spans.push(prev);
             } else if prev.is_closure {
                 // drop any equal or overlapping span (`curr`) and keep `prev` to test again in the
@@ -241,9 +292,9 @@ impl<'a> SpansRefiner<'a> {
                 self.take_curr(); // Discards curr.
             } else if curr.is_closure {
                 self.carve_out_span_for_closure();
-            } else if self.prev_original_span == curr.span {
-                // `prev` and `curr` have the same span, or would have had the
-                // same span before `prev` was modified by other spans.
+            } else if prev.original_span == prev.span && prev.span == curr.span {
+                // Prev and curr have the same span, and prev's span hasn't
+                // been modified by other spans.
                 self.update_pending_dups();
             } else {
                 self.cutoff_prev_at_overlapping_curr();
@@ -253,14 +304,14 @@ impl<'a> SpansRefiner<'a> {
         // Drain any remaining dups into the output.
         for dup in self.pending_dups.drain(..) {
             debug!("    ...adding at least one pending dup={:?}", dup);
-            self.refined_spans.push(dup);
+            self.refined_spans.push(dup.into_refined());
         }
 
         // There is usually a final span remaining in `prev` after the loop ends,
         // so add it to the output as well.
         if let Some(prev) = self.some_prev.take() {
             debug!("    AT END, adding last prev={prev:?}");
-            self.refined_spans.push(prev);
+            self.refined_spans.push(prev.into_refined());
         }
 
         // Do one last merge pass, to simplify the output.
@@ -274,7 +325,7 @@ impl<'a> SpansRefiner<'a> {
             }
         });
 
-        // Remove `CoverageSpan`s derived from closures, originally added to ensure the coverage
+        // Remove spans derived from closures, originally added to ensure the coverage
         // regions for the current function leave room for the closure's own coverage regions
         // (injected separately, from the closure's own MIR).
         self.refined_spans.retain(|covspan| !covspan.is_closure);
@@ -282,34 +333,29 @@ impl<'a> SpansRefiner<'a> {
     }
 
     #[track_caller]
-    fn curr(&self) -> &CoverageSpan {
+    fn curr(&self) -> &CurrCovspan {
         self.some_curr.as_ref().unwrap_or_else(|| bug!("some_curr is None (curr)"))
     }
 
-    #[track_caller]
-    fn curr_mut(&mut self) -> &mut CoverageSpan {
-        self.some_curr.as_mut().unwrap_or_else(|| bug!("some_curr is None (curr_mut)"))
-    }
-
     /// If called, then the next call to `next_coverage_span()` will *not* update `prev` with the
     /// `curr` coverage span.
     #[track_caller]
-    fn take_curr(&mut self) -> CoverageSpan {
+    fn take_curr(&mut self) -> CurrCovspan {
         self.some_curr.take().unwrap_or_else(|| bug!("some_curr is None (take_curr)"))
     }
 
     #[track_caller]
-    fn prev(&self) -> &CoverageSpan {
+    fn prev(&self) -> &PrevCovspan {
         self.some_prev.as_ref().unwrap_or_else(|| bug!("some_prev is None (prev)"))
     }
 
     #[track_caller]
-    fn prev_mut(&mut self) -> &mut CoverageSpan {
+    fn prev_mut(&mut self) -> &mut PrevCovspan {
         self.some_prev.as_mut().unwrap_or_else(|| bug!("some_prev is None (prev_mut)"))
     }
 
     #[track_caller]
-    fn take_prev(&mut self) -> CoverageSpan {
+    fn take_prev(&mut self) -> PrevCovspan {
         self.some_prev.take().unwrap_or_else(|| bug!("some_prev is None (take_prev)"))
     }
 
@@ -335,7 +381,7 @@ impl<'a> SpansRefiner<'a> {
         if last_dup.span.hi() <= self.curr().span.lo() {
             for dup in self.pending_dups.drain(..) {
                 debug!("    ...adding at least one pending={:?}", dup);
-                self.refined_spans.push(dup);
+                self.refined_spans.push(dup.into_refined());
             }
         } else {
             self.pending_dups.clear();
@@ -343,11 +389,10 @@ impl<'a> SpansRefiner<'a> {
         assert!(self.pending_dups.is_empty());
     }
 
-    /// Advance `prev` to `curr` (if any), and `curr` to the next `CoverageSpan` in sorted order.
+    /// Advance `prev` to `curr` (if any), and `curr` to the next coverage span in sorted order.
     fn next_coverage_span(&mut self) -> bool {
         if let Some(curr) = self.some_curr.take() {
-            self.some_prev = Some(curr);
-            self.prev_original_span = self.curr_original_span;
+            self.some_prev = Some(curr.into_prev());
         }
         while let Some(curr) = self.sorted_spans_iter.next() {
             debug!("FOR curr={:?}", curr);
@@ -362,10 +407,7 @@ impl<'a> SpansRefiner<'a> {
                     closure?); prev={prev:?}",
                 );
             } else {
-                // Save a copy of the original span for `curr` in case the `CoverageSpan` is changed
-                // by `self.curr_mut().merge_from(prev)`.
-                self.curr_original_span = curr.span;
-                self.some_curr.replace(curr);
+                self.some_curr = Some(CurrCovspan::new(curr.span, curr.bcb, curr.is_closure));
                 self.maybe_flush_pending_dups();
                 return true;
             }
@@ -388,11 +430,11 @@ impl<'a> SpansRefiner<'a> {
         let has_post_closure_span = prev.span.hi() > right_cutoff;
 
         if has_pre_closure_span {
-            let mut pre_closure = self.prev().clone();
+            let mut pre_closure = self.prev().refined_copy();
             pre_closure.span = pre_closure.span.with_hi(left_cutoff);
             debug!("  prev overlaps a closure. Adding span for pre_closure={:?}", pre_closure);
 
-            for mut dup in self.pending_dups.iter().cloned() {
+            for mut dup in self.pending_dups.iter().map(DuplicateCovspan::refined_copy) {
                 dup.span = dup.span.with_hi(left_cutoff);
                 debug!("    ...and at least one pre_closure dup={:?}", dup);
                 self.refined_spans.push(dup);
@@ -402,9 +444,7 @@ impl<'a> SpansRefiner<'a> {
         }
 
         if has_post_closure_span {
-            // Mutate `prev.span()` to start after the closure (and discard curr).
-            // (**NEVER** update `prev_original_span` because it affects the assumptions
-            // about how the `CoverageSpan`s are ordered.)
+            // Mutate `prev.span` to start after the closure (and discard curr).
             self.prev_mut().span = self.prev().span.with_lo(right_cutoff);
             debug!("  Mutated prev.span to start after the closure. prev={:?}", self.prev());
 
@@ -413,25 +453,26 @@ impl<'a> SpansRefiner<'a> {
                 dup.span = dup.span.with_lo(right_cutoff);
             }
 
-            let closure_covspan = self.take_curr(); // Prevent this curr from becoming prev.
+            // Prevent this curr from becoming prev.
+            let closure_covspan = self.take_curr().into_refined();
             self.refined_spans.push(closure_covspan); // since self.prev() was already updated
         } else {
             self.pending_dups.clear();
         }
     }
 
-    /// Called if `curr.span` equals `prev_original_span` (and potentially equal to all
+    /// Called if `curr.span` equals `prev.original_span` (and potentially equal to all
     /// `pending_dups` spans, if any). Keep in mind, `prev.span()` may have been changed.
     /// If prev.span() was merged into other spans (with matching BCB, for instance),
-    /// `prev.span.hi()` will be greater than (further right of) `prev_original_span.hi()`.
+    /// `prev.span.hi()` will be greater than (further right of) `prev.original_span.hi()`.
     /// If prev.span() was split off to the right of a closure, prev.span().lo() will be
-    /// greater than prev_original_span.lo(). The actual span of `prev_original_span` is
+    /// greater than prev.original_span.lo(). The actual span of `prev.original_span` is
     /// not as important as knowing that `prev()` **used to have the same span** as `curr()`,
     /// which means their sort order is still meaningful for determining the dominator
     /// relationship.
     ///
-    /// When two `CoverageSpan`s have the same `Span`, dominated spans can be discarded; but if
-    /// neither `CoverageSpan` dominates the other, both (or possibly more than two) are held,
+    /// When two coverage spans have the same `Span`, dominated spans can be discarded; but if
+    /// neither coverage span dominates the other, both (or possibly more than two) are held,
     /// until their disposition is determined. In this latter case, the `prev` dup is moved into
     /// `pending_dups` so the new `curr` dup can be moved to `prev` for the next iteration.
     fn update_pending_dups(&mut self) {
@@ -439,9 +480,15 @@ impl<'a> SpansRefiner<'a> {
         let curr_bcb = self.curr().bcb;
 
         // Equal coverage spans are ordered by dominators before dominated (if any), so it should be
-        // impossible for `curr` to dominate any previous `CoverageSpan`.
+        // impossible for `curr` to dominate any previous coverage span.
         debug_assert!(!self.basic_coverage_blocks.dominates(curr_bcb, prev_bcb));
 
+        // `prev` is a duplicate of `curr`, so add it to the list of pending dups.
+        // If it dominates `curr`, it will be removed by the subsequent discard step.
+        let prev = self.take_prev().into_dup();
+        debug!(?prev, "adding prev to pending dups");
+        self.pending_dups.push(prev);
+
         let initial_pending_count = self.pending_dups.len();
         if initial_pending_count > 0 {
             self.pending_dups
@@ -454,42 +501,6 @@ impl<'a> SpansRefiner<'a> {
                 );
             }
         }
-
-        if self.basic_coverage_blocks.dominates(prev_bcb, curr_bcb) {
-            debug!(
-                "  different bcbs but SAME spans, and prev dominates curr. Discard prev={:?}",
-                self.prev()
-            );
-            self.cutoff_prev_at_overlapping_curr();
-        // If one span dominates the other, associate the span with the code from the dominated
-        // block only (`curr`), and discard the overlapping portion of the `prev` span. (Note
-        // that if `prev.span` is wider than `prev_original_span`, a `CoverageSpan` will still
-        // be created for `prev`s block, for the non-overlapping portion, left of `curr.span`.)
-        //
-        // For example:
-        //     match somenum {
-        //         x if x < 1 => { ... }
-        //     }...
-        //
-        // The span for the first `x` is referenced by both the pattern block (every time it is
-        // evaluated) and the arm code (only when matched). The counter will be applied only to
-        // the dominated block. This allows coverage to track and highlight things like the
-        // assignment of `x` above, if the branch is matched, making `x` available to the arm
-        // code; and to track and highlight the question mark `?` "try" operator at the end of
-        // a function call returning a `Result`, so the `?` is covered when the function returns
-        // an `Err`, and not counted as covered if the function always returns `Ok`.
-        } else {
-            // Save `prev` in `pending_dups`. (`curr` will become `prev` in the next iteration.)
-            // If the `curr` CoverageSpan is later discarded, `pending_dups` can be discarded as
-            // well; but if `curr` is added to refined_spans, the `pending_dups` will also be added.
-            debug!(
-                "  different bcbs but SAME spans, and neither dominates, so keep curr for \
-                next iter, and, pending upcoming spans (unless overlapping) add prev={:?}",
-                self.prev()
-            );
-            let prev = self.take_prev();
-            self.pending_dups.push(prev);
-        }
     }
 
     /// `curr` overlaps `prev`. If `prev`s span extends left of `curr`s span, keep _only_
@@ -512,7 +523,7 @@ impl<'a> SpansRefiner<'a> {
                 debug!("  ... no non-overlapping statements to add");
             } else {
                 debug!("  ... adding modified prev={:?}", self.prev());
-                let prev = self.take_prev();
+                let prev = self.take_prev().into_refined();
                 self.refined_spans.push(prev);
             }
         } else {
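
Taken together, the new structs form a one-way typestate pipeline: every span enters as a `CurrCovspan`, may become a `PrevCovspan` (gaining `original_span` and `merged_spans`), may be parked as a `DuplicateCovspan`, and leaves as a `RefinedCovspan`. A minimal sketch of that conversion chain, with fields trimmed to the essentials:

    #[derive(Clone, Copy, Debug)]
    struct Span(u32, u32);

    struct CurrCovspan { span: Span }
    struct PrevCovspan { original_span: Span, span: Span, merged_spans: Vec<Span> }
    struct RefinedCovspan { span: Span }

    impl CurrCovspan {
        // One-way: once `curr` becomes `prev`, the original span is frozen so
        // later mutations of `span` can be detected (e.g. in update_pending_dups).
        fn into_prev(self) -> PrevCovspan {
            PrevCovspan { original_span: self.span, span: self.span, merged_spans: vec![self.span] }
        }
    }

    impl PrevCovspan {
        fn into_refined(self) -> RefinedCovspan {
            RefinedCovspan { span: self.span }
        }
    }

Because each stage is a distinct type, a stage only carries the fields that are meaningful for it, which is what lets the patch delete `curr_original_span` and `prev_original_span` from the refiner itself.
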
diff --git a/compiler/rustc_mir_transform/src/coverage/spans/from_mir.rs b/compiler/rustc_mir_transform/src/coverage/spans/from_mir.rs
index 01fae7c0bec..9517ede288f 100644
--- a/compiler/rustc_mir_transform/src/coverage/spans/from_mir.rs
+++ b/compiler/rustc_mir_transform/src/coverage/spans/from_mir.rs
@@ -9,7 +9,6 @@ use rustc_span::{ExpnKind, MacroKind, Span, Symbol};
 use crate::coverage::graph::{
     BasicCoverageBlock, BasicCoverageBlockData, CoverageGraph, START_BCB,
 };
-use crate::coverage::spans::CoverageSpan;
 use crate::coverage::ExtractedHirInfo;
 
 /// Traverses the MIR body to produce an initial collection of coverage-relevant
@@ -22,7 +21,7 @@ pub(super) fn mir_to_initial_sorted_coverage_spans(
     mir_body: &mir::Body<'_>,
     hir_info: &ExtractedHirInfo,
     basic_coverage_blocks: &CoverageGraph,
-) -> Vec<CoverageSpan> {
+) -> Vec<SpanFromMir> {
     let &ExtractedHirInfo { body_span, .. } = hir_info;
 
     let mut initial_spans = vec![];
@@ -61,7 +60,7 @@ pub(super) fn mir_to_initial_sorted_coverage_spans(
             .then_with(|| Ord::cmp(&a.is_closure, &b.is_closure).reverse())
     });
 
-    initial_spans.into_iter().map(SpanFromMir::into_coverage_span).collect::<Vec<_>>()
+    initial_spans
 }
 
 /// Macros that expand into branches (e.g. `assert!`, `trace!`) tend to generate
@@ -119,10 +118,10 @@ fn split_visible_macro_spans(initial_spans: &mut Vec<SpanFromMir>) {
     initial_spans.extend(extra_spans);
 }
 
-// Generate a set of `CoverageSpan`s from the filtered set of `Statement`s and `Terminator`s of
-// the `BasicBlock`(s) in the given `BasicCoverageBlockData`. One `CoverageSpan` is generated
+// Generate a set of coverage spans from the filtered set of `Statement`s and `Terminator`s of
+// the `BasicBlock`(s) in the given `BasicCoverageBlockData`. One coverage span is generated
 // for each `Statement` and `Terminator`. (Note that subsequent stages of coverage analysis will
-// merge some `CoverageSpan`s, at which point a `CoverageSpan` may represent multiple
+// merge some coverage spans, at which point a coverage span may represent multiple
 // `Statement`s and/or `Terminator`s.)
 fn bcb_to_initial_coverage_spans<'a, 'tcx>(
     mir_body: &'a mir::Body<'tcx>,
@@ -138,7 +137,7 @@ fn bcb_to_initial_coverage_spans<'a, 'tcx>(
             let (span, visible_macro) =
                 unexpand_into_body_span_with_visible_macro(expn_span, body_span)?;
 
-            Some(SpanFromMir::new(span, visible_macro, bcb, is_closure_or_coroutine(statement)))
+            Some(SpanFromMir::new(span, visible_macro, bcb, is_closure_like(statement)))
         });
 
         let terminator_span = Some(data.terminator()).into_iter().filter_map(move |terminator| {
@@ -153,7 +152,7 @@ fn bcb_to_initial_coverage_spans<'a, 'tcx>(
     })
 }
 
-fn is_closure_or_coroutine(statement: &Statement<'_>) -> bool {
+fn is_closure_like(statement: &Statement<'_>) -> bool {
     match statement.kind {
         StatementKind::Assign(box (_, Rvalue::Aggregate(box ref agg_kind, _))) => match agg_kind {
             AggregateKind::Closure(_, _)
@@ -316,7 +315,7 @@ fn unexpand_into_body_span_with_prev(
 }
 
 #[derive(Debug)]
-struct SpanFromMir {
+pub(super) struct SpanFromMir {
     /// A span that has been extracted from MIR and then "un-expanded" back to
     /// within the current function's `body_span`. After various intermediate
     /// processing steps, this span is emitted as part of the final coverage
@@ -324,10 +323,10 @@ struct SpanFromMir {
     ///
     /// With the exception of `fn_sig_span`, this should always be contained
     /// within `body_span`.
-    span: Span,
+    pub(super) span: Span,
     visible_macro: Option<Symbol>,
-    bcb: BasicCoverageBlock,
-    is_closure: bool,
+    pub(super) bcb: BasicCoverageBlock,
+    pub(super) is_closure: bool,
 }
 
 impl SpanFromMir {
@@ -343,9 +342,4 @@ impl SpanFromMir {
     ) -> Self {
         Self { span, visible_macro, bcb, is_closure }
     }
-
-    fn into_coverage_span(self) -> CoverageSpan {
-        let Self { span, visible_macro: _, bcb, is_closure } = self;
-        CoverageSpan::new(span, bcb, is_closure)
-    }
 }
diff --git a/compiler/rustc_mir_transform/src/deduce_param_attrs.rs b/compiler/rustc_mir_transform/src/deduce_param_attrs.rs
index a6750911394..ca63f5550ae 100644
--- a/compiler/rustc_mir_transform/src/deduce_param_attrs.rs
+++ b/compiler/rustc_mir_transform/src/deduce_param_attrs.rs
@@ -205,8 +205,8 @@ pub fn deduced_param_attrs<'tcx>(
             |(arg_index, local_decl)| DeducedParamAttrs {
                 read_only: !deduce_read_only.mutable_args.contains(arg_index)
                     // We must normalize here to reveal opaques and normalize
-                    // their substs, otherwise we'll see exponential blow-up in
-                    // compile times: #113372
+                    // their generic parameters, otherwise we'll see exponential
+                    // blow-up in compile times: #113372
                     && tcx
                         .normalize_erasing_regions(param_env, local_decl.ty)
                         .is_freeze(tcx, param_env),
diff --git a/compiler/rustc_mir_transform/src/dest_prop.rs b/compiler/rustc_mir_transform/src/dest_prop.rs
index 0ac4ab61d40..2c8201b1903 100644
--- a/compiler/rustc_mir_transform/src/dest_prop.rs
+++ b/compiler/rustc_mir_transform/src/dest_prop.rs
@@ -398,7 +398,8 @@ impl<'alloc> Candidates<'alloc> {
         let candidates = entry.get_mut();
         Self::vec_filter_candidates(p, candidates, f, at);
         if candidates.len() == 0 {
-            entry.remove();
+            // FIXME(#120456) - is `swap_remove` correct?
+            entry.swap_remove();
         }
     }
 
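The FIXME is about ordering: `swap_remove` fills the hole with the last element instead of shifting everything down, so it is O(1) but perturbs iteration order, which is only correct if nothing downstream relies on that order. The same trade-off, illustrated with `Vec`:

    fn main() {
        let mut v = vec!['a', 'b', 'c', 'd'];
        let removed = v.swap_remove(1); // 'd' is swapped into index 1
        assert_eq!(removed, 'b');
        assert_eq!(v, ['a', 'd', 'c']); // order differs from a plain remove()
    }
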
diff --git a/compiler/rustc_mir_transform/src/function_item_references.rs b/compiler/rustc_mir_transform/src/function_item_references.rs
index f413bd9b311..e935dc7f5eb 100644
--- a/compiler/rustc_mir_transform/src/function_item_references.rs
+++ b/compiler/rustc_mir_transform/src/function_item_references.rs
@@ -63,7 +63,7 @@ impl<'tcx> Visitor<'tcx> for FunctionItemRefChecker<'_, 'tcx> {
 
 impl<'tcx> FunctionItemRefChecker<'_, 'tcx> {
     /// Emits a lint for function reference arguments bound by `fmt::Pointer` in calls to the
-    /// function defined by `def_id` with the substitutions `args_ref`.
+    /// function defined by `def_id` with the generic parameters `args_ref`.
     fn check_bound_args(
         &self,
         def_id: DefId,
@@ -83,11 +83,11 @@ impl<'tcx> FunctionItemRefChecker<'_, 'tcx> {
                     for inner_ty in arg_def.walk().filter_map(|arg| arg.as_type()) {
                         // If the inner type matches the type bound by `Pointer`
                         if inner_ty == bound_ty {
-                            // Do a substitution using the parameters from the callsite
-                            let subst_ty =
+                            // Do an instantiation using the parameters from the callsite
+                            let instantiated_ty =
                                 EarlyBinder::bind(inner_ty).instantiate(self.tcx, args_ref);
                             if let Some((fn_id, fn_args)) =
-                                FunctionItemRefChecker::is_fn_ref(subst_ty)
+                                FunctionItemRefChecker::is_fn_ref(instantiated_ty)
                             {
                                 let mut span = self.nth_arg_span(args, arg_num);
                                 if span.from_expansion() {
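
For reference, the situation this checker lints against: passing a reference to a function item (a zero-sized value) into an API bound by `fmt::Pointer`, where the printed address is meaningless. A small illustration of the problem and the intended fix:

    fn f() {}

    fn main() {
        // println!("{:p}", &f);     // linted: address of a zero-sized fn item reference
        println!("{:p}", f as fn()); // intended: cast to a function pointer first
    }
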
diff --git a/compiler/rustc_mir_transform/src/gvn.rs b/compiler/rustc_mir_transform/src/gvn.rs
index 2c7ae53055f..a080e2423d4 100644
--- a/compiler/rustc_mir_transform/src/gvn.rs
+++ b/compiler/rustc_mir_transform/src/gvn.rs
@@ -399,7 +399,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
                     };
                     for (field_index, op) in fields.into_iter().enumerate() {
                         let field_dest = self.ecx.project_field(&variant_dest, field_index).ok()?;
-                        self.ecx.copy_op(op, &field_dest, /*allow_transmute*/ false).ok()?;
+                        self.ecx.copy_op(op, &field_dest).ok()?;
                     }
                     self.ecx.write_discriminant(variant.unwrap_or(FIRST_VARIANT), &dest).ok()?;
                     self.ecx
@@ -561,9 +561,14 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
                         .ok()?;
                     dest.into()
                 }
-                CastKind::FnPtrToPtr
-                | CastKind::PtrToPtr
-                | CastKind::PointerCoercion(
+                CastKind::FnPtrToPtr | CastKind::PtrToPtr => {
+                    let src = self.evaluated[value].as_ref()?;
+                    let src = self.ecx.read_immediate(src).ok()?;
+                    let to = self.ecx.layout_of(to).ok()?;
+                    let ret = self.ecx.ptr_to_ptr(&src, to).ok()?;
+                    ret.into()
+                }
+                CastKind::PointerCoercion(
                     ty::adjustment::PointerCoercion::MutToConstPointer
                     | ty::adjustment::PointerCoercion::ArrayToPointer
                     | ty::adjustment::PointerCoercion::UnsafeFnPointer,
@@ -571,8 +576,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
                     let src = self.evaluated[value].as_ref()?;
                     let src = self.ecx.read_immediate(src).ok()?;
                     let to = self.ecx.layout_of(to).ok()?;
-                    let ret = self.ecx.ptr_to_ptr(&src, to).ok()?;
-                    ret.into()
+                    ImmTy::from_immediate(*src, to).into()
                 }
                 _ => return None,
             },
@@ -853,10 +857,10 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
 
     fn simplify_discriminant(&mut self, place: VnIndex) -> Option<VnIndex> {
         if let Value::Aggregate(enum_ty, variant, _) = *self.get(place)
-            && let AggregateTy::Def(enum_did, enum_substs) = enum_ty
+            && let AggregateTy::Def(enum_did, enum_args) = enum_ty
             && let DefKind::Enum = self.tcx.def_kind(enum_did)
         {
-            let enum_ty = self.tcx.type_of(enum_did).instantiate(self.tcx, enum_substs);
+            let enum_ty = self.tcx.type_of(enum_did).instantiate(self.tcx, enum_args);
             let discr = self.ecx.discriminant_for_variant(enum_ty, variant).ok()?;
             return Some(self.insert_scalar(discr.to_scalar(), discr.layout.ty));
         }
@@ -899,13 +903,11 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
                 assert!(!fields.is_empty());
                 (AggregateTy::Tuple, FIRST_VARIANT)
             }
-            AggregateKind::Closure(did, substs)
-            | AggregateKind::CoroutineClosure(did, substs)
-            | AggregateKind::Coroutine(did, substs) => {
-                (AggregateTy::Def(did, substs), FIRST_VARIANT)
-            }
-            AggregateKind::Adt(did, variant_index, substs, _, None) => {
-                (AggregateTy::Def(did, substs), variant_index)
+            AggregateKind::Closure(did, args)
+            | AggregateKind::CoroutineClosure(did, args)
+            | AggregateKind::Coroutine(did, args) => (AggregateTy::Def(did, args), FIRST_VARIANT),
+            AggregateKind::Adt(did, variant_index, args, _, None) => {
+                (AggregateTy::Def(did, args), variant_index)
             }
             // Do not track unions.
             AggregateKind::Adt(_, _, _, _, Some(_)) => return None,
@@ -1179,8 +1181,7 @@ fn op_to_prop_const<'tcx>(
     }
 
     // Everything failed: create a new allocation to hold the data.
-    let alloc_id =
-        ecx.intern_with_temp_alloc(op.layout, |ecx, dest| ecx.copy_op(op, dest, false)).ok()?;
+    let alloc_id = ecx.intern_with_temp_alloc(op.layout, |ecx, dest| ecx.copy_op(op, dest)).ok()?;
     let value = ConstValue::Indirect { alloc_id, offset: Size::ZERO };
 
     // Check that we do not leak a pointer.
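
The gvn.rs split above separates casts that can genuinely change a pointer value (`PtrToPtr`/`FnPtrToPtr`, e.g. dropping fat-pointer metadata) from coercions that are bitwise no-ops (`MutToConstPointer` and friends), which the pass now models by simply relabeling the immediate with the new layout. A surface-Rust illustration of the difference:

    fn main() {
        let xs = [1u8, 2, 3];
        let slice: &[u8] = &xs;
        let fat: *const [u8] = slice; // fat pointer: data address + length
        let thin = fat as *const u8;  // PtrToPtr: discards the metadata
        let m = thin as *mut u8;
        let back: *const u8 = m;      // MutToConstPointer coercion: bitwise no-op
        assert_eq!(thin, back);
    }
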
diff --git a/compiler/rustc_mir_transform/src/inline.rs b/compiler/rustc_mir_transform/src/inline.rs
index be19bd8349e..956d855ab81 100644
--- a/compiler/rustc_mir_transform/src/inline.rs
+++ b/compiler/rustc_mir_transform/src/inline.rs
@@ -2,6 +2,7 @@
 use crate::deref_separator::deref_finder;
 use rustc_attr::InlineAttr;
 use rustc_const_eval::transform::validate::validate_types;
+use rustc_hir::def::DefKind;
 use rustc_hir::def_id::DefId;
 use rustc_index::bit_set::BitSet;
 use rustc_index::Idx;
@@ -384,6 +385,17 @@ impl<'tcx> Inliner<'tcx> {
                 }
 
                 let fn_sig = self.tcx.fn_sig(def_id).instantiate(self.tcx, args);
+
+                // Additionally, check that the body that we're inlining actually agrees
+                // with the ABI of the trait that the item comes from.
+                if let InstanceDef::Item(instance_def_id) = callee.def
+                    && self.tcx.def_kind(instance_def_id) == DefKind::AssocFn
+                    && let instance_fn_sig = self.tcx.fn_sig(instance_def_id).skip_binder()
+                    && instance_fn_sig.abi() != fn_sig.abi()
+                {
+                    return None;
+                }
+
                 let source_info = SourceInfo { span: fn_span, ..terminator.source_info };
 
                 return Some(CallSite { callee, fn_sig, block: bb, source_info });
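
A condensed sketch of the new inline.rs guard, with rustc's types simplified away: when the resolved callee is an associated function whose declared ABI differs from the ABI of the signature at the callsite, the call is not considered for inlining at all.

    #[derive(Clone, Copy, PartialEq, Debug)]
    enum Abi { Rust, C, RustCall }

    struct CalleeBody { declared_abi: Abi }

    fn try_build_callsite(body: &CalleeBody, callsite_abi: Abi) -> Option<&'static str> {
        if body.declared_abi != callsite_abi {
            return None; // mirrors the early `return None` in the hunk above
        }
        Some("candidate for inlining")
    }
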
diff --git a/compiler/rustc_mir_transform/src/inline/cycle.rs b/compiler/rustc_mir_transform/src/inline/cycle.rs
index 5b03bc361dd..f2b6dcac586 100644
--- a/compiler/rustc_mir_transform/src/inline/cycle.rs
+++ b/compiler/rustc_mir_transform/src/inline/cycle.rs
@@ -80,7 +80,7 @@ pub(crate) fn mir_callgraph_reachable<'tcx>(
                 }
                 // These have no own callable MIR.
                 InstanceDef::Intrinsic(_) | InstanceDef::Virtual(..) => continue,
-                // These have MIR and if that MIR is inlined, substituted and then inlining is run
+                // These have MIR and if that MIR is inlined, instantiated and then inlining is run
                 // again, a function item can end up getting inlined. Thus we'll be able to cause
                 // a cycle that way
                 InstanceDef::VTableShim(_)
@@ -95,7 +95,7 @@ pub(crate) fn mir_callgraph_reachable<'tcx>(
                 // This shim does not call any other functions, thus there can be no recursion.
                 InstanceDef::FnPtrAddrShim(..) => continue,
                 InstanceDef::DropGlue(..) => {
-                    // FIXME: A not fully substituted drop shim can cause ICEs if one attempts to
+                    // FIXME: A not fully instantiated drop shim can cause ICEs if one attempts to
                     // have its MIR built. Likely oli-obk just screwed up the `ParamEnv`s, so this
                     // needs some more analysis.
                     if callee.has_param() {
diff --git a/compiler/rustc_mir_transform/src/instsimplify.rs b/compiler/rustc_mir_transform/src/instsimplify.rs
index a9de37244c5..73102a5f026 100644
--- a/compiler/rustc_mir_transform/src/instsimplify.rs
+++ b/compiler/rustc_mir_transform/src/instsimplify.rs
@@ -29,17 +29,14 @@ impl<'tcx> MirPass<'tcx> for InstSimplify {
                         ctx.simplify_bool_cmp(&statement.source_info, rvalue);
                         ctx.simplify_ref_deref(&statement.source_info, rvalue);
                         ctx.simplify_len(&statement.source_info, rvalue);
-                        ctx.simplify_cast(&statement.source_info, rvalue);
+                        ctx.simplify_cast(rvalue);
                     }
                     _ => {}
                 }
             }
 
             ctx.simplify_primitive_clone(block.terminator.as_mut().unwrap(), &mut block.statements);
-            ctx.simplify_intrinsic_assert(
-                block.terminator.as_mut().unwrap(),
-                &mut block.statements,
-            );
+            ctx.simplify_intrinsic_assert(block.terminator.as_mut().unwrap());
             ctx.simplify_nounwind_call(block.terminator.as_mut().unwrap());
             simplify_duplicate_switch_targets(block.terminator.as_mut().unwrap());
         }
@@ -143,7 +140,7 @@ impl<'tcx> InstSimplifyContext<'tcx, '_> {
         }
     }
 
-    fn simplify_cast(&self, _source_info: &SourceInfo, rvalue: &mut Rvalue<'tcx>) {
+    fn simplify_cast(&self, rvalue: &mut Rvalue<'tcx>) {
         if let Rvalue::Cast(kind, operand, cast_ty) = rvalue {
             let operand_ty = operand.ty(self.local_decls, self.tcx);
             if operand_ty == *cast_ty {
@@ -211,7 +208,7 @@ impl<'tcx> InstSimplifyContext<'tcx, '_> {
         // Only bother looking more if it's easy to know what we're calling
         let Some((fn_def_id, fn_args)) = func.const_fn_def() else { return };
 
-        // Clone needs one subst, so we can cheaply rule out other stuff
+        // Clone needs one arg, so we can cheaply rule out other stuff
         if fn_args.len() != 1 {
             return;
         }
@@ -277,11 +274,7 @@ impl<'tcx> InstSimplifyContext<'tcx, '_> {
         }
     }
 
-    fn simplify_intrinsic_assert(
-        &self,
-        terminator: &mut Terminator<'tcx>,
-        _statements: &mut Vec<Statement<'tcx>>,
-    ) {
+    fn simplify_intrinsic_assert(&self, terminator: &mut Terminator<'tcx>) {
         let TerminatorKind::Call { func, target, .. } = &mut terminator.kind else {
             return;
         };
diff --git a/compiler/rustc_mir_transform/src/jump_threading.rs b/compiler/rustc_mir_transform/src/jump_threading.rs
index 78ba166ba43..ad8f21ffbda 100644
--- a/compiler/rustc_mir_transform/src/jump_threading.rs
+++ b/compiler/rustc_mir_transform/src/jump_threading.rs
@@ -68,6 +68,12 @@ impl<'tcx> MirPass<'tcx> for JumpThreading {
         let def_id = body.source.def_id();
         debug!(?def_id);
 
+        // Optimizing coroutines creates query cycles.
+        if tcx.is_coroutine(def_id) {
+            trace!("Skipped for coroutine {:?}", def_id);
+            return;
+        }
+
         let param_env = tcx.param_env_reveal_all_normalized(def_id);
         let map = Map::new(tcx, body, Some(MAX_PLACES));
         let loop_headers = loop_headers(body);
diff --git a/compiler/rustc_mir_transform/src/lib.rs b/compiler/rustc_mir_transform/src/lib.rs
index 524d62546dd..7f0e6f90dbb 100644
--- a/compiler/rustc_mir_transform/src/lib.rs
+++ b/compiler/rustc_mir_transform/src/lib.rs
@@ -264,6 +264,7 @@ fn mir_const_qualif(tcx: TyCtxt<'_>, def: LocalDefId) -> ConstQualifs {
     let body = &tcx.mir_const(def).borrow();
 
     if body.return_ty().references_error() {
+        // It's possible to reach here without an error being emitted (#121103).
         tcx.dcx().span_delayed_bug(body.span, "mir_const_qualif: MIR had errors");
         return Default::default();
     }
@@ -435,7 +436,7 @@ fn mir_drops_elaborated_and_const_checked(tcx: TyCtxt<'_>, def: LocalDefId) -> &
     //
     // We manually filter the predicates, skipping anything that's not
     // "global". We are in a potentially generic context
-    // (e.g. we are evaluating a function without substituting generic
+    // (e.g. we are evaluating a function without instantiating generic
     // parameters, so this filtering serves two purposes:
     //
     // 1. We skip evaluating any predicates that we would
@@ -575,10 +576,10 @@ fn run_optimization_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
             &inline::Inline,
             // Code from other crates may have storage markers, so this needs to happen after inlining.
             &remove_storage_markers::RemoveStorageMarkers,
-            // Inlining and substitution may introduce ZST and useless drops.
+            // Inlining and instantiation may introduce ZST and useless drops.
             &remove_zsts::RemoveZsts,
             &remove_unneeded_drops::RemoveUnneededDrops,
-            // Type substitution may create uninhabited enums.
+            // Type instantiation may create uninhabited enums.
             &uninhabited_enum_branching::UninhabitedEnumBranching,
             &unreachable_prop::UnreachablePropagation,
             &o1(simplify::SimplifyCfg::AfterUninhabitedEnumBranching),
@@ -651,7 +652,6 @@ fn inner_optimized_mir(tcx: TyCtxt<'_>, did: LocalDefId) -> Body<'_> {
     debug!("about to call mir_drops_elaborated...");
     let body = tcx.mir_drops_elaborated_and_const_checked(did).steal();
     let mut body = remap_mir_for_const_eval_select(tcx, body, hir::Constness::NotConst);
-    debug!("body: {:#?}", body);
 
     if body.tainted_by_errors.is_some() {
         return body;
@@ -672,7 +672,7 @@ fn inner_optimized_mir(tcx: TyCtxt<'_>, did: LocalDefId) -> Body<'_> {
 }
 
 /// Fetch all the promoteds of an item and prepare their MIR bodies to be ready for
-/// constant evaluation once all substitutions become known.
+/// constant evaluation once all generic parameters become known.
 fn promoted_mir(tcx: TyCtxt<'_>, def: LocalDefId) -> &IndexVec<Promoted, Body<'_>> {
     if tcx.is_constructor(def.to_def_id()) {
         return tcx.arena.alloc(IndexVec::new());
diff --git a/compiler/rustc_mir_transform/src/ref_prop.rs b/compiler/rustc_mir_transform/src/ref_prop.rs
index 05a3ac3cc75..d5642be5513 100644
--- a/compiler/rustc_mir_transform/src/ref_prop.rs
+++ b/compiler/rustc_mir_transform/src/ref_prop.rs
@@ -44,7 +44,7 @@ use crate::ssa::{SsaLocals, StorageLiveLocals};
 ///
 /// # Liveness
 ///
-/// When performing a substitution, we must take care not to introduce uses of dangling locals.
+/// When performing an instantiation, we must take care not to introduce uses of dangling locals.
 /// To ensure this, we walk the body with the `MaybeStorageDead` dataflow analysis:
 /// - if we want to replace `*x` by reborrow `*y` and `y` may be dead, we allow replacement and
 ///   mark storage statements on `y` for removal;
@@ -55,7 +55,7 @@ use crate::ssa::{SsaLocals, StorageLiveLocals};
 ///
 /// For `&mut` borrows, we also need to preserve the uniqueness property:
 /// we must avoid creating a state where we interleave uses of `*_1` and `_2`.
-/// To do it, we only perform full substitution of mutable borrows:
+/// To do it, we only perform full instantiation of mutable borrows:
 /// we replace either all or none of the occurrences of `*_1`.
 ///
 /// Some care has to be taken when `_1` is copied in other locals.
@@ -63,10 +63,10 @@ use crate::ssa::{SsaLocals, StorageLiveLocals};
 ///   _3 = *_1;
 ///   _4 = _1
 ///   _5 = *_4
-/// In such cases, fully substituting `_1` means fully substituting all of the copies.
+/// In such cases, fully instantiating `_1` means fully instantiating all of the copies.
 ///
 /// For immutable borrows, we do not need to preserve such uniqueness property,
-/// so we perform all the possible substitutions without removing the `_1 = &_2` statement.
+/// so we perform all the possible instantiations without removing the `_1 = &_2` statement.
 pub struct ReferencePropagation;
 
 impl<'tcx> MirPass<'tcx> for ReferencePropagation {
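
A surface-level analogue of the uniqueness property the ref_prop comments describe: while a `&mut` borrow is live, the borrowed place must not be used directly, so replacing only some uses of `*_1` with `_2` could interleave accesses that MIR's aliasing rules assume never interleave.

    fn main() {
        let mut x = 0;
        let r = &mut x; // `_1 = &mut _2` in MIR terms
        *r += 1;        // use through the borrow
        // x += 1;      // a direct use of `x` here would violate uniqueness
        *r += 1;
        assert_eq!(x, 2);
    }
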
diff --git a/compiler/rustc_mir_transform/src/remove_noop_landing_pads.rs b/compiler/rustc_mir_transform/src/remove_noop_landing_pads.rs
index 095119e2e3f..fb52bfa468a 100644
--- a/compiler/rustc_mir_transform/src/remove_noop_landing_pads.rs
+++ b/compiler/rustc_mir_transform/src/remove_noop_landing_pads.rs
@@ -15,7 +15,8 @@ impl<'tcx> MirPass<'tcx> for RemoveNoopLandingPads {
     }
 
     fn run_pass(&self, _tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
-        debug!("remove_noop_landing_pads({:?})", body);
+        let def_id = body.source.def_id();
+        debug!(?def_id);
         self.remove_nop_landing_pads(body)
     }
 }
@@ -81,8 +82,6 @@ impl RemoveNoopLandingPads {
     }
 
     fn remove_nop_landing_pads(&self, body: &mut Body<'_>) {
-        debug!("body: {:#?}", body);
-
         // Skip the pass if there are no blocks with a resume terminator.
         let has_resume = body
             .basic_blocks
diff --git a/compiler/rustc_mir_transform/src/shim.rs b/compiler/rustc_mir_transform/src/shim.rs
index 860d280be29..75613a2c555 100644
--- a/compiler/rustc_mir_transform/src/shim.rs
+++ b/compiler/rustc_mir_transform/src/shim.rs
@@ -477,7 +477,7 @@ struct CloneShimBuilder<'tcx> {
 
 impl<'tcx> CloneShimBuilder<'tcx> {
     fn new(tcx: TyCtxt<'tcx>, def_id: DefId, self_ty: Ty<'tcx>) -> Self {
-        // we must subst the self_ty because it's
+        // we must instantiate the self_ty because it's
         // otherwise going to be TySelf and we can't index
         // or access fields of a Place of type TySelf.
         let sig = tcx.fn_sig(def_id).instantiate(tcx, &[self_ty.into()]);
@@ -716,8 +716,8 @@ fn build_call_shim<'tcx>(
     call_kind: CallKind<'tcx>,
 ) -> Body<'tcx> {
     // `FnPtrShim` contains the fn pointer type that a call shim is being built for - this is used
-    // to substitute into the signature of the shim. It is not necessary for users of this
-    // MIR body to perform further substitutions (see `InstanceDef::has_polymorphic_mir_body`).
+    // to instantiate into the signature of the shim. It is not necessary for users of this
+    // MIR body to perform further instantiations (see `InstanceDef::has_polymorphic_mir_body`).
     let (sig_args, untuple_args) = if let ty::InstanceDef::FnPtrShim(_, ty) = instance {
         let sig = tcx.instantiate_bound_regions_with_erased(ty.fn_sig(tcx));
 
diff --git a/compiler/rustc_mir_transform/src/ssa.rs b/compiler/rustc_mir_transform/src/ssa.rs
index 1ed3b14e755..e4fdbd6ae69 100644
--- a/compiler/rustc_mir_transform/src/ssa.rs
+++ b/compiler/rustc_mir_transform/src/ssa.rs
@@ -170,7 +170,7 @@ impl SsaLocals {
     ///   _c => _a
     ///   _d => _a // transitively through _c
     ///
-    /// Exception: we do not see through the return place, as it cannot be substituted.
+    /// Exception: we do not see through the return place, as it cannot be instantiated.
     pub fn copy_classes(&self) -> &IndexSlice<Local, Local> {
         &self.copy_classes
     }