Diffstat (limited to 'compiler/rustc_mir_transform/src')
-rw-r--r--  compiler/rustc_mir_transform/src/const_prop.rs                      |   8
-rw-r--r--  compiler/rustc_mir_transform/src/coroutine.rs                       | 275
-rw-r--r--  compiler/rustc_mir_transform/src/coverage/spans.rs                  | 172
-rw-r--r--  compiler/rustc_mir_transform/src/coverage/spans/from_mir.rs         |  25
-rw-r--r--  compiler/rustc_mir_transform/src/coverage/test_macros/Cargo.toml    |   7
-rw-r--r--  compiler/rustc_mir_transform/src/coverage/test_macros/src/lib.rs    |   6
-rw-r--r--  compiler/rustc_mir_transform/src/coverage/tests.rs                  |  90
-rw-r--r--  compiler/rustc_mir_transform/src/dataflow_const_prop.rs             |   3
-rw-r--r--  compiler/rustc_mir_transform/src/errors.rs                          |   6
-rw-r--r--  compiler/rustc_mir_transform/src/gvn.rs                             |   7
10 files changed, 324 insertions(+), 275 deletions(-)
diff --git a/compiler/rustc_mir_transform/src/const_prop.rs b/compiler/rustc_mir_transform/src/const_prop.rs
index b96125de95e..d88b33cc973 100644
--- a/compiler/rustc_mir_transform/src/const_prop.rs
+++ b/compiler/rustc_mir_transform/src/const_prop.rs
@@ -11,6 +11,7 @@ use rustc_middle::mir::visit::{
     MutVisitor, MutatingUseContext, NonMutatingUseContext, PlaceContext, Visitor,
 };
 use rustc_middle::mir::*;
+use rustc_middle::query::TyCtxtAt;
 use rustc_middle::ty::layout::{LayoutError, LayoutOf, LayoutOfHelpers, TyAndLayout};
 use rustc_middle::ty::{self, GenericArgs, Instance, ParamEnv, Ty, TyCtxt, TypeVisitableExt};
 use rustc_span::{def_id::DefId, Span};
@@ -220,7 +221,7 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for ConstPropMachine<'mir, 'tcx>
     }
 
     fn before_access_global(
-        _tcx: TyCtxt<'tcx>,
+        _tcx: TyCtxtAt<'tcx>,
         _machine: &Self,
         _alloc_id: AllocId,
         alloc: ConstAllocation<'tcx>,
@@ -240,10 +241,7 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for ConstPropMachine<'mir, 'tcx>
     }
 
     #[inline(always)]
-    fn expose_ptr(
-        _ecx: &mut InterpCx<'mir, 'tcx, Self>,
-        _ptr: Pointer<AllocId>,
-    ) -> InterpResult<'tcx> {
+    fn expose_ptr(_ecx: &mut InterpCx<'mir, 'tcx, Self>, _ptr: Pointer) -> InterpResult<'tcx> {
         throw_machine_stop_str!("exposing pointers isn't supported in ConstProp")
     }
 
diff --git a/compiler/rustc_mir_transform/src/coroutine.rs b/compiler/rustc_mir_transform/src/coroutine.rs
index 1373596fd2b..737fb6bf612 100644
--- a/compiler/rustc_mir_transform/src/coroutine.rs
+++ b/compiler/rustc_mir_transform/src/coroutine.rs
@@ -66,9 +66,9 @@ use rustc_index::{Idx, IndexVec};
 use rustc_middle::mir::dump_mir;
 use rustc_middle::mir::visit::{MutVisitor, PlaceContext, Visitor};
 use rustc_middle::mir::*;
+use rustc_middle::ty::CoroutineArgs;
 use rustc_middle::ty::InstanceDef;
-use rustc_middle::ty::{self, AdtDef, Ty, TyCtxt};
-use rustc_middle::ty::{CoroutineArgs, GenericArgsRef};
+use rustc_middle::ty::{self, Ty, TyCtxt};
 use rustc_mir_dataflow::impls::{
     MaybeBorrowedLocals, MaybeLiveLocals, MaybeRequiresStorage, MaybeStorageLive,
 };
@@ -225,8 +225,6 @@ struct SuspensionPoint<'tcx> {
 struct TransformVisitor<'tcx> {
     tcx: TyCtxt<'tcx>,
     coroutine_kind: hir::CoroutineKind,
-    state_adt_ref: AdtDef<'tcx>,
-    state_args: GenericArgsRef<'tcx>,
 
     // The type of the discriminant in the coroutine struct
     discr_ty: Ty<'tcx>,
@@ -245,22 +243,56 @@ struct TransformVisitor<'tcx> {
     always_live_locals: BitSet<Local>,
 
     // The original RETURN_PLACE local
-    new_ret_local: Local,
+    old_ret_local: Local,
+
+    old_yield_ty: Ty<'tcx>,
+
+    old_ret_ty: Ty<'tcx>,
 }
 
 impl<'tcx> TransformVisitor<'tcx> {
     fn insert_none_ret_block(&self, body: &mut Body<'tcx>) -> BasicBlock {
         let block = BasicBlock::new(body.basic_blocks.len());
-
         let source_info = SourceInfo::outermost(body.span);
 
-        let (kind, idx) = self.coroutine_state_adt_and_variant_idx(true);
-        assert_eq!(self.state_adt_ref.variant(idx).fields.len(), 0);
+        let none_value = match self.coroutine_kind {
+            CoroutineKind::Async(_) => span_bug!(body.span, "`Future`s are not fused inherently"),
+            CoroutineKind::Coroutine => span_bug!(body.span, "`Coroutine`s cannot be fused"),
+            // `gen` continues to return `None`
+            CoroutineKind::Gen(_) => {
+                let option_def_id = self.tcx.require_lang_item(LangItem::Option, None);
+                Rvalue::Aggregate(
+                    Box::new(AggregateKind::Adt(
+                        option_def_id,
+                        VariantIdx::from_usize(0),
+                        self.tcx.mk_args(&[self.old_yield_ty.into()]),
+                        None,
+                        None,
+                    )),
+                    IndexVec::new(),
+                )
+            }
+            // `async gen` continues to return `Poll::Ready(None)`
+            CoroutineKind::AsyncGen(_) => {
+                let ty::Adt(_poll_adt, args) = *self.old_yield_ty.kind() else { bug!() };
+                let ty::Adt(_option_adt, args) = *args.type_at(0).kind() else { bug!() };
+                let yield_ty = args.type_at(0);
+                Rvalue::Use(Operand::Constant(Box::new(ConstOperand {
+                    span: source_info.span,
+                    const_: Const::Unevaluated(
+                        UnevaluatedConst::new(
+                            self.tcx.require_lang_item(LangItem::AsyncGenFinished, None),
+                            self.tcx.mk_args(&[yield_ty.into()]),
+                        ),
+                        self.old_yield_ty,
+                    ),
+                    user_ty: None,
+                })))
+            }
+        };
+
         let statements = vec![Statement {
-            kind: StatementKind::Assign(Box::new((
-                Place::return_place(),
-                Rvalue::Aggregate(Box::new(kind), IndexVec::new()),
-            ))),
+            kind: StatementKind::Assign(Box::new((Place::return_place(), none_value))),
             source_info,
         }];
 
@@ -273,23 +305,6 @@ impl<'tcx> TransformVisitor<'tcx> {
         block
     }
 
-    fn coroutine_state_adt_and_variant_idx(
-        &self,
-        is_return: bool,
-    ) -> (AggregateKind<'tcx>, VariantIdx) {
-        let idx = VariantIdx::new(match (is_return, self.coroutine_kind) {
-            (true, hir::CoroutineKind::Coroutine) => 1, // CoroutineState::Complete
-            (false, hir::CoroutineKind::Coroutine) => 0, // CoroutineState::Yielded
-            (true, hir::CoroutineKind::Async(_)) => 0,  // Poll::Ready
-            (false, hir::CoroutineKind::Async(_)) => 1, // Poll::Pending
-            (true, hir::CoroutineKind::Gen(_)) => 0,    // Option::None
-            (false, hir::CoroutineKind::Gen(_)) => 1,   // Option::Some
-        });
-
-        let kind = AggregateKind::Adt(self.state_adt_ref.did(), idx, self.state_args, None, None);
-        (kind, idx)
-    }
-
     // Make a `CoroutineState` or `Poll` variant assignment.
     //
     // `core::ops::CoroutineState` only has single element tuple variants,
@@ -302,51 +317,119 @@ impl<'tcx> TransformVisitor<'tcx> {
         is_return: bool,
         statements: &mut Vec<Statement<'tcx>>,
     ) {
-        let (kind, idx) = self.coroutine_state_adt_and_variant_idx(is_return);
-
-        match self.coroutine_kind {
-            // `Poll::Pending`
+        let rvalue = match self.coroutine_kind {
             CoroutineKind::Async(_) => {
-                if !is_return {
-                    assert_eq!(self.state_adt_ref.variant(idx).fields.len(), 0);
-
-                    // FIXME(swatinem): assert that `val` is indeed unit?
-                    statements.push(Statement {
-                        kind: StatementKind::Assign(Box::new((
-                            Place::return_place(),
-                            Rvalue::Aggregate(Box::new(kind), IndexVec::new()),
-                        ))),
-                        source_info,
-                    });
-                    return;
+                let poll_def_id = self.tcx.require_lang_item(LangItem::Poll, None);
+                let args = self.tcx.mk_args(&[self.old_ret_ty.into()]);
+                if is_return {
+                    // Poll::Ready(val)
+                    Rvalue::Aggregate(
+                        Box::new(AggregateKind::Adt(
+                            poll_def_id,
+                            VariantIdx::from_usize(0),
+                            args,
+                            None,
+                            None,
+                        )),
+                        IndexVec::from_raw(vec![val]),
+                    )
+                } else {
+                    // Poll::Pending
+                    Rvalue::Aggregate(
+                        Box::new(AggregateKind::Adt(
+                            poll_def_id,
+                            VariantIdx::from_usize(1),
+                            args,
+                            None,
+                            None,
+                        )),
+                        IndexVec::new(),
+                    )
                 }
             }
-            // `Option::None`
             CoroutineKind::Gen(_) => {
+                let option_def_id = self.tcx.require_lang_item(LangItem::Option, None);
+                let args = self.tcx.mk_args(&[self.old_yield_ty.into()]);
                 if is_return {
-                    assert_eq!(self.state_adt_ref.variant(idx).fields.len(), 0);
-
-                    statements.push(Statement {
-                        kind: StatementKind::Assign(Box::new((
-                            Place::return_place(),
-                            Rvalue::Aggregate(Box::new(kind), IndexVec::new()),
-                        ))),
-                        source_info,
-                    });
-                    return;
+                    // None
+                    Rvalue::Aggregate(
+                        Box::new(AggregateKind::Adt(
+                            option_def_id,
+                            VariantIdx::from_usize(0),
+                            args,
+                            None,
+                            None,
+                        )),
+                        IndexVec::new(),
+                    )
+                } else {
+                    // Some(val)
+                    Rvalue::Aggregate(
+                        Box::new(AggregateKind::Adt(
+                            option_def_id,
+                            VariantIdx::from_usize(1),
+                            args,
+                            None,
+                            None,
+                        )),
+                        IndexVec::from_raw(vec![val]),
+                    )
                 }
             }
-            CoroutineKind::Coroutine => {}
-        }
-
-        // else: `Poll::Ready(x)`, `CoroutineState::Yielded(x)`, `CoroutineState::Complete(x)`, or `Option::Some(x)`
-        assert_eq!(self.state_adt_ref.variant(idx).fields.len(), 1);
+            CoroutineKind::AsyncGen(_) => {
+                if is_return {
+                    let ty::Adt(_poll_adt, args) = *self.old_yield_ty.kind() else { bug!() };
+                    let ty::Adt(_option_adt, args) = *args.type_at(0).kind() else { bug!() };
+                    let yield_ty = args.type_at(0);
+                    Rvalue::Use(Operand::Constant(Box::new(ConstOperand {
+                        span: source_info.span,
+                        const_: Const::Unevaluated(
+                            UnevaluatedConst::new(
+                                self.tcx.require_lang_item(LangItem::AsyncGenFinished, None),
+                                self.tcx.mk_args(&[yield_ty.into()]),
+                            ),
+                            self.old_yield_ty,
+                        ),
+                        user_ty: None,
+                    })))
+                } else {
+                    Rvalue::Use(val)
+                }
+            }
+            CoroutineKind::Coroutine => {
+                let coroutine_state_def_id =
+                    self.tcx.require_lang_item(LangItem::CoroutineState, None);
+                let args = self.tcx.mk_args(&[self.old_yield_ty.into(), self.old_ret_ty.into()]);
+                if is_return {
+                    // CoroutineState::Complete(val)
+                    Rvalue::Aggregate(
+                        Box::new(AggregateKind::Adt(
+                            coroutine_state_def_id,
+                            VariantIdx::from_usize(1),
+                            args,
+                            None,
+                            None,
+                        )),
+                        IndexVec::from_raw(vec![val]),
+                    )
+                } else {
+                    // CoroutineState::Yielded(val)
+                    Rvalue::Aggregate(
+                        Box::new(AggregateKind::Adt(
+                            coroutine_state_def_id,
+                            VariantIdx::from_usize(0),
+                            args,
+                            None,
+                            None,
+                        )),
+                        IndexVec::from_raw(vec![val]),
+                    )
+                }
+            }
+        };
 
         statements.push(Statement {
-            kind: StatementKind::Assign(Box::new((
-                Place::return_place(),
-                Rvalue::Aggregate(Box::new(kind), [val].into()),
-            ))),
+            kind: StatementKind::Assign(Box::new((Place::return_place(), rvalue))),
             source_info,
         });
     }
@@ -420,7 +503,7 @@ impl<'tcx> MutVisitor<'tcx> for TransformVisitor<'tcx> {
 
         let ret_val = match data.terminator().kind {
             TerminatorKind::Return => {
-                Some((true, None, Operand::Move(Place::from(self.new_ret_local)), None))
+                Some((true, None, Operand::Move(Place::from(self.old_ret_local)), None))
             }
             TerminatorKind::Yield { ref value, resume, resume_arg, drop } => {
                 Some((false, Some((resume, resume_arg)), value.clone(), drop))
@@ -679,15 +762,15 @@ fn locals_live_across_suspend_points<'tcx>(
     let borrowed_locals_results =
         MaybeBorrowedLocals.into_engine(tcx, body).pass_name("coroutine").iterate_to_fixpoint();
 
-    let mut borrowed_locals_cursor = borrowed_locals_results.cloned_results_cursor(body);
+    let mut borrowed_locals_cursor = borrowed_locals_results.clone().into_results_cursor(body);
 
     // Calculate the MIR locals that we actually need to keep storage around
     // for.
-    let mut requires_storage_results =
-        MaybeRequiresStorage::new(borrowed_locals_results.cloned_results_cursor(body))
+    let mut requires_storage_cursor =
+        MaybeRequiresStorage::new(borrowed_locals_results.into_results_cursor(body))
             .into_engine(tcx, body)
-            .iterate_to_fixpoint();
-    let mut requires_storage_cursor = requires_storage_results.as_results_cursor(body);
+            .iterate_to_fixpoint()
+            .into_results_cursor(body);
 
     // Calculate the liveness of MIR locals ignoring borrows.
     let mut liveness = MaybeLiveLocals
@@ -763,7 +846,7 @@ fn locals_live_across_suspend_points<'tcx>(
         body,
         &saved_locals,
         always_live_locals.clone(),
-        requires_storage_results,
+        requires_storage_cursor.into_results(),
     );
 
     LivenessInfo {
@@ -828,7 +911,7 @@ fn compute_storage_conflicts<'mir, 'tcx>(
     body: &'mir Body<'tcx>,
     saved_locals: &CoroutineSavedLocals,
     always_live_locals: BitSet<Local>,
-    mut requires_storage: rustc_mir_dataflow::Results<'tcx, MaybeRequiresStorage<'_, 'mir, 'tcx>>,
+    mut requires_storage: rustc_mir_dataflow::Results<'tcx, MaybeRequiresStorage<'mir, 'tcx>>,
 ) -> BitMatrix<CoroutineSavedLocal, CoroutineSavedLocal> {
     assert_eq!(body.local_decls.len(), saved_locals.domain_size());
 
@@ -1334,7 +1417,9 @@ fn create_coroutine_resume_function<'tcx>(
             CoroutineKind::Async(_) | CoroutineKind::Coroutine => {
                 insert_panic_block(tcx, body, ResumedAfterReturn(coroutine_kind))
             }
-            CoroutineKind::Gen(_) => transform.insert_none_ret_block(body),
+            CoroutineKind::AsyncGen(_) | CoroutineKind::Gen(_) => {
+                transform.insert_none_ret_block(body)
+            }
         };
         cases.insert(1, (RETURNED, block));
     }
@@ -1493,10 +1578,11 @@ pub(crate) fn mir_coroutine_witnesses<'tcx>(
 
 impl<'tcx> MirPass<'tcx> for StateTransform {
     fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
-        let Some(yield_ty) = body.yield_ty() else {
+        let Some(old_yield_ty) = body.yield_ty() else {
             // This only applies to coroutines
             return;
         };
+        let old_ret_ty = body.return_ty();
 
         assert!(body.coroutine_drop().is_none());
 
@@ -1519,38 +1605,42 @@ impl<'tcx> MirPass<'tcx> for StateTransform {
         };
 
         let is_async_kind = matches!(body.coroutine_kind(), Some(CoroutineKind::Async(_)));
+        let is_async_gen_kind = matches!(body.coroutine_kind(), Some(CoroutineKind::AsyncGen(_)));
         let is_gen_kind = matches!(body.coroutine_kind(), Some(CoroutineKind::Gen(_)));
-        let (state_adt_ref, state_args) = match body.coroutine_kind().unwrap() {
+        let new_ret_ty = match body.coroutine_kind().unwrap() {
             CoroutineKind::Async(_) => {
                 // Compute Poll<return_ty>
                 let poll_did = tcx.require_lang_item(LangItem::Poll, None);
                 let poll_adt_ref = tcx.adt_def(poll_did);
-                let poll_args = tcx.mk_args(&[body.return_ty().into()]);
-                (poll_adt_ref, poll_args)
+                let poll_args = tcx.mk_args(&[old_ret_ty.into()]);
+                Ty::new_adt(tcx, poll_adt_ref, poll_args)
             }
             CoroutineKind::Gen(_) => {
                 // Compute Option<yield_ty>
                 let option_did = tcx.require_lang_item(LangItem::Option, None);
                 let option_adt_ref = tcx.adt_def(option_did);
-                let option_args = tcx.mk_args(&[body.yield_ty().unwrap().into()]);
-                (option_adt_ref, option_args)
+                let option_args = tcx.mk_args(&[old_yield_ty.into()]);
+                Ty::new_adt(tcx, option_adt_ref, option_args)
+            }
+            CoroutineKind::AsyncGen(_) => {
+                // The yield ty is already `Poll<Option<yield_ty>>`
+                old_yield_ty
             }
             CoroutineKind::Coroutine => {
                 // Compute CoroutineState<yield_ty, return_ty>
                 let state_did = tcx.require_lang_item(LangItem::CoroutineState, None);
                 let state_adt_ref = tcx.adt_def(state_did);
-                let state_args = tcx.mk_args(&[yield_ty.into(), body.return_ty().into()]);
-                (state_adt_ref, state_args)
+                let state_args = tcx.mk_args(&[old_yield_ty.into(), old_ret_ty.into()]);
+                Ty::new_adt(tcx, state_adt_ref, state_args)
             }
         };
-        let ret_ty = Ty::new_adt(tcx, state_adt_ref, state_args);
 
-        // We rename RETURN_PLACE which has type mir.return_ty to new_ret_local
+        // We rename RETURN_PLACE which has type mir.return_ty to old_ret_local
         // RETURN_PLACE then is a fresh unused local with type ret_ty.
-        let new_ret_local = replace_local(RETURN_PLACE, ret_ty, body, tcx);
+        let old_ret_local = replace_local(RETURN_PLACE, new_ret_ty, body, tcx);
 
         // Replace all occurrences of `ResumeTy` with `&mut Context<'_>` within async bodies.
-        if is_async_kind {
+        if is_async_kind || is_async_gen_kind {
             transform_async_context(tcx, body);
         }
 
@@ -1564,9 +1654,10 @@ impl<'tcx> MirPass<'tcx> for StateTransform {
         } else {
             body.local_decls[resume_local].ty
         };
-        let new_resume_local = replace_local(resume_local, resume_ty, body, tcx);
+        let old_resume_local = replace_local(resume_local, resume_ty, body, tcx);
 
-        // When first entering the coroutine, move the resume argument into its new local.
+        // When first entering the coroutine, move the resume argument into its old local
+        // (which is now a coroutine interior).
         let source_info = SourceInfo::outermost(body.span);
         let stmts = &mut body.basic_blocks_mut()[START_BLOCK].statements;
         stmts.insert(
@@ -1574,7 +1665,7 @@ impl<'tcx> MirPass<'tcx> for StateTransform {
             Statement {
                 source_info,
                 kind: StatementKind::Assign(Box::new((
-                    new_resume_local.into(),
+                    old_resume_local.into(),
                     Rvalue::Use(Operand::Move(resume_local.into())),
                 ))),
             },
@@ -1610,14 +1701,14 @@ impl<'tcx> MirPass<'tcx> for StateTransform {
         let mut transform = TransformVisitor {
             tcx,
             coroutine_kind: body.coroutine_kind().unwrap(),
-            state_adt_ref,
-            state_args,
             remap,
             storage_liveness,
             always_live_locals,
             suspension_points: Vec::new(),
-            new_ret_local,
+            old_ret_local,
             discr_ty,
+            old_ret_ty,
+            old_yield_ty,
         };
         transform.visit_body(body);
 
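The wrapper types that `StateTransform` now builds directly from lang items can be pictured with plain library types. A minimal illustrative sketch (not part of the patch; the local `CoroutineState` enum is a stand-in for the unstable `std::ops::CoroutineState`, with variant order matching the `VariantIdx` values used above):

    use std::task::Poll;

    // Stand-in for `std::ops::CoroutineState<Y, R>`; variant order matches the pass.
    enum CoroutineState<Y, R> {
        Yielded(Y),  // variant 0
        Complete(R), // variant 1
    }

    fn main() {
        // async: `is_return` selects Poll::Ready(val) (variant 0), else Poll::Pending (variant 1).
        let _ready: Poll<i32> = Poll::Ready(42);
        let _pending: Poll<i32> = Poll::Pending;

        // gen: `is_return` selects None (variant 0), else Some(val) (variant 1).
        let _done: Option<i32> = None;
        let _yielded: Option<i32> = Some(7);

        // async gen: the yield type is already Poll<Option<Y>>, and the "return"
        // value is the AsyncGenFinished constant, i.e. Poll::Ready(None).
        let _finished: Poll<Option<i32>> = Poll::Ready(None);

        // plain coroutine: CoroutineState<Y, R>.
        let _y: CoroutineState<i32, &str> = CoroutineState::Yielded(1);
        let _r: CoroutineState<i32, &str> = CoroutineState::Complete("done");
    }
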
diff --git a/compiler/rustc_mir_transform/src/coverage/spans.rs b/compiler/rustc_mir_transform/src/coverage/spans.rs
index e0abb5da047..05ad14f1525 100644
--- a/compiler/rustc_mir_transform/src/coverage/spans.rs
+++ b/compiler/rustc_mir_transform/src/coverage/spans.rs
@@ -89,10 +89,10 @@ impl CoverageSpan {
         }
     }
 
-    pub fn merge_from(&mut self, mut other: CoverageSpan) {
-        debug_assert!(self.is_mergeable(&other));
+    pub fn merge_from(&mut self, other: &Self) {
+        debug_assert!(self.is_mergeable(other));
         self.span = self.span.to(other.span);
-        self.merged_spans.append(&mut other.merged_spans);
+        self.merged_spans.extend_from_slice(&other.merged_spans);
     }
 
     pub fn cutoff_statements_at(&mut self, cutoff_pos: BytePos) {
@@ -129,16 +129,14 @@ impl CoverageSpan {
     /// If the span is part of a macro, and the macro is visible (expands directly to the given
     /// body_span), returns the macro name symbol.
     pub fn visible_macro(&self, body_span: Span) -> Option<Symbol> {
-        if let Some(current_macro) = self.current_macro()
-            && self
-                .expn_span
-                .parent_callsite()
-                .unwrap_or_else(|| bug!("macro must have a parent"))
-                .eq_ctxt(body_span)
-        {
-            return Some(current_macro);
-        }
-        None
+        let current_macro = self.current_macro()?;
+        let parent_callsite = self.expn_span.parent_callsite()?;
+
+        // In addition to matching the context of the body span, the parent callsite
+        // must also be the source callsite, i.e. the parent must have no parent.
+        let is_visible_macro =
+            parent_callsite.parent_callsite().is_none() && parent_callsite.eq_ctxt(body_span);
+        is_visible_macro.then_some(current_macro)
     }
 
     pub fn is_macro_expansion(&self) -> bool {
@@ -269,7 +267,7 @@ impl<'a> CoverageSpansGenerator<'a> {
             if curr.is_mergeable(prev) {
                 debug!("  same bcb (and neither is a closure), merge with prev={prev:?}");
                 let prev = self.take_prev();
-                self.curr_mut().merge_from(prev);
+                self.curr_mut().merge_from(&prev);
                 self.maybe_push_macro_name_span();
             // Note that curr.span may now differ from curr_original_span
             } else if prev.span.hi() <= curr.span.lo() {
@@ -277,7 +275,7 @@ impl<'a> CoverageSpansGenerator<'a> {
                     "  different bcbs and disjoint spans, so keep curr for next iter, and add prev={prev:?}",
                 );
                 let prev = self.take_prev();
-                self.push_refined_span(prev);
+                self.refined_spans.push(prev);
                 self.maybe_push_macro_name_span();
             } else if prev.is_closure {
                 // drop any equal or overlapping span (`curr`) and keep `prev` to test again in the
@@ -321,33 +319,30 @@ impl<'a> CoverageSpansGenerator<'a> {
             }
         }
 
-        let prev = self.take_prev();
-        debug!("    AT END, adding last prev={prev:?}");
-
-        // Take `pending_dups` so that we can drain it while calling self methods.
-        // It is never used as a field after this point.
-        for dup in std::mem::take(&mut self.pending_dups) {
+        // Drain any remaining dups into the output.
+        for dup in self.pending_dups.drain(..) {
             debug!("    ...adding at least one pending dup={:?}", dup);
-            self.push_refined_span(dup);
+            self.refined_spans.push(dup);
         }
 
-        // Async functions wrap a closure that implements the body to be executed. The enclosing
-        // function is called and returns an `impl Future` without initially executing any of the
-        // body. To avoid showing the return from the enclosing function as a "covered" return from
-        // the closure, the enclosing function's `TerminatorKind::Return`s `CoverageSpan` is
-        // excluded. The closure's `Return` is the only one that will be counted. This provides
-        // adequate coverage, and more intuitive counts. (Avoids double-counting the closing brace
-        // of the function body.)
-        let body_ends_with_closure = if let Some(last_covspan) = self.refined_spans.last() {
-            last_covspan.is_closure && last_covspan.span.hi() == self.body_span.hi()
-        } else {
-            false
-        };
-
-        if !body_ends_with_closure {
-            self.push_refined_span(prev);
+        // There is usually a final span remaining in `prev` after the loop ends,
+        // so add it to the output as well.
+        if let Some(prev) = self.some_prev.take() {
+            debug!("    AT END, adding last prev={prev:?}");
+            self.refined_spans.push(prev);
         }
 
+        // Do one last merge pass, to simplify the output.
+        self.refined_spans.dedup_by(|b, a| {
+            if a.is_mergeable(b) {
+                debug!(?a, ?b, "merging list-adjacent refined spans");
+                a.merge_from(b);
+                true
+            } else {
+                false
+            }
+        });
+
         // Remove `CoverageSpan`s derived from closures, originally added to ensure the coverage
         // regions for the current function leave room for the closure's own coverage regions
         // (injected separately, from the closure's own MIR).
@@ -355,18 +350,6 @@ impl<'a> CoverageSpansGenerator<'a> {
         self.refined_spans
     }
 
-    fn push_refined_span(&mut self, covspan: CoverageSpan) {
-        if let Some(last) = self.refined_spans.last_mut()
-            && last.is_mergeable(&covspan)
-        {
-            // Instead of pushing the new span, merge it with the last refined span.
-            debug!(?last, ?covspan, "merging new refined span with last refined span");
-            last.merge_from(covspan);
-        } else {
-            self.refined_spans.push(covspan);
-        }
-    }
-
     /// If `curr` is part of a new macro expansion, carve out and push a separate
     /// span that ends just after the macro name and its subsequent `!`.
     fn maybe_push_macro_name_span(&mut self) {
@@ -379,57 +362,59 @@ impl<'a> CoverageSpansGenerator<'a> {
             return;
         }
 
-        let merged_prefix_len = self.curr_original_span.lo() - curr.span.lo();
-        let after_macro_bang = merged_prefix_len + BytePos(visible_macro.as_str().len() as u32 + 1);
-        if self.curr().span.lo() + after_macro_bang > self.curr().span.hi() {
+        // The split point is relative to `curr_original_span`,
+        // because `curr.span` may have been merged with preceding spans.
+        let split_point_after_macro_bang = self.curr_original_span.lo()
+            + BytePos(visible_macro.as_str().len() as u32)
+            + BytePos(1); // add 1 for the `!`
+        debug_assert!(split_point_after_macro_bang <= curr.span.hi());
+        if split_point_after_macro_bang > curr.span.hi() {
             // Something is wrong with the macro name span;
-            // return now to avoid emitting malformed mappings.
-            // FIXME(#117788): Track down why this happens.
+            // return now to avoid emitting malformed mappings (e.g. #117788).
             return;
         }
+
         let mut macro_name_cov = curr.clone();
-        self.curr_mut().span = curr.span.with_lo(curr.span.lo() + after_macro_bang);
-        macro_name_cov.span =
-            macro_name_cov.span.with_hi(macro_name_cov.span.lo() + after_macro_bang);
+        macro_name_cov.span = macro_name_cov.span.with_hi(split_point_after_macro_bang);
+        self.curr_mut().span = curr.span.with_lo(split_point_after_macro_bang);
+
         debug!(
             "  and curr starts a new macro expansion, so add a new span just for \
             the macro `{visible_macro}!`, new span={macro_name_cov:?}",
         );
-        self.push_refined_span(macro_name_cov);
+        self.refined_spans.push(macro_name_cov);
     }
 
+    #[track_caller]
     fn curr(&self) -> &CoverageSpan {
-        self.some_curr
-            .as_ref()
-            .unwrap_or_else(|| bug!("invalid attempt to unwrap a None some_curr"))
+        self.some_curr.as_ref().unwrap_or_else(|| bug!("some_curr is None (curr)"))
     }
 
+    #[track_caller]
     fn curr_mut(&mut self) -> &mut CoverageSpan {
-        self.some_curr
-            .as_mut()
-            .unwrap_or_else(|| bug!("invalid attempt to unwrap a None some_curr"))
+        self.some_curr.as_mut().unwrap_or_else(|| bug!("some_curr is None (curr_mut)"))
     }
 
     /// If called, then the next call to `next_coverage_span()` will *not* update `prev` with the
     /// `curr` coverage span.
+    #[track_caller]
     fn take_curr(&mut self) -> CoverageSpan {
-        self.some_curr.take().unwrap_or_else(|| bug!("invalid attempt to unwrap a None some_curr"))
+        self.some_curr.take().unwrap_or_else(|| bug!("some_curr is None (take_curr)"))
     }
 
+    #[track_caller]
     fn prev(&self) -> &CoverageSpan {
-        self.some_prev
-            .as_ref()
-            .unwrap_or_else(|| bug!("invalid attempt to unwrap a None some_prev"))
+        self.some_prev.as_ref().unwrap_or_else(|| bug!("some_prev is None (prev)"))
     }
 
+    #[track_caller]
     fn prev_mut(&mut self) -> &mut CoverageSpan {
-        self.some_prev
-            .as_mut()
-            .unwrap_or_else(|| bug!("invalid attempt to unwrap a None some_prev"))
+        self.some_prev.as_mut().unwrap_or_else(|| bug!("some_prev is None (prev_mut)"))
     }
 
+    #[track_caller]
     fn take_prev(&mut self) -> CoverageSpan {
-        self.some_prev.take().unwrap_or_else(|| bug!("invalid attempt to unwrap a None some_prev"))
+        self.some_prev.take().unwrap_or_else(|| bug!("some_prev is None (take_prev)"))
     }
 
     /// If there are `pending_dups` but `prev` is not a matching dup (`prev.span` doesn't match the
@@ -452,19 +437,14 @@ impl<'a> CoverageSpansGenerator<'a> {
             previous iteration, or prev started a new disjoint span"
         );
         if last_dup.span.hi() <= self.curr().span.lo() {
-            // Temporarily steal `pending_dups` into a local, so that we can
-            // drain it while calling other self methods.
-            let mut pending_dups = std::mem::take(&mut self.pending_dups);
-            for dup in pending_dups.drain(..) {
+            for dup in self.pending_dups.drain(..) {
                 debug!("    ...adding at least one pending={:?}", dup);
-                self.push_refined_span(dup);
+                self.refined_spans.push(dup);
             }
-            // The list of dups is now empty, but we can recycle its capacity.
-            assert!(pending_dups.is_empty() && self.pending_dups.is_empty());
-            self.pending_dups = pending_dups;
         } else {
             self.pending_dups.clear();
         }
+        assert!(self.pending_dups.is_empty());
     }
 
     /// Advance `prev` to `curr` (if any), and `curr` to the next `CoverageSpan` in sorted order.
@@ -511,22 +491,18 @@ impl<'a> CoverageSpansGenerator<'a> {
         let has_pre_closure_span = prev.span.lo() < right_cutoff;
         let has_post_closure_span = prev.span.hi() > right_cutoff;
 
-        // Temporarily steal `pending_dups` into a local, so that we can
-        // mutate and/or drain it while calling other self methods.
-        let mut pending_dups = std::mem::take(&mut self.pending_dups);
-
         if has_pre_closure_span {
             let mut pre_closure = self.prev().clone();
             pre_closure.span = pre_closure.span.with_hi(left_cutoff);
             debug!("  prev overlaps a closure. Adding span for pre_closure={:?}", pre_closure);
-            if !pending_dups.is_empty() {
-                for mut dup in pending_dups.iter().cloned() {
-                    dup.span = dup.span.with_hi(left_cutoff);
-                    debug!("    ...and at least one pre_closure dup={:?}", dup);
-                    self.push_refined_span(dup);
-                }
+
+            for mut dup in self.pending_dups.iter().cloned() {
+                dup.span = dup.span.with_hi(left_cutoff);
+                debug!("    ...and at least one pre_closure dup={:?}", dup);
+                self.refined_spans.push(dup);
             }
-            self.push_refined_span(pre_closure);
+
+            self.refined_spans.push(pre_closure);
         }
 
         if has_post_closure_span {
@@ -535,19 +511,17 @@ impl<'a> CoverageSpansGenerator<'a> {
             // about how the `CoverageSpan`s are ordered.)
             self.prev_mut().span = self.prev().span.with_lo(right_cutoff);
             debug!("  Mutated prev.span to start after the closure. prev={:?}", self.prev());
-            for dup in pending_dups.iter_mut() {
+
+            for dup in &mut self.pending_dups {
                 debug!("    ...and at least one overlapping dup={:?}", dup);
                 dup.span = dup.span.with_lo(right_cutoff);
             }
+
             let closure_covspan = self.take_curr(); // Prevent this curr from becoming prev.
-            self.push_refined_span(closure_covspan); // since self.prev() was already updated
+            self.refined_spans.push(closure_covspan); // since self.prev() was already updated
         } else {
-            pending_dups.clear();
+            self.pending_dups.clear();
         }
-
-        // Restore the modified post-closure spans, or the empty vector's capacity.
-        assert!(self.pending_dups.is_empty());
-        self.pending_dups = pending_dups;
     }
 
     /// Called if `curr.span` equals `prev_original_span` (and potentially equal to all
@@ -643,7 +617,7 @@ impl<'a> CoverageSpansGenerator<'a> {
             } else {
                 debug!("  ... adding modified prev={:?}", self.prev());
                 let prev = self.take_prev();
-                self.push_refined_span(prev);
+                self.refined_spans.push(prev);
             }
         } else {
             // with `pending_dups`, `prev` cannot have any statements that don't overlap
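
The removed `push_refined_span` helper merged on every push; the new code pushes unconditionally and runs one trailing `Vec::dedup_by` merge pass instead. A minimal illustrative sketch of that pass (not part of the patch), using simple half-open ranges in place of `CoverageSpan`s:

    fn main() {
        let mut spans: Vec<(u32, u32)> = vec![(0, 4), (4, 7), (9, 12), (12, 15)];

        // `dedup_by` passes the closure (later, earlier) elements and drops the
        // later one when the closure returns true; mutations to `a` are kept.
        spans.dedup_by(|b, a| {
            if a.1 == b.0 {
                a.1 = b.1; // merge `b` into `a`, then drop `b`
                true
            } else {
                false
            }
        });

        assert_eq!(spans, vec![(0, 7), (9, 15)]);
    }
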
diff --git a/compiler/rustc_mir_transform/src/coverage/spans/from_mir.rs b/compiler/rustc_mir_transform/src/coverage/spans/from_mir.rs
index e1531f2c239..eab9a9c98f8 100644
--- a/compiler/rustc_mir_transform/src/coverage/spans/from_mir.rs
+++ b/compiler/rustc_mir_transform/src/coverage/spans/from_mir.rs
@@ -44,6 +44,16 @@ pub(super) fn mir_to_initial_sorted_coverage_spans(
             .then_with(|| Ord::cmp(&a.is_closure, &b.is_closure).reverse())
     });
 
+    // The desugaring of an async function includes a closure containing the
+    // original function body, and a terminator that returns the `impl Future`.
+    // That terminator will cause a confusing coverage count for the function's
+    // closing brace, so discard everything after the body closure span.
+    if let Some(body_closure_index) =
+        initial_spans.iter().rposition(|covspan| covspan.is_closure && covspan.span == body_span)
+    {
+        initial_spans.truncate(body_closure_index + 1);
+    }
+
     initial_spans
 }
 
@@ -92,13 +102,13 @@ fn is_closure(statement: &Statement<'_>) -> bool {
 /// If the MIR `Statement` has a span contributive to computing coverage spans,
 /// return it; otherwise return `None`.
 fn filtered_statement_span(statement: &Statement<'_>) -> Option<Span> {
+    use mir::coverage::CoverageKind;
+
     match statement.kind {
         // These statements have spans that are often outside the scope of the executed source code
         // for their parent `BasicBlock`.
         StatementKind::StorageLive(_)
         | StatementKind::StorageDead(_)
-        // Coverage should not be encountered, but don't inject coverage coverage
-        | StatementKind::Coverage(_)
         // Ignore `ConstEvalCounter`s
         | StatementKind::ConstEvalCounter
         // Ignore `Nop`s
@@ -122,9 +132,13 @@ fn filtered_statement_span(statement: &Statement<'_>) -> Option<Span> {
         // If and when the Issue is resolved, remove this special case match pattern:
         StatementKind::FakeRead(box (FakeReadCause::ForGuardBinding, _)) => None,
 
-        // Retain spans from all other statements
+        // Retain spans from most other statements.
         StatementKind::FakeRead(box (_, _)) // Not including `ForGuardBinding`
         | StatementKind::Intrinsic(..)
+        | StatementKind::Coverage(box mir::Coverage {
+            // The purpose of `SpanMarker` is to be matched and accepted here.
+            kind: CoverageKind::SpanMarker
+        })
         | StatementKind::Assign(_)
         | StatementKind::SetDiscriminant { .. }
         | StatementKind::Deinit(..)
@@ -133,6 +147,11 @@ fn filtered_statement_span(statement: &Statement<'_>) -> Option<Span> {
         | StatementKind::AscribeUserType(_, _) => {
             Some(statement.source_info.span)
         }
+
+        StatementKind::Coverage(box mir::Coverage {
+            // These coverage statements should not exist prior to coverage instrumentation.
+            kind: CoverageKind::CounterIncrement { .. } | CoverageKind::ExpressionUsed { .. }
+        }) => bug!("Unexpected coverage statement found during coverage instrumentation: {statement:?}"),
     }
 }
 
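The new hunk above drops every span after the async body closure by combining `rposition` and `truncate`. A minimal illustrative sketch of that pattern (not part of the patch), on plain tuples where the bool plays the role of `is_closure`:

    fn main() {
        // (is_closure, length): the last `true` entry stands for the body closure span.
        let mut spans = vec![(false, 3), (true, 10), (false, 1), (false, 2)];

        // Find the last closure entry from the back, keep it, discard everything after it.
        if let Some(body_closure_index) = spans.iter().rposition(|&(is_closure, _)| is_closure) {
            spans.truncate(body_closure_index + 1);
        }

        assert_eq!(spans, vec![(false, 3), (true, 10)]);
    }
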
diff --git a/compiler/rustc_mir_transform/src/coverage/test_macros/Cargo.toml b/compiler/rustc_mir_transform/src/coverage/test_macros/Cargo.toml
deleted file mode 100644
index f753caa9124..00000000000
--- a/compiler/rustc_mir_transform/src/coverage/test_macros/Cargo.toml
+++ /dev/null
@@ -1,7 +0,0 @@
-[package]
-name = "coverage_test_macros"
-version = "0.0.0"
-edition = "2021"
-
-[lib]
-proc-macro = true
diff --git a/compiler/rustc_mir_transform/src/coverage/test_macros/src/lib.rs b/compiler/rustc_mir_transform/src/coverage/test_macros/src/lib.rs
deleted file mode 100644
index f41adf667ec..00000000000
--- a/compiler/rustc_mir_transform/src/coverage/test_macros/src/lib.rs
+++ /dev/null
@@ -1,6 +0,0 @@
-use proc_macro::TokenStream;
-
-#[proc_macro]
-pub fn let_bcb(item: TokenStream) -> TokenStream {
-    format!("let bcb{item} = graph::BasicCoverageBlock::from_usize({item});").parse().unwrap()
-}
diff --git a/compiler/rustc_mir_transform/src/coverage/tests.rs b/compiler/rustc_mir_transform/src/coverage/tests.rs
index 702fe5f563e..302cbf05d78 100644
--- a/compiler/rustc_mir_transform/src/coverage/tests.rs
+++ b/compiler/rustc_mir_transform/src/coverage/tests.rs
@@ -27,8 +27,6 @@
 use super::counters;
 use super::graph::{self, BasicCoverageBlock};
 
-use coverage_test_macros::let_bcb;
-
 use itertools::Itertools;
 use rustc_data_structures::graph::WithNumNodes;
 use rustc_data_structures::graph::WithSuccessors;
@@ -37,6 +35,10 @@ use rustc_middle::mir::*;
 use rustc_middle::ty;
 use rustc_span::{self, BytePos, Pos, Span, DUMMY_SP};
 
+fn bcb(index: u32) -> BasicCoverageBlock {
+    BasicCoverageBlock::from_u32(index)
+}
+
 // All `TEMP_BLOCK` targets should be replaced before calling `to_body() -> mir::Body`.
 const TEMP_BLOCK: BasicBlock = BasicBlock::MAX;
 
@@ -300,12 +302,15 @@ fn goto_switchint<'a>() -> Body<'a> {
     mir_body
 }
 
-macro_rules! assert_successors {
-    ($basic_coverage_blocks:ident, $i:ident, [$($successor:ident),*]) => {
-        let mut successors = $basic_coverage_blocks.successors[$i].clone();
-        successors.sort_unstable();
-        assert_eq!(successors, vec![$($successor),*]);
-    }
+#[track_caller]
+fn assert_successors(
+    basic_coverage_blocks: &graph::CoverageGraph,
+    bcb: BasicCoverageBlock,
+    expected_successors: &[BasicCoverageBlock],
+) {
+    let mut successors = basic_coverage_blocks.successors[bcb].clone();
+    successors.sort_unstable();
+    assert_eq!(successors, expected_successors);
 }
 
 #[test]
@@ -334,13 +339,9 @@ fn test_covgraph_goto_switchint() {
         basic_coverage_blocks.iter_enumerated().collect::<Vec<_>>()
     );
 
-    let_bcb!(0);
-    let_bcb!(1);
-    let_bcb!(2);
-
-    assert_successors!(basic_coverage_blocks, bcb0, [bcb1, bcb2]);
-    assert_successors!(basic_coverage_blocks, bcb1, []);
-    assert_successors!(basic_coverage_blocks, bcb2, []);
+    assert_successors(&basic_coverage_blocks, bcb(0), &[bcb(1), bcb(2)]);
+    assert_successors(&basic_coverage_blocks, bcb(1), &[]);
+    assert_successors(&basic_coverage_blocks, bcb(2), &[]);
 }
 
 /// Create a mock `Body` with a loop.
@@ -418,15 +419,10 @@ fn test_covgraph_switchint_then_loop_else_return() {
         basic_coverage_blocks.iter_enumerated().collect::<Vec<_>>()
     );
 
-    let_bcb!(0);
-    let_bcb!(1);
-    let_bcb!(2);
-    let_bcb!(3);
-
-    assert_successors!(basic_coverage_blocks, bcb0, [bcb1]);
-    assert_successors!(basic_coverage_blocks, bcb1, [bcb2, bcb3]);
-    assert_successors!(basic_coverage_blocks, bcb2, []);
-    assert_successors!(basic_coverage_blocks, bcb3, [bcb1]);
+    assert_successors(&basic_coverage_blocks, bcb(0), &[bcb(1)]);
+    assert_successors(&basic_coverage_blocks, bcb(1), &[bcb(2), bcb(3)]);
+    assert_successors(&basic_coverage_blocks, bcb(2), &[]);
+    assert_successors(&basic_coverage_blocks, bcb(3), &[bcb(1)]);
 }
 
 /// Create a mock `Body` with nested loops.
@@ -546,21 +542,13 @@ fn test_covgraph_switchint_loop_then_inner_loop_else_break() {
         basic_coverage_blocks.iter_enumerated().collect::<Vec<_>>()
     );
 
-    let_bcb!(0);
-    let_bcb!(1);
-    let_bcb!(2);
-    let_bcb!(3);
-    let_bcb!(4);
-    let_bcb!(5);
-    let_bcb!(6);
-
-    assert_successors!(basic_coverage_blocks, bcb0, [bcb1]);
-    assert_successors!(basic_coverage_blocks, bcb1, [bcb2, bcb3]);
-    assert_successors!(basic_coverage_blocks, bcb2, []);
-    assert_successors!(basic_coverage_blocks, bcb3, [bcb4]);
-    assert_successors!(basic_coverage_blocks, bcb4, [bcb5, bcb6]);
-    assert_successors!(basic_coverage_blocks, bcb5, [bcb1]);
-    assert_successors!(basic_coverage_blocks, bcb6, [bcb4]);
+    assert_successors(&basic_coverage_blocks, bcb(0), &[bcb(1)]);
+    assert_successors(&basic_coverage_blocks, bcb(1), &[bcb(2), bcb(3)]);
+    assert_successors(&basic_coverage_blocks, bcb(2), &[]);
+    assert_successors(&basic_coverage_blocks, bcb(3), &[bcb(4)]);
+    assert_successors(&basic_coverage_blocks, bcb(4), &[bcb(5), bcb(6)]);
+    assert_successors(&basic_coverage_blocks, bcb(5), &[bcb(1)]);
+    assert_successors(&basic_coverage_blocks, bcb(6), &[bcb(4)]);
 }
 
 #[test]
@@ -595,10 +583,7 @@ fn test_find_loop_backedges_one() {
         backedges
     );
 
-    let_bcb!(1);
-    let_bcb!(3);
-
-    assert_eq!(backedges[bcb1], vec![bcb3]);
+    assert_eq!(backedges[bcb(1)], &[bcb(3)]);
 }
 
 #[test]
@@ -613,13 +598,8 @@ fn test_find_loop_backedges_two() {
         backedges
     );
 
-    let_bcb!(1);
-    let_bcb!(4);
-    let_bcb!(5);
-    let_bcb!(6);
-
-    assert_eq!(backedges[bcb1], vec![bcb5]);
-    assert_eq!(backedges[bcb4], vec![bcb6]);
+    assert_eq!(backedges[bcb(1)], &[bcb(5)]);
+    assert_eq!(backedges[bcb(4)], &[bcb(6)]);
 }
 
 #[test]
@@ -632,13 +612,11 @@ fn test_traverse_coverage_with_loops() {
         traversed_in_order.push(bcb);
     }
 
-    let_bcb!(6);
-
     // bcb0 is visited first. Then bcb1 starts the first loop, and all remaining nodes, *except*
     // bcb6 are inside the first loop.
     assert_eq!(
         *traversed_in_order.last().expect("should have elements"),
-        bcb6,
+        bcb(6),
         "bcb6 should not be visited until all nodes inside the first loop have been visited"
     );
 }
@@ -656,20 +634,18 @@ fn test_make_bcb_counters() {
         coverage_counters.make_bcb_counters(&basic_coverage_blocks, bcb_has_coverage_spans);
         assert_eq!(coverage_counters.num_expressions(), 0);
 
-        let_bcb!(1);
         assert_eq!(
             0, // bcb1 has a `Counter` with id = 0
-            match coverage_counters.bcb_counter(bcb1).expect("should have a counter") {
+            match coverage_counters.bcb_counter(bcb(1)).expect("should have a counter") {
                 counters::BcbCounter::Counter { id, .. } => id,
                 _ => panic!("expected a Counter"),
             }
             .as_u32()
         );
 
-        let_bcb!(2);
         assert_eq!(
             1, // bcb2 has a `Counter` with id = 1
-            match coverage_counters.bcb_counter(bcb2).expect("should have a counter") {
+            match coverage_counters.bcb_counter(bcb(2)).expect("should have a counter") {
                 counters::BcbCounter::Counter { id, .. } => id,
                 _ => panic!("expected a Counter"),
             }
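
The deleted `let_bcb!` proc macro and `assert_successors!` macro are replaced by plain helpers; `#[track_caller]` keeps assertion failures pointing at the calling test line. A minimal illustrative sketch of the same pattern outside rustc (not part of the patch; `Bcb` is a hypothetical stand-in for `BasicCoverageBlock`):

    #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
    struct Bcb(u32);

    fn bcb(index: u32) -> Bcb {
        Bcb(index)
    }

    #[track_caller]
    fn assert_successors(successors: &[Vec<Bcb>], from: Bcb, expected: &[Bcb]) {
        let mut actual = successors[from.0 as usize].clone();
        actual.sort_unstable();
        // A failure here is reported at the caller's line, thanks to #[track_caller].
        assert_eq!(actual, expected);
    }

    fn main() {
        let graph = vec![vec![bcb(2), bcb(1)], vec![], vec![]];
        assert_successors(&graph, bcb(0), &[bcb(1), bcb(2)]);
        assert_successors(&graph, bcb(1), &[]);
    }
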
diff --git a/compiler/rustc_mir_transform/src/dataflow_const_prop.rs b/compiler/rustc_mir_transform/src/dataflow_const_prop.rs
index 21b92e6d77c..e9949ebbc87 100644
--- a/compiler/rustc_mir_transform/src/dataflow_const_prop.rs
+++ b/compiler/rustc_mir_transform/src/dataflow_const_prop.rs
@@ -8,6 +8,7 @@ use rustc_hir::def::DefKind;
 use rustc_middle::mir::interpret::{AllocId, ConstAllocation, InterpResult, Scalar};
 use rustc_middle::mir::visit::{MutVisitor, PlaceContext, Visitor};
 use rustc_middle::mir::*;
+use rustc_middle::query::TyCtxtAt;
 use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
 use rustc_middle::ty::{self, Ty, TyCtxt};
 use rustc_mir_dataflow::value_analysis::{
@@ -876,7 +877,7 @@ impl<'mir, 'tcx: 'mir> rustc_const_eval::interpret::Machine<'mir, 'tcx> for Dumm
     }
 
     fn before_access_global(
-        _tcx: TyCtxt<'tcx>,
+        _tcx: TyCtxtAt<'tcx>,
         _machine: &Self,
         _alloc_id: AllocId,
         alloc: ConstAllocation<'tcx>,
diff --git a/compiler/rustc_mir_transform/src/errors.rs b/compiler/rustc_mir_transform/src/errors.rs
index 3a5270f105a..2358661738a 100644
--- a/compiler/rustc_mir_transform/src/errors.rs
+++ b/compiler/rustc_mir_transform/src/errors.rs
@@ -2,7 +2,7 @@ use std::borrow::Cow;
 
 use rustc_errors::{
     Applicability, DecorateLint, DiagnosticArgValue, DiagnosticBuilder, DiagnosticMessage,
-    EmissionGuarantee, Handler, IntoDiagnostic,
+    EmissionGuarantee, ErrorGuaranteed, Handler, IntoDiagnostic,
 };
 use rustc_macros::{Diagnostic, LintDiagnostic, Subdiagnostic};
 use rustc_middle::mir::{AssertKind, UnsafetyViolationDetails};
@@ -62,9 +62,9 @@ pub(crate) struct RequiresUnsafe {
 // so we need to eagerly translate the label here, which isn't supported by the derive API
 // We could also exhaustively list out the primary messages for all unsafe violations,
 // but this would result in a lot of duplication.
-impl<'sess, G: EmissionGuarantee> IntoDiagnostic<'sess, G> for RequiresUnsafe {
+impl<'sess> IntoDiagnostic<'sess> for RequiresUnsafe {
     #[track_caller]
-    fn into_diagnostic(self, handler: &'sess Handler) -> DiagnosticBuilder<'sess, G> {
+    fn into_diagnostic(self, handler: &'sess Handler) -> DiagnosticBuilder<'sess, ErrorGuaranteed> {
         let mut diag = handler.struct_diagnostic(fluent::mir_transform_requires_unsafe);
         diag.code(rustc_errors::DiagnosticId::Error("E0133".to_string()));
         diag.set_span(self.span);
diff --git a/compiler/rustc_mir_transform/src/gvn.rs b/compiler/rustc_mir_transform/src/gvn.rs
index 735960f31b3..1a5979ef714 100644
--- a/compiler/rustc_mir_transform/src/gvn.rs
+++ b/compiler/rustc_mir_transform/src/gvn.rs
@@ -388,7 +388,9 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
                         self.ecx.copy_op(op, &field_dest, /*allow_transmute*/ false).ok()?;
                     }
                     self.ecx.write_discriminant(variant.unwrap_or(FIRST_VARIANT), &dest).ok()?;
-                    self.ecx.alloc_mark_immutable(dest.ptr().provenance.unwrap()).ok()?;
+                    self.ecx
+                        .alloc_mark_immutable(dest.ptr().provenance.unwrap().alloc_id())
+                        .ok()?;
                     dest.into()
                 } else {
                     return None;
@@ -928,7 +930,8 @@ fn op_to_prop_const<'tcx>(
         }
 
         let pointer = mplace.ptr().into_pointer_or_addr().ok()?;
-        let (alloc_id, offset) = pointer.into_parts();
+        let (prov, offset) = pointer.into_parts();
+        let alloc_id = prov.alloc_id();
         intern_const_alloc_for_constprop(ecx, alloc_id).ok()?;
         if matches!(ecx.tcx.global_alloc(alloc_id), GlobalAlloc::Memory(_)) {
             // `alloc_id` may point to a static. Codegen will choke on an `Indirect` with anything