path: root/compiler/rustc_mir_transform/src
Diffstat (limited to 'compiler/rustc_mir_transform/src')
 compiler/rustc_mir_transform/src/cleanup_post_borrowck.rs      |   2
 compiler/rustc_mir_transform/src/copy_prop.rs                  |  41
 compiler/rustc_mir_transform/src/coroutine.rs                  |  42
 compiler/rustc_mir_transform/src/coverage/counters.rs          |   2
 compiler/rustc_mir_transform/src/coverage/graph.rs             |  26
 compiler/rustc_mir_transform/src/coverage/mappings.rs          | 275
 compiler/rustc_mir_transform/src/coverage/mod.rs               | 115
 compiler/rustc_mir_transform/src/coverage/query.rs             |  12
 compiler/rustc_mir_transform/src/coverage/spans.rs             | 102
 compiler/rustc_mir_transform/src/coverage/spans/from_mir.rs    |  58
 compiler/rustc_mir_transform/src/coverage/tests.rs             |   3
 compiler/rustc_mir_transform/src/deduce_param_attrs.rs         |   2
 compiler/rustc_mir_transform/src/gvn.rs                        |  25
 compiler/rustc_mir_transform/src/inline.rs                     |  45
 compiler/rustc_mir_transform/src/inline/cycle.rs               |   6
 compiler/rustc_mir_transform/src/instsimplify.rs               |  40
 compiler/rustc_mir_transform/src/known_panics_lint.rs          |  64
 compiler/rustc_mir_transform/src/lib.rs                        |   3
 compiler/rustc_mir_transform/src/lower_intrinsics.rs           |  28
 compiler/rustc_mir_transform/src/lower_slice_len.rs            |   2
 compiler/rustc_mir_transform/src/match_branches.rs             |   8
 compiler/rustc_mir_transform/src/normalize_array_len.rs        |   3
 compiler/rustc_mir_transform/src/promote_consts.rs             | 159
 compiler/rustc_mir_transform/src/ref_prop.rs                   |   3
 compiler/rustc_mir_transform/src/required_consts.rs            |  13
 compiler/rustc_mir_transform/src/shim.rs                       |   5
 compiler/rustc_mir_transform/src/shim/async_destructor_ctor.rs | 618
 compiler/rustc_mir_transform/src/ssa.rs                        |  55
28 files changed, 1429 insertions, 328 deletions
diff --git a/compiler/rustc_mir_transform/src/cleanup_post_borrowck.rs b/compiler/rustc_mir_transform/src/cleanup_post_borrowck.rs
index da82f8de781..48a6a83e146 100644
--- a/compiler/rustc_mir_transform/src/cleanup_post_borrowck.rs
+++ b/compiler/rustc_mir_transform/src/cleanup_post_borrowck.rs
@@ -29,7 +29,7 @@ impl<'tcx> MirPass<'tcx> for CleanupPostBorrowck {
             for statement in basic_block.statements.iter_mut() {
                 match statement.kind {
                     StatementKind::AscribeUserType(..)
-                    | StatementKind::Assign(box (_, Rvalue::Ref(_, BorrowKind::Fake, _)))
+                    | StatementKind::Assign(box (_, Rvalue::Ref(_, BorrowKind::Fake(_), _)))
                     | StatementKind::Coverage(
                         // These kinds of coverage statements are markers inserted during
                         // MIR building, and are not needed after InstrumentCoverage.
diff --git a/compiler/rustc_mir_transform/src/copy_prop.rs b/compiler/rustc_mir_transform/src/copy_prop.rs
index 0119b95cced..c1f9313a377 100644
--- a/compiler/rustc_mir_transform/src/copy_prop.rs
+++ b/compiler/rustc_mir_transform/src/copy_prop.rs
@@ -3,7 +3,6 @@ use rustc_index::IndexSlice;
 use rustc_middle::mir::visit::*;
 use rustc_middle::mir::*;
 use rustc_middle::ty::TyCtxt;
-use rustc_mir_dataflow::impls::borrowed_locals;
 
 use crate::ssa::SsaLocals;
 
@@ -32,8 +31,8 @@ impl<'tcx> MirPass<'tcx> for CopyProp {
 }
 
 fn propagate_ssa<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
-    let borrowed_locals = borrowed_locals(body);
-    let ssa = SsaLocals::new(body);
+    let param_env = tcx.param_env_reveal_all_normalized(body.source.def_id());
+    let ssa = SsaLocals::new(tcx, body, param_env);
 
     let fully_moved = fully_moved_locals(&ssa, body);
     debug!(?fully_moved);
@@ -51,7 +50,7 @@ fn propagate_ssa<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
         tcx,
         copy_classes: ssa.copy_classes(),
         fully_moved,
-        borrowed_locals,
+        borrowed_locals: ssa.borrowed_locals(),
         storage_to_remove,
     }
     .visit_body_preserves_cfg(body);
@@ -101,7 +100,7 @@ struct Replacer<'a, 'tcx> {
     tcx: TyCtxt<'tcx>,
     fully_moved: BitSet<Local>,
     storage_to_remove: BitSet<Local>,
-    borrowed_locals: BitSet<Local>,
+    borrowed_locals: &'a BitSet<Local>,
     copy_classes: &'a IndexSlice<Local, Local>,
 }
 
@@ -112,6 +111,12 @@ impl<'tcx> MutVisitor<'tcx> for Replacer<'_, 'tcx> {
 
     fn visit_local(&mut self, local: &mut Local, ctxt: PlaceContext, _: Location) {
         let new_local = self.copy_classes[*local];
+        // We must not unify two locals that are both borrowed, but it is fine if only one of
+        // them is. We choose to check the original local, not the target: that way, if the
+        // original local is borrowed and the target is not, we do not pessimize the whole class.
+        if self.borrowed_locals.contains(*local) {
+            return;
+        }
         match ctxt {
             // Do not modify the local in storage statements.
             PlaceContext::NonUse(NonUseContext::StorageLive | NonUseContext::StorageDead) => {}
@@ -122,32 +127,14 @@ impl<'tcx> MutVisitor<'tcx> for Replacer<'_, 'tcx> {
         }
     }
 
-    fn visit_place(&mut self, place: &mut Place<'tcx>, ctxt: PlaceContext, loc: Location) {
+    fn visit_place(&mut self, place: &mut Place<'tcx>, _: PlaceContext, loc: Location) {
         if let Some(new_projection) = self.process_projection(place.projection, loc) {
             place.projection = self.tcx().mk_place_elems(&new_projection);
         }
 
-        let observes_address = match ctxt {
-            PlaceContext::NonMutatingUse(
-                NonMutatingUseContext::SharedBorrow
-                | NonMutatingUseContext::FakeBorrow
-                | NonMutatingUseContext::AddressOf,
-            ) => true,
-            // For debuginfo, merging locals is ok.
-            PlaceContext::NonUse(NonUseContext::VarDebugInfo) => {
-                self.borrowed_locals.contains(place.local)
-            }
-            _ => false,
-        };
-        if observes_address && !place.is_indirect() {
-            // We observe the address of `place.local`. Do not replace it.
-        } else {
-            self.visit_local(
-                &mut place.local,
-                PlaceContext::NonMutatingUse(NonMutatingUseContext::Copy),
-                loc,
-            )
-        }
+        // Any non-mutating use context is ok.
+        let ctxt = PlaceContext::NonMutatingUse(NonMutatingUseContext::Copy);
+        self.visit_local(&mut place.local, ctxt, loc)
     }
 
     fn visit_operand(&mut self, operand: &mut Operand<'tcx>, loc: Location) {
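
Why two *borrowed* locals must not be unified: distinct live locals have distinct addresses, and code may legitimately observe that. A source-level sketch of the hazard (hypothetical illustration; the pass itself works on MIR locals, not surface syntax):

// Both locals are borrowed, so CopyProp must keep them separate even though
// `b` is a plain copy of `a`: merging them would force `pa == pb`.
fn distinct_addresses() {
    let a = 1_i32;
    let b = a; // a copy that CopyProp might otherwise fold into `a`
    let pa = &a as *const i32;
    let pb = &b as *const i32; // both locals borrowed here
    assert_ne!(pa, pb); // distinct live locals have distinct addresses
}

fn main() {
    distinct_addresses();
}
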
diff --git a/compiler/rustc_mir_transform/src/coroutine.rs b/compiler/rustc_mir_transform/src/coroutine.rs
index e2a911f0dc7..3008016863e 100644
--- a/compiler/rustc_mir_transform/src/coroutine.rs
+++ b/compiler/rustc_mir_transform/src/coroutine.rs
@@ -80,6 +80,10 @@ use rustc_span::symbol::sym;
 use rustc_span::Span;
 use rustc_target::abi::{FieldIdx, VariantIdx};
 use rustc_target::spec::PanicStrategy;
+use rustc_trait_selection::infer::TyCtxtInferExt as _;
+use rustc_trait_selection::traits::error_reporting::TypeErrCtxtExt as _;
+use rustc_trait_selection::traits::ObligationCtxt;
+use rustc_trait_selection::traits::{ObligationCause, ObligationCauseCode};
 use std::{iter, ops};
 
 pub struct StateTransform;
@@ -1256,7 +1260,7 @@ fn create_coroutine_drop_shim<'tcx>(
     }
 
     // Replace the return variable
-    body.local_decls[RETURN_PLACE] = LocalDecl::with_source_info(Ty::new_unit(tcx), source_info);
+    body.local_decls[RETURN_PLACE] = LocalDecl::with_source_info(tcx.types.unit, source_info);
 
     make_coroutine_state_argument_indirect(tcx, &mut body);
 
@@ -1584,10 +1588,46 @@ pub(crate) fn mir_coroutine_witnesses<'tcx>(
     let (_, coroutine_layout, _) = compute_layout(liveness_info, body);
 
     check_suspend_tys(tcx, &coroutine_layout, body);
+    check_field_tys_sized(tcx, &coroutine_layout, def_id);
 
     Some(coroutine_layout)
 }
 
+fn check_field_tys_sized<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    coroutine_layout: &CoroutineLayout<'tcx>,
+    def_id: LocalDefId,
+) {
+    // No need to check if unsized_locals/unsized_fn_params is disabled,
+    // since we will error during typeck.
+    if !tcx.features().unsized_locals && !tcx.features().unsized_fn_params {
+        return;
+    }
+
+    let infcx = tcx.infer_ctxt().ignoring_regions().build();
+    let param_env = tcx.param_env(def_id);
+
+    let ocx = ObligationCtxt::new(&infcx);
+    for field_ty in &coroutine_layout.field_tys {
+        ocx.register_bound(
+            ObligationCause::new(
+                field_ty.source_info.span,
+                def_id,
+                ObligationCauseCode::SizedCoroutineInterior(def_id),
+            ),
+            param_env,
+            field_ty.ty,
+            tcx.require_lang_item(hir::LangItem::Sized, Some(field_ty.source_info.span)),
+        );
+    }
+
+    let errors = ocx.select_all_or_error();
+    debug!(?errors);
+    if !errors.is_empty() {
+        infcx.err_ctxt().report_fulfillment_errors(errors);
+    }
+}
+
 impl<'tcx> MirPass<'tcx> for StateTransform {
     fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
         let Some(old_yield_ty) = body.yield_ty() else {
diff --git a/compiler/rustc_mir_transform/src/coverage/counters.rs b/compiler/rustc_mir_transform/src/coverage/counters.rs
index 69dc4f2ddea..6e73a476421 100644
--- a/compiler/rustc_mir_transform/src/coverage/counters.rs
+++ b/compiler/rustc_mir_transform/src/coverage/counters.rs
@@ -2,7 +2,7 @@ use std::fmt::{self, Debug};
 
 use rustc_data_structures::captures::Captures;
 use rustc_data_structures::fx::FxHashMap;
-use rustc_data_structures::graph::WithNumNodes;
+use rustc_data_structures::graph::DirectedGraph;
 use rustc_index::IndexVec;
 use rustc_middle::mir::coverage::{CounterId, CovTerm, Expression, ExpressionId, Op};
 
diff --git a/compiler/rustc_mir_transform/src/coverage/graph.rs b/compiler/rustc_mir_transform/src/coverage/graph.rs
index ed8c4d8283d..1895735ab35 100644
--- a/compiler/rustc_mir_transform/src/coverage/graph.rs
+++ b/compiler/rustc_mir_transform/src/coverage/graph.rs
@@ -1,7 +1,7 @@
 use rustc_data_structures::captures::Captures;
 use rustc_data_structures::fx::FxHashSet;
 use rustc_data_structures::graph::dominators::{self, Dominators};
-use rustc_data_structures::graph::{self, GraphSuccessors, WithNumNodes, WithStartNode};
+use rustc_data_structures::graph::{self, DirectedGraph, StartNode};
 use rustc_index::bit_set::BitSet;
 use rustc_index::IndexVec;
 use rustc_middle::mir::{self, BasicBlock, Terminator, TerminatorKind};
@@ -193,16 +193,14 @@ impl IndexMut<BasicCoverageBlock> for CoverageGraph {
 
 impl graph::DirectedGraph for CoverageGraph {
     type Node = BasicCoverageBlock;
-}
 
-impl graph::WithNumNodes for CoverageGraph {
     #[inline]
     fn num_nodes(&self) -> usize {
         self.bcbs.len()
     }
 }
 
-impl graph::WithStartNode for CoverageGraph {
+impl graph::StartNode for CoverageGraph {
     #[inline]
     fn start_node(&self) -> Self::Node {
         self.bcb_from_bb(mir::START_BLOCK)
@@ -210,28 +208,16 @@ impl graph::WithStartNode for CoverageGraph {
     }
 }
 
-type BcbSuccessors<'graph> = std::slice::Iter<'graph, BasicCoverageBlock>;
-
-impl<'graph> graph::GraphSuccessors<'graph> for CoverageGraph {
-    type Item = BasicCoverageBlock;
-    type Iter = std::iter::Cloned<BcbSuccessors<'graph>>;
-}
-
-impl graph::WithSuccessors for CoverageGraph {
+impl graph::Successors for CoverageGraph {
     #[inline]
-    fn successors(&self, node: Self::Node) -> <Self as GraphSuccessors<'_>>::Iter {
+    fn successors(&self, node: Self::Node) -> impl Iterator<Item = Self::Node> {
         self.successors[node].iter().cloned()
     }
 }
 
-impl<'graph> graph::GraphPredecessors<'graph> for CoverageGraph {
-    type Item = BasicCoverageBlock;
-    type Iter = std::iter::Copied<std::slice::Iter<'graph, BasicCoverageBlock>>;
-}
-
-impl graph::WithPredecessors for CoverageGraph {
+impl graph::Predecessors for CoverageGraph {
     #[inline]
-    fn predecessors(&self, node: Self::Node) -> <Self as graph::GraphPredecessors<'_>>::Iter {
+    fn predecessors(&self, node: Self::Node) -> impl Iterator<Item = Self::Node> {
         self.predecessors[node].iter().copied()
     }
 }
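
The refactor above folds the old `WithNumNodes`/`WithStartNode`/`WithSuccessors` family into fewer traits and drops the `GraphSuccessors<'graph>` helper by using return-position `impl Trait` in traits. A minimal standalone sketch of the new shape (simplified stand-ins, not rustc's actual definitions):

// `num_nodes` now lives directly on `DirectedGraph`, and `Successors` returns
// `impl Iterator` from the method instead of naming an iterator type through
// a lifetime-parameterized helper trait.
trait DirectedGraph {
    type Node: Copy;
    fn num_nodes(&self) -> usize;
}

trait Successors: DirectedGraph {
    fn successors(&self, node: Self::Node) -> impl Iterator<Item = Self::Node>;
}

struct AdjList {
    edges: Vec<Vec<usize>>, // edges[n] lists the successors of node n
}

impl DirectedGraph for AdjList {
    type Node = usize;
    fn num_nodes(&self) -> usize {
        self.edges.len()
    }
}

impl Successors for AdjList {
    fn successors(&self, node: usize) -> impl Iterator<Item = usize> {
        self.edges[node].iter().copied()
    }
}

fn main() {
    let g = AdjList { edges: vec![vec![1], vec![0, 1]] };
    assert_eq!(g.num_nodes(), 2);
    assert_eq!(g.successors(1).collect::<Vec<_>>(), vec![0, 1]);
}
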
diff --git a/compiler/rustc_mir_transform/src/coverage/mappings.rs b/compiler/rustc_mir_transform/src/coverage/mappings.rs
new file mode 100644
index 00000000000..d364658efb6
--- /dev/null
+++ b/compiler/rustc_mir_transform/src/coverage/mappings.rs
@@ -0,0 +1,275 @@
+use std::collections::BTreeSet;
+
+use rustc_data_structures::graph::DirectedGraph;
+use rustc_index::bit_set::BitSet;
+use rustc_middle::mir::coverage::{
+    BlockMarkerId, BranchSpan, ConditionInfo, CoverageKind, MCDCBranchSpan, MCDCDecisionSpan,
+};
+use rustc_middle::mir::{self, BasicBlock, StatementKind};
+use rustc_span::Span;
+
+use crate::coverage::graph::{BasicCoverageBlock, CoverageGraph, START_BCB};
+use crate::coverage::spans::{
+    extract_refined_covspans, unexpand_into_body_span_with_visible_macro,
+};
+use crate::coverage::ExtractedHirInfo;
+use rustc_index::IndexVec;
+
+#[derive(Clone, Debug)]
+pub(super) enum BcbMappingKind {
+    /// Associates an ordinary executable code span with its corresponding BCB.
+    Code(BasicCoverageBlock),
+
+    // Ordinary branch mappings are stored separately, so they don't have a
+    // variant in this enum.
+    //
+    /// Associates an MC/DC branch span with condition info, in addition to the
+    /// fields of a normal branch mapping.
+    MCDCBranch {
+        true_bcb: BasicCoverageBlock,
+        false_bcb: BasicCoverageBlock,
+        /// If `None`, this actually represents a normal branch mapping inserted
+        /// for code that was too complex for MC/DC.
+        condition_info: Option<ConditionInfo>,
+        decision_depth: u16,
+    },
+    /// Associates an MC/DC decision with its join BCB.
+    MCDCDecision {
+        end_bcbs: BTreeSet<BasicCoverageBlock>,
+        bitmap_idx: u32,
+        conditions_num: u16,
+        decision_depth: u16,
+    },
+}
+
+#[derive(Debug)]
+pub(super) struct BcbMapping {
+    pub(super) kind: BcbMappingKind,
+    pub(super) span: Span,
+}
+
+/// This is separate from [`BcbMappingKind`] to help prepare for larger changes
+/// that will be needed for improved branch coverage in the future.
+/// (See <https://github.com/rust-lang/rust/pull/124217>.)
+#[derive(Debug)]
+pub(super) struct BcbBranchPair {
+    pub(super) span: Span,
+    pub(super) true_bcb: BasicCoverageBlock,
+    pub(super) false_bcb: BasicCoverageBlock,
+}
+
+pub(super) struct CoverageSpans {
+    bcb_has_mappings: BitSet<BasicCoverageBlock>,
+    pub(super) mappings: Vec<BcbMapping>,
+    pub(super) branch_pairs: Vec<BcbBranchPair>,
+    test_vector_bitmap_bytes: u32,
+}
+
+impl CoverageSpans {
+    pub(super) fn bcb_has_coverage_spans(&self, bcb: BasicCoverageBlock) -> bool {
+        self.bcb_has_mappings.contains(bcb)
+    }
+
+    pub(super) fn test_vector_bitmap_bytes(&self) -> u32 {
+        self.test_vector_bitmap_bytes
+    }
+}
+
+/// Extracts coverage-relevant spans from MIR, and associates them with
+/// their corresponding BCBs.
+///
+/// Returns `None` if no coverage-relevant spans could be extracted.
+pub(super) fn generate_coverage_spans(
+    mir_body: &mir::Body<'_>,
+    hir_info: &ExtractedHirInfo,
+    basic_coverage_blocks: &CoverageGraph,
+) -> Option<CoverageSpans> {
+    let mut mappings = vec![];
+    let mut branch_pairs = vec![];
+
+    if hir_info.is_async_fn {
+        // An async function desugars into a function that returns a future,
+        // with the user code wrapped in a closure. Any spans in the desugared
+        // outer function will be unhelpful, so just keep the signature span
+        // and ignore all of the spans in the MIR body.
+        if let Some(span) = hir_info.fn_sig_span_extended {
+            mappings.push(BcbMapping { kind: BcbMappingKind::Code(START_BCB), span });
+        }
+    } else {
+        extract_refined_covspans(mir_body, hir_info, basic_coverage_blocks, &mut mappings);
+
+        branch_pairs.extend(extract_branch_pairs(mir_body, hir_info, basic_coverage_blocks));
+
+        mappings.extend(extract_mcdc_mappings(mir_body, hir_info.body_span, basic_coverage_blocks));
+    }
+
+    if mappings.is_empty() && branch_pairs.is_empty() {
+        return None;
+    }
+
+    // Identify which BCBs have one or more mappings.
+    let mut bcb_has_mappings = BitSet::new_empty(basic_coverage_blocks.num_nodes());
+    let mut insert = |bcb| {
+        bcb_has_mappings.insert(bcb);
+    };
+    let mut test_vector_bitmap_bytes = 0;
+    for BcbMapping { kind, span: _ } in &mappings {
+        match *kind {
+            BcbMappingKind::Code(bcb) => insert(bcb),
+            BcbMappingKind::MCDCBranch { true_bcb, false_bcb, .. } => {
+                insert(true_bcb);
+                insert(false_bcb);
+            }
+            BcbMappingKind::MCDCDecision { bitmap_idx, conditions_num, .. } => {
+                // `bcb_has_mappings` is used to decide where to inject coverage
+                // counters, which decision BCBs do not need, so nothing is
+                // inserted for them. However, the required length of the test
+                // vector bitmap is computed here.
+                test_vector_bitmap_bytes = test_vector_bitmap_bytes
+                    .max(bitmap_idx + (1_u32 << conditions_num as u32).div_ceil(8));
+            }
+        }
+    }
+    for &BcbBranchPair { true_bcb, false_bcb, .. } in &branch_pairs {
+        insert(true_bcb);
+        insert(false_bcb);
+    }
+
+    Some(CoverageSpans { bcb_has_mappings, mappings, branch_pairs, test_vector_bitmap_bytes })
+}
+
+fn resolve_block_markers(
+    branch_info: &mir::coverage::BranchInfo,
+    mir_body: &mir::Body<'_>,
+) -> IndexVec<BlockMarkerId, Option<BasicBlock>> {
+    let mut block_markers = IndexVec::<BlockMarkerId, Option<BasicBlock>>::from_elem_n(
+        None,
+        branch_info.num_block_markers,
+    );
+
+    // Fill out the mapping from block marker IDs to their enclosing blocks.
+    for (bb, data) in mir_body.basic_blocks.iter_enumerated() {
+        for statement in &data.statements {
+            if let StatementKind::Coverage(CoverageKind::BlockMarker { id }) = statement.kind {
+                block_markers[id] = Some(bb);
+            }
+        }
+    }
+
+    block_markers
+}
+
+// FIXME: There is currently a lot of redundancy between
+// `extract_branch_pairs` and `extract_mcdc_mappings`. This is needed so
+// that they can each be modified without interfering with the other, but in
+// the long term we should try to bring them together again when branch coverage
+// and MC/DC coverage support are more mature.
+
+pub(super) fn extract_branch_pairs(
+    mir_body: &mir::Body<'_>,
+    hir_info: &ExtractedHirInfo,
+    basic_coverage_blocks: &CoverageGraph,
+) -> Vec<BcbBranchPair> {
+    let Some(branch_info) = mir_body.coverage_branch_info.as_deref() else { return vec![] };
+
+    let block_markers = resolve_block_markers(branch_info, mir_body);
+
+    branch_info
+        .branch_spans
+        .iter()
+        .filter_map(|&BranchSpan { span: raw_span, true_marker, false_marker }| {
+            // For now, ignore any branch span that was introduced by
+            // expansion. This makes things like assert macros less noisy.
+            if !raw_span.ctxt().outer_expn_data().is_root() {
+                return None;
+            }
+            let (span, _) =
+                unexpand_into_body_span_with_visible_macro(raw_span, hir_info.body_span)?;
+
+            let bcb_from_marker =
+                |marker: BlockMarkerId| basic_coverage_blocks.bcb_from_bb(block_markers[marker]?);
+
+            let true_bcb = bcb_from_marker(true_marker)?;
+            let false_bcb = bcb_from_marker(false_marker)?;
+
+            Some(BcbBranchPair { span, true_bcb, false_bcb })
+        })
+        .collect::<Vec<_>>()
+}
+
+pub(super) fn extract_mcdc_mappings(
+    mir_body: &mir::Body<'_>,
+    body_span: Span,
+    basic_coverage_blocks: &CoverageGraph,
+) -> Vec<BcbMapping> {
+    let Some(branch_info) = mir_body.coverage_branch_info.as_deref() else {
+        return vec![];
+    };
+
+    let block_markers = resolve_block_markers(branch_info, mir_body);
+
+    let bcb_from_marker =
+        |marker: BlockMarkerId| basic_coverage_blocks.bcb_from_bb(block_markers[marker]?);
+
+    let check_branch_bcb =
+        |raw_span: Span, true_marker: BlockMarkerId, false_marker: BlockMarkerId| {
+            // For now, ignore any branch span that was introduced by
+            // expansion. This makes things like assert macros less noisy.
+            if !raw_span.ctxt().outer_expn_data().is_root() {
+                return None;
+            }
+            let (span, _) = unexpand_into_body_span_with_visible_macro(raw_span, body_span)?;
+
+            let true_bcb = bcb_from_marker(true_marker)?;
+            let false_bcb = bcb_from_marker(false_marker)?;
+            Some((span, true_bcb, false_bcb))
+        };
+
+    let mcdc_branch_filter_map = |&MCDCBranchSpan {
+                                      span: raw_span,
+                                      true_marker,
+                                      false_marker,
+                                      condition_info,
+                                      decision_depth,
+                                  }| {
+        check_branch_bcb(raw_span, true_marker, false_marker).map(|(span, true_bcb, false_bcb)| {
+            BcbMapping {
+                kind: BcbMappingKind::MCDCBranch {
+                    true_bcb,
+                    false_bcb,
+                    condition_info,
+                    decision_depth,
+                },
+                span,
+            }
+        })
+    };
+
+    let mut next_bitmap_idx = 0;
+
+    let decision_filter_map = |decision: &MCDCDecisionSpan| {
+        let (span, _) = unexpand_into_body_span_with_visible_macro(decision.span, body_span)?;
+
+        let end_bcbs = decision
+            .end_markers
+            .iter()
+            .map(|&marker| bcb_from_marker(marker))
+            .collect::<Option<_>>()?;
+
+        let bitmap_idx = next_bitmap_idx;
+        next_bitmap_idx += (1_u32 << decision.conditions_num).div_ceil(8);
+
+        Some(BcbMapping {
+            kind: BcbMappingKind::MCDCDecision {
+                end_bcbs,
+                bitmap_idx,
+                conditions_num: decision.conditions_num as u16,
+                decision_depth: decision.decision_depth,
+            },
+            span,
+        })
+    };
+
+    std::iter::empty()
+        .chain(branch_info.mcdc_branch_spans.iter().filter_map(mcdc_branch_filter_map))
+        .chain(branch_info.mcdc_decision_spans.iter().filter_map(decision_filter_map))
+        .collect::<Vec<_>>()
+}
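
The bitmap sizing used above packs one bit per possible test vector: a decision with `n` conditions has up to `2^n` test vectors, rounded up to whole bytes, and decisions are laid out back-to-back via `bitmap_idx`. A small self-contained check of that arithmetic (a sketch mirroring the expressions in the new file):

fn decision_bitmap_bytes(conditions_num: u32) -> u32 {
    (1_u32 << conditions_num).div_ceil(8)
}

fn main() {
    assert_eq!(decision_bitmap_bytes(1), 1); // 2 test vectors  -> 1 byte
    assert_eq!(decision_bitmap_bytes(3), 1); // 8 test vectors  -> 1 byte
    assert_eq!(decision_bitmap_bytes(4), 2); // 16 test vectors -> 2 bytes
    // Two decisions with 4 and 3 conditions get bitmap_idx 0 and 2,
    // for a total bitmap of 3 bytes.
    let mut next_bitmap_idx = 0;
    for n in [4, 3] {
        next_bitmap_idx += decision_bitmap_bytes(n);
    }
    assert_eq!(next_bitmap_idx, 3);
}
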
diff --git a/compiler/rustc_mir_transform/src/coverage/mod.rs b/compiler/rustc_mir_transform/src/coverage/mod.rs
index d382d2c03c2..ffe61e761c5 100644
--- a/compiler/rustc_mir_transform/src/coverage/mod.rs
+++ b/compiler/rustc_mir_transform/src/coverage/mod.rs
@@ -2,14 +2,14 @@ pub mod query;
 
 mod counters;
 mod graph;
+mod mappings;
 mod spans;
-
 #[cfg(test)]
 mod tests;
 
 use self::counters::{CounterIncrementSite, CoverageCounters};
 use self::graph::{BasicCoverageBlock, CoverageGraph};
-use self::spans::{BcbMapping, BcbMappingKind, CoverageSpans};
+use self::mappings::{BcbBranchPair, BcbMapping, BcbMappingKind, CoverageSpans};
 
 use crate::MirPass;
 
@@ -71,7 +71,7 @@ fn instrument_function_for_coverage<'tcx>(tcx: TyCtxt<'tcx>, mir_body: &mut mir:
     ////////////////////////////////////////////////////
     // Compute coverage spans from the `CoverageGraph`.
     let Some(coverage_spans) =
-        spans::generate_coverage_spans(mir_body, &hir_info, &basic_coverage_blocks)
+        mappings::generate_coverage_spans(mir_body, &hir_info, &basic_coverage_blocks)
     else {
         // No relevant spans were found in MIR, so skip instrumenting this function.
         return;
@@ -100,11 +100,25 @@ fn instrument_function_for_coverage<'tcx>(tcx: TyCtxt<'tcx>, mir_body: &mut mir:
         &coverage_counters,
     );
 
+    inject_mcdc_statements(mir_body, &basic_coverage_blocks, &coverage_spans);
+
+    let mcdc_num_condition_bitmaps = coverage_spans
+        .mappings
+        .iter()
+        .filter_map(|bcb_mapping| match bcb_mapping.kind {
+            BcbMappingKind::MCDCDecision { decision_depth, .. } => Some(decision_depth),
+            _ => None,
+        })
+        .max()
+        .map_or(0, |max| usize::from(max) + 1);
+
     mir_body.function_coverage_info = Some(Box::new(FunctionCoverageInfo {
         function_source_hash: hir_info.function_source_hash,
         num_counters: coverage_counters.num_counters(),
+        mcdc_bitmap_bytes: coverage_spans.test_vector_bitmap_bytes(),
         expressions: coverage_counters.into_expressions(),
         mappings,
+        mcdc_num_condition_bitmaps,
     }));
 }
 
@@ -136,20 +150,48 @@ fn create_mappings<'tcx>(
             .as_term()
     };
 
-    coverage_spans
-        .all_bcb_mappings()
-        .filter_map(|&BcbMapping { kind: bcb_mapping_kind, span }| {
-            let kind = match bcb_mapping_kind {
+    let mut mappings = Vec::new();
+
+    mappings.extend(coverage_spans.mappings.iter().filter_map(
+        |BcbMapping { kind: bcb_mapping_kind, span }| {
+            let kind = match *bcb_mapping_kind {
                 BcbMappingKind::Code(bcb) => MappingKind::Code(term_for_bcb(bcb)),
-                BcbMappingKind::Branch { true_bcb, false_bcb } => MappingKind::Branch {
+                BcbMappingKind::MCDCBranch {
+                    true_bcb, false_bcb, condition_info: None, ..
+                } => MappingKind::Branch {
                     true_term: term_for_bcb(true_bcb),
                     false_term: term_for_bcb(false_bcb),
                 },
+                BcbMappingKind::MCDCBranch {
+                    true_bcb,
+                    false_bcb,
+                    condition_info: Some(mcdc_params),
+                    ..
+                } => MappingKind::MCDCBranch {
+                    true_term: term_for_bcb(true_bcb),
+                    false_term: term_for_bcb(false_bcb),
+                    mcdc_params,
+                },
+                BcbMappingKind::MCDCDecision { bitmap_idx, conditions_num, .. } => {
+                    MappingKind::MCDCDecision(DecisionInfo { bitmap_idx, conditions_num })
+                }
             };
+            let code_region = make_code_region(source_map, file_name, *span, body_span)?;
+            Some(Mapping { kind, code_region })
+        },
+    ));
+
+    mappings.extend(coverage_spans.branch_pairs.iter().filter_map(
+        |&BcbBranchPair { span, true_bcb, false_bcb }| {
+            let true_term = term_for_bcb(true_bcb);
+            let false_term = term_for_bcb(false_bcb);
+            let kind = MappingKind::Branch { true_term, false_term };
             let code_region = make_code_region(source_map, file_name, span, body_span)?;
             Some(Mapping { kind, code_region })
-        })
-        .collect::<Vec<_>>()
+        },
+    ));
+
+    mappings
 }
 
 /// For each BCB node or BCB edge that has an associated coverage counter,
@@ -204,6 +246,59 @@ fn inject_coverage_statements<'tcx>(
     }
 }
 
+/// For each condition, inject statements that update the condition bitmap after it has been
+/// evaluated. For each decision, inject statements that update the test vector bitmap after
+/// it has been evaluated.
+fn inject_mcdc_statements<'tcx>(
+    mir_body: &mut mir::Body<'tcx>,
+    basic_coverage_blocks: &CoverageGraph,
+    coverage_spans: &CoverageSpans,
+) {
+    if coverage_spans.test_vector_bitmap_bytes() == 0 {
+        return;
+    }
+
+    // Inject the test vector updates first, because `inject_statement` always inserts new
+    // statements at the head of the block.
+    for (end_bcbs, bitmap_idx, decision_depth) in
+        coverage_spans.mappings.iter().filter_map(|mapping| match &mapping.kind {
+            BcbMappingKind::MCDCDecision { end_bcbs, bitmap_idx, decision_depth, .. } => {
+                Some((end_bcbs, *bitmap_idx, *decision_depth))
+            }
+            _ => None,
+        })
+    {
+        for end in end_bcbs {
+            let end_bb = basic_coverage_blocks[*end].leader_bb();
+            inject_statement(
+                mir_body,
+                CoverageKind::TestVectorBitmapUpdate { bitmap_idx, decision_depth },
+                end_bb,
+            );
+        }
+    }
+
+    for (true_bcb, false_bcb, condition_id, decision_depth) in
+        coverage_spans.mappings.iter().filter_map(|mapping| match mapping.kind {
+            BcbMappingKind::MCDCBranch { true_bcb, false_bcb, condition_info, decision_depth } => {
+                Some((true_bcb, false_bcb, condition_info?.condition_id, decision_depth))
+            }
+            _ => None,
+        })
+    {
+        let true_bb = basic_coverage_blocks[true_bcb].leader_bb();
+        inject_statement(
+            mir_body,
+            CoverageKind::CondBitmapUpdate { id: condition_id, value: true, decision_depth },
+            true_bb,
+        );
+        let false_bb = basic_coverage_blocks[false_bcb].leader_bb();
+        inject_statement(
+            mir_body,
+            CoverageKind::CondBitmapUpdate { id: condition_id, value: false, decision_depth },
+            false_bb,
+        );
+    }
+}
+
 /// Given two basic blocks that have a control-flow edge between them, creates
 /// and returns a new block that sits between those blocks.
 fn inject_edge_counter_basic_block(
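
The `mcdc_num_condition_bitmaps` computation above allocates one scratch condition bitmap per nesting depth of decisions, i.e. `max depth + 1` bitmaps, or zero when there are no decisions. A standalone sketch of the same `map_or` pattern:

fn num_condition_bitmaps(decision_depths: &[u16]) -> usize {
    decision_depths.iter().max().map_or(0, |&max| usize::from(max) + 1)
}

fn main() {
    assert_eq!(num_condition_bitmaps(&[]), 0);        // no decisions, no bitmaps
    assert_eq!(num_condition_bitmaps(&[0, 0]), 1);    // all top-level decisions
    assert_eq!(num_condition_bitmaps(&[0, 2, 1]), 3); // nesting depth up to 2
}
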
diff --git a/compiler/rustc_mir_transform/src/coverage/query.rs b/compiler/rustc_mir_transform/src/coverage/query.rs
index 65715253647..f77ee63d02c 100644
--- a/compiler/rustc_mir_transform/src/coverage/query.rs
+++ b/compiler/rustc_mir_transform/src/coverage/query.rs
@@ -61,7 +61,17 @@ fn coverage_ids_info<'tcx>(
         .max()
         .unwrap_or(CounterId::ZERO);
 
-    CoverageIdsInfo { max_counter_id }
+    let mcdc_bitmap_bytes = mir_body
+        .coverage_branch_info
+        .as_deref()
+        .map(|info| {
+            info.mcdc_decision_spans
+                .iter()
+                .fold(0, |acc, decision| acc + (1_u32 << decision.conditions_num).div_ceil(8))
+        })
+        .unwrap_or_default();
+
+    CoverageIdsInfo { max_counter_id, mcdc_bitmap_bytes }
 }
 
 fn all_coverage_in_mir_body<'a, 'tcx>(
diff --git a/compiler/rustc_mir_transform/src/coverage/spans.rs b/compiler/rustc_mir_transform/src/coverage/spans.rs
index 672de1fbe60..d6432e2e9d4 100644
--- a/compiler/rustc_mir_transform/src/coverage/spans.rs
+++ b/compiler/rustc_mir_transform/src/coverage/spans.rs
@@ -1,101 +1,31 @@
-use rustc_data_structures::graph::WithNumNodes;
-use rustc_index::bit_set::BitSet;
 use rustc_middle::mir;
 use rustc_span::{BytePos, Span};
 
-use crate::coverage::graph::{BasicCoverageBlock, CoverageGraph, START_BCB};
+use crate::coverage::graph::{BasicCoverageBlock, CoverageGraph};
+use crate::coverage::mappings::{BcbMapping, BcbMappingKind};
 use crate::coverage::spans::from_mir::SpanFromMir;
 use crate::coverage::ExtractedHirInfo;
 
 mod from_mir;
 
-#[derive(Clone, Copy, Debug)]
-pub(super) enum BcbMappingKind {
-    /// Associates an ordinary executable code span with its corresponding BCB.
-    Code(BasicCoverageBlock),
-    /// Associates a branch span with BCBs for its true and false arms.
-    Branch { true_bcb: BasicCoverageBlock, false_bcb: BasicCoverageBlock },
-}
-
-#[derive(Debug)]
-pub(super) struct BcbMapping {
-    pub(super) kind: BcbMappingKind,
-    pub(super) span: Span,
-}
-
-pub(super) struct CoverageSpans {
-    bcb_has_mappings: BitSet<BasicCoverageBlock>,
-    mappings: Vec<BcbMapping>,
-}
-
-impl CoverageSpans {
-    pub(super) fn bcb_has_coverage_spans(&self, bcb: BasicCoverageBlock) -> bool {
-        self.bcb_has_mappings.contains(bcb)
-    }
-
-    pub(super) fn all_bcb_mappings(&self) -> impl Iterator<Item = &BcbMapping> {
-        self.mappings.iter()
-    }
-}
+// FIXME(#124545) It's awkward that we have to re-export this, because it's an
+// internal detail of `from_mir` that is also needed when handling branch and
+// MC/DC spans. Ideally we would find a more natural home for it.
+pub(super) use from_mir::unexpand_into_body_span_with_visible_macro;
 
-/// Extracts coverage-relevant spans from MIR, and associates them with
-/// their corresponding BCBs.
-///
-/// Returns `None` if no coverage-relevant spans could be extracted.
-pub(super) fn generate_coverage_spans(
+pub(super) fn extract_refined_covspans(
     mir_body: &mir::Body<'_>,
     hir_info: &ExtractedHirInfo,
     basic_coverage_blocks: &CoverageGraph,
-) -> Option<CoverageSpans> {
-    let mut mappings = vec![];
-
-    if hir_info.is_async_fn {
-        // An async function desugars into a function that returns a future,
-        // with the user code wrapped in a closure. Any spans in the desugared
-        // outer function will be unhelpful, so just keep the signature span
-        // and ignore all of the spans in the MIR body.
-        if let Some(span) = hir_info.fn_sig_span_extended {
-            mappings.push(BcbMapping { kind: BcbMappingKind::Code(START_BCB), span });
-        }
-    } else {
-        let sorted_spans = from_mir::mir_to_initial_sorted_coverage_spans(
-            mir_body,
-            hir_info,
-            basic_coverage_blocks,
-        );
-        let coverage_spans = SpansRefiner::refine_sorted_spans(sorted_spans);
-        mappings.extend(coverage_spans.into_iter().map(|RefinedCovspan { bcb, span, .. }| {
-            // Each span produced by the generator represents an ordinary code region.
-            BcbMapping { kind: BcbMappingKind::Code(bcb), span }
-        }));
-
-        mappings.extend(from_mir::extract_branch_mappings(
-            mir_body,
-            hir_info.body_span,
-            basic_coverage_blocks,
-        ));
-    }
-
-    if mappings.is_empty() {
-        return None;
-    }
-
-    // Identify which BCBs have one or more mappings.
-    let mut bcb_has_mappings = BitSet::new_empty(basic_coverage_blocks.num_nodes());
-    let mut insert = |bcb| {
-        bcb_has_mappings.insert(bcb);
-    };
-    for &BcbMapping { kind, span: _ } in &mappings {
-        match kind {
-            BcbMappingKind::Code(bcb) => insert(bcb),
-            BcbMappingKind::Branch { true_bcb, false_bcb } => {
-                insert(true_bcb);
-                insert(false_bcb);
-            }
-        }
-    }
-
-    Some(CoverageSpans { bcb_has_mappings, mappings })
+    mappings: &mut impl Extend<BcbMapping>,
+) {
+    let sorted_spans =
+        from_mir::mir_to_initial_sorted_coverage_spans(mir_body, hir_info, basic_coverage_blocks);
+    let coverage_spans = SpansRefiner::refine_sorted_spans(sorted_spans);
+    mappings.extend(coverage_spans.into_iter().map(|RefinedCovspan { bcb, span, .. }| {
+        // Each span produced by the generator represents an ordinary code region.
+        BcbMapping { kind: BcbMappingKind::Code(bcb), span }
+    }));
 }
 
 #[derive(Debug)]
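
`extract_refined_covspans` now takes `&mut impl Extend<BcbMapping>` instead of returning a `Vec`, so the caller in `mappings.rs` can accumulate several extraction passes into a single buffer. A minimal sketch of that parameter style, with a stand-in item type:

fn extract_into(out: &mut impl Extend<u32>) {
    out.extend([1, 2, 3]);
}

fn main() {
    let mut mappings = Vec::new();
    extract_into(&mut mappings); // one extraction pass...
    extract_into(&mut mappings); // ...and another, into the same buffer
    assert_eq!(mappings, [1, 2, 3, 1, 2, 3]);
}
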
diff --git a/compiler/rustc_mir_transform/src/coverage/spans/from_mir.rs b/compiler/rustc_mir_transform/src/coverage/spans/from_mir.rs
index adb0c9f1929..4ce37b5defc 100644
--- a/compiler/rustc_mir_transform/src/coverage/spans/from_mir.rs
+++ b/compiler/rustc_mir_transform/src/coverage/spans/from_mir.rs
@@ -1,9 +1,8 @@
 use rustc_data_structures::captures::Captures;
 use rustc_data_structures::fx::FxHashSet;
-use rustc_index::IndexVec;
-use rustc_middle::mir::coverage::{BlockMarkerId, BranchSpan, CoverageKind};
+use rustc_middle::mir::coverage::CoverageKind;
 use rustc_middle::mir::{
-    self, AggregateKind, BasicBlock, FakeReadCause, Rvalue, Statement, StatementKind, Terminator,
+    self, AggregateKind, FakeReadCause, Rvalue, Statement, StatementKind, Terminator,
     TerminatorKind,
 };
 use rustc_span::{ExpnKind, MacroKind, Span, Symbol};
@@ -11,7 +10,6 @@ use rustc_span::{ExpnKind, MacroKind, Span, Symbol};
 use crate::coverage::graph::{
     BasicCoverageBlock, BasicCoverageBlockData, CoverageGraph, START_BCB,
 };
-use crate::coverage::spans::{BcbMapping, BcbMappingKind};
 use crate::coverage::ExtractedHirInfo;
 
 /// Traverses the MIR body to produce an initial collection of coverage-relevant
@@ -227,7 +225,10 @@ fn filtered_statement_span(statement: &Statement<'_>) -> Option<Span> {
 
         // These coverage statements should not exist prior to coverage instrumentation.
         StatementKind::Coverage(
-            CoverageKind::CounterIncrement { .. } | CoverageKind::ExpressionUsed { .. },
+            CoverageKind::CounterIncrement { .. }
+            | CoverageKind::ExpressionUsed { .. }
+            | CoverageKind::CondBitmapUpdate { .. }
+            | CoverageKind::TestVectorBitmapUpdate { .. },
         ) => bug!(
             "Unexpected coverage statement found during coverage instrumentation: {statement:?}"
         ),
@@ -282,7 +283,7 @@ fn filtered_terminator_span(terminator: &Terminator<'_>) -> Option<Span> {
 ///
 /// [^1]Expansions result from Rust syntax including macros, syntactic sugar,
 /// etc.).
-fn unexpand_into_body_span_with_visible_macro(
+pub(crate) fn unexpand_into_body_span_with_visible_macro(
     original_span: Span,
     body_span: Span,
 ) -> Option<(Span, Option<Symbol>)> {
@@ -360,48 +361,3 @@ impl SpanFromMir {
         Self { span, visible_macro, bcb, is_hole }
     }
 }
-
-pub(super) fn extract_branch_mappings(
-    mir_body: &mir::Body<'_>,
-    body_span: Span,
-    basic_coverage_blocks: &CoverageGraph,
-) -> Vec<BcbMapping> {
-    let Some(branch_info) = mir_body.coverage_branch_info.as_deref() else {
-        return vec![];
-    };
-
-    let mut block_markers = IndexVec::<BlockMarkerId, Option<BasicBlock>>::from_elem_n(
-        None,
-        branch_info.num_block_markers,
-    );
-
-    // Fill out the mapping from block marker IDs to their enclosing blocks.
-    for (bb, data) in mir_body.basic_blocks.iter_enumerated() {
-        for statement in &data.statements {
-            if let StatementKind::Coverage(CoverageKind::BlockMarker { id }) = statement.kind {
-                block_markers[id] = Some(bb);
-            }
-        }
-    }
-
-    branch_info
-        .branch_spans
-        .iter()
-        .filter_map(|&BranchSpan { span: raw_span, true_marker, false_marker }| {
-            // For now, ignore any branch span that was introduced by
-            // expansion. This makes things like assert macros less noisy.
-            if !raw_span.ctxt().outer_expn_data().is_root() {
-                return None;
-            }
-            let (span, _) = unexpand_into_body_span_with_visible_macro(raw_span, body_span)?;
-
-            let bcb_from_marker =
-                |marker: BlockMarkerId| basic_coverage_blocks.bcb_from_bb(block_markers[marker]?);
-
-            let true_bcb = bcb_from_marker(true_marker)?;
-            let false_bcb = bcb_from_marker(false_marker)?;
-
-            Some(BcbMapping { kind: BcbMappingKind::Branch { true_bcb, false_bcb }, span })
-        })
-        .collect::<Vec<_>>()
-}
diff --git a/compiler/rustc_mir_transform/src/coverage/tests.rs b/compiler/rustc_mir_transform/src/coverage/tests.rs
index 569998de35e..cf1a2b399f9 100644
--- a/compiler/rustc_mir_transform/src/coverage/tests.rs
+++ b/compiler/rustc_mir_transform/src/coverage/tests.rs
@@ -28,8 +28,7 @@ use super::counters;
 use super::graph::{self, BasicCoverageBlock};
 
 use itertools::Itertools;
-use rustc_data_structures::graph::WithNumNodes;
-use rustc_data_structures::graph::WithSuccessors;
+use rustc_data_structures::graph::{DirectedGraph, Successors};
 use rustc_index::{Idx, IndexVec};
 use rustc_middle::mir::*;
 use rustc_middle::ty;
diff --git a/compiler/rustc_mir_transform/src/deduce_param_attrs.rs b/compiler/rustc_mir_transform/src/deduce_param_attrs.rs
index ca63f5550ae..370e930b740 100644
--- a/compiler/rustc_mir_transform/src/deduce_param_attrs.rs
+++ b/compiler/rustc_mir_transform/src/deduce_param_attrs.rs
@@ -160,7 +160,7 @@ pub fn deduced_param_attrs<'tcx>(
         return &[];
     }
 
-    // If the Freeze language item isn't present, then don't bother.
+    // If the Freeze lang item isn't present, then don't bother.
     if tcx.lang_items().freeze_trait().is_none() {
         return &[];
     }
diff --git a/compiler/rustc_mir_transform/src/gvn.rs b/compiler/rustc_mir_transform/src/gvn.rs
index d4f736d2a50..4dd595ce1e1 100644
--- a/compiler/rustc_mir_transform/src/gvn.rs
+++ b/compiler/rustc_mir_transform/src/gvn.rs
@@ -121,7 +121,7 @@ impl<'tcx> MirPass<'tcx> for GVN {
 
 fn propagate_ssa<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
     let param_env = tcx.param_env_reveal_all_normalized(body.source.def_id());
-    let ssa = SsaLocals::new(body);
+    let ssa = SsaLocals::new(tcx, body, param_env);
     // Clone dominators as we need them while mutating the body.
     let dominators = body.basic_blocks.dominators().clone();
 
@@ -724,6 +724,14 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
         // Invariant: `value` holds the value up-to the `index`th projection excluded.
         let mut value = self.locals[place.local]?;
         for (index, proj) in place.projection.iter().enumerate() {
+            if let Value::Projection(pointer, ProjectionElem::Deref) = *self.get(value)
+                && let Value::Address { place: mut pointee, kind, .. } = *self.get(pointer)
+                && let AddressKind::Ref(BorrowKind::Shared) = kind
+                && let Some(v) = self.simplify_place_value(&mut pointee, location)
+            {
+                value = v;
+                place_ref = pointee.project_deeper(&place.projection[index..], self.tcx).as_ref();
+            }
             if let Some(local) = self.try_as_local(value, location) {
                 // Both `local` and `Place { local: place.local, projection: projection[..index] }`
                 // hold the same value. Therefore, following place holds the value in the original
@@ -735,6 +743,14 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
             value = self.project(base, value, proj)?;
         }
 
+        if let Value::Projection(pointer, ProjectionElem::Deref) = *self.get(value)
+            && let Value::Address { place: mut pointee, kind, .. } = *self.get(pointer)
+            && let AddressKind::Ref(BorrowKind::Shared) = kind
+            && let Some(v) = self.simplify_place_value(&mut pointee, location)
+        {
+            value = v;
+            place_ref = pointee.project_deeper(&[], self.tcx).as_ref();
+        }
         if let Some(new_local) = self.try_as_local(value, location) {
             place_ref = PlaceRef { local: new_local, projection: &[] };
         }
@@ -885,6 +901,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
                 AggregateKind::Adt(did, ..) => tcx.def_kind(did) != DefKind::Enum,
                 // Coroutines are never ZST, as they at least contain the implicit states.
                 AggregateKind::Coroutine(..) => false,
+                AggregateKind::RawPtr(..) => bug!("MIR for RawPtr aggregate must have 2 fields"),
             };
 
             if is_zst {
@@ -910,6 +927,8 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
             }
             // Do not track unions.
             AggregateKind::Adt(_, _, _, _, Some(_)) => return None,
+            // FIXME: Do the extra work to GVN `from_raw_parts`
+            AggregateKind::RawPtr(..) => return None,
         };
 
         let fields: Option<Vec<_>> = fields
@@ -1202,7 +1221,7 @@ impl<'tcx> VnState<'_, 'tcx> {
             // not give the same value as the former mention.
             && value.is_deterministic()
         {
-            return Some(ConstOperand { span: rustc_span::DUMMY_SP, user_ty: None, const_: value });
+            return Some(ConstOperand { span: DUMMY_SP, user_ty: None, const_: value });
         }
 
         let op = self.evaluated[index].as_ref()?;
@@ -1219,7 +1238,7 @@ impl<'tcx> VnState<'_, 'tcx> {
         assert!(!value.may_have_provenance(self.tcx, op.layout.size));
 
         let const_ = Const::Val(value, op.layout.ty);
-        Some(ConstOperand { span: rustc_span::DUMMY_SP, user_ty: None, const_ })
+        Some(ConstOperand { span: DUMMY_SP, user_ty: None, const_ })
     }
 
     /// If there is a local which is assigned `index`, and its assignment strictly dominates `loc`,
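
The two new `if let` chains above let GVN look through a `Deref` of a known address when the borrow is shared: since the pointee cannot change while a shared borrow is live, the dereference is value-numbered to the pointee itself and the place is re-based onto it. A source-level sketch of the effect (hypothetical illustration; GVN operates on MIR `Value`s, not surface syntax):

fn look_through_ref(a: (i32, i32)) -> (i32, i32) {
    let r = &a;     // Value::Address of `a` with AddressKind::Ref(Shared)
    let x = (*r).0; // Deref-then-field: same value number as `a.0`
    let y = (*r).1; // likewise `a.1`
    (x, y)
}

fn main() {
    assert_eq!(look_through_ref((7, 9)), (7, 9));
}
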
diff --git a/compiler/rustc_mir_transform/src/inline.rs b/compiler/rustc_mir_transform/src/inline.rs
index 60513a674af..7a3b08f82f6 100644
--- a/compiler/rustc_mir_transform/src/inline.rs
+++ b/compiler/rustc_mir_transform/src/inline.rs
@@ -11,7 +11,7 @@ use rustc_middle::mir::visit::*;
 use rustc_middle::mir::*;
 use rustc_middle::ty::TypeVisitableExt;
 use rustc_middle::ty::{self, Instance, InstanceDef, ParamEnv, Ty, TyCtxt};
-use rustc_session::config::OptLevel;
+use rustc_session::config::{DebugInfo, OptLevel};
 use rustc_span::source_map::Spanned;
 use rustc_span::sym;
 use rustc_target::abi::FieldIdx;
@@ -332,7 +332,8 @@ impl<'tcx> Inliner<'tcx> {
             | InstanceDef::DropGlue(..)
             | InstanceDef::CloneShim(..)
             | InstanceDef::ThreadLocalShim(..)
-            | InstanceDef::FnPtrAddrShim(..) => return Ok(()),
+            | InstanceDef::FnPtrAddrShim(..)
+            | InstanceDef::AsyncDropGlueCtorShim(..) => return Ok(()),
         }
 
         if self.tcx.is_constructor(callee_def_id) {
@@ -699,7 +700,19 @@ impl<'tcx> Inliner<'tcx> {
         // Insert all of the (mapped) parts of the callee body into the caller.
         caller_body.local_decls.extend(callee_body.drain_vars_and_temps());
         caller_body.source_scopes.extend(&mut callee_body.source_scopes.drain(..));
-        caller_body.var_debug_info.append(&mut callee_body.var_debug_info);
+        if self
+            .tcx
+            .sess
+            .opts
+            .unstable_opts
+            .inline_mir_preserve_debug
+            .unwrap_or(self.tcx.sess.opts.debuginfo != DebugInfo::None)
+        {
+            // Note that we need to preserve these in the standard library so that
+            // people working on Rust can build with or without debuginfo while
+            // still getting consistent results from the mir-opt tests.
+            caller_body.var_debug_info.append(&mut callee_body.var_debug_info);
+        }
         caller_body.basic_blocks_mut().extend(callee_body.basic_blocks_mut().drain(..));
 
         caller_body[callsite.block].terminator = Some(Terminator {
@@ -707,18 +720,12 @@ impl<'tcx> Inliner<'tcx> {
             kind: TerminatorKind::Goto { target: integrator.map_block(START_BLOCK) },
         });
 
-        // Copy only unevaluated constants from the callee_body into the caller_body.
-        // Although we are only pushing `ConstKind::Unevaluated` consts to
-        // `required_consts`, here we may not only have `ConstKind::Unevaluated`
-        // because we are calling `instantiate_and_normalize_erasing_regions`.
-        caller_body.required_consts.extend(callee_body.required_consts.iter().copied().filter(
-            |&ct| match ct.const_ {
-                Const::Ty(_) => {
-                    bug!("should never encounter ty::UnevaluatedConst in `required_consts`")
-                }
-                Const::Val(..) | Const::Unevaluated(..) => true,
-            },
-        ));
+        // Copy required constants from the callee_body into the caller_body. Although we are only
+        // pushing unevaluated consts to `required_consts`, here they may have been evaluated
+        // because we are calling `instantiate_and_normalize_erasing_regions` -- so we filter again.
+        caller_body.required_consts.extend(
+            callee_body.required_consts.into_iter().filter(|ct| ct.const_.is_required_const()),
+        );
         // Now that we incorporated the callee's `required_consts`, we can remove the callee from
         // `mentioned_items` -- but we have to take their `mentioned_items` in return. This does
         // some extra work here to save the monomorphization collector work later. It helps a lot,
@@ -734,8 +741,9 @@ impl<'tcx> Inliner<'tcx> {
             caller_body.mentioned_items.remove(idx);
             caller_body.mentioned_items.extend(callee_body.mentioned_items);
         } else {
-            // If we can't find the callee, there's no point in adding its items.
-            // Probably it already got removed by being inlined elsewhere in the same function.
+            // If we can't find the callee, there's no point in adding its items. Probably it
+            // already got removed by being inlined elsewhere in the same function, so we already
+            // took its items.
         }
     }
 
@@ -1071,7 +1079,8 @@ fn try_instance_mir<'tcx>(
     tcx: TyCtxt<'tcx>,
     instance: InstanceDef<'tcx>,
 ) -> Result<&'tcx Body<'tcx>, &'static str> {
-    if let ty::InstanceDef::DropGlue(_, Some(ty)) = instance
+    if let ty::InstanceDef::DropGlue(_, Some(ty))
+    | ty::InstanceDef::AsyncDropGlueCtorShim(_, Some(ty)) = instance
         && let ty::Adt(def, args) = ty.kind()
     {
         let fields = def.all_fields();
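
The debuginfo condition above boils down to: an explicit `inline_mir_preserve_debug` override (an `Option<bool>`, presumably set via `-Z inline-mir-preserve-debug`) wins, otherwise the callee's var-debug-info is kept whenever any debuginfo is requested at all. A sketch of that defaulting logic:

fn preserve_callee_debuginfo(explicit: Option<bool>, debuginfo_enabled: bool) -> bool {
    explicit.unwrap_or(debuginfo_enabled)
}

fn main() {
    assert!(preserve_callee_debuginfo(Some(true), false)); // forced on (e.g. for mir-opt tests)
    assert!(!preserve_callee_debuginfo(Some(false), true)); // forced off
    assert!(preserve_callee_debuginfo(None, true)); // default follows -C debuginfo
    assert!(!preserve_callee_debuginfo(None, false));
}
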
diff --git a/compiler/rustc_mir_transform/src/inline/cycle.rs b/compiler/rustc_mir_transform/src/inline/cycle.rs
index 99c7b616f1b..8c5f965108b 100644
--- a/compiler/rustc_mir_transform/src/inline/cycle.rs
+++ b/compiler/rustc_mir_transform/src/inline/cycle.rs
@@ -94,8 +94,10 @@ pub(crate) fn mir_callgraph_reachable<'tcx>(
                 | InstanceDef::CloneShim(..) => {}
 
                 // This shim does not call any other functions, thus there can be no recursion.
-                InstanceDef::FnPtrAddrShim(..) => continue,
-                InstanceDef::DropGlue(..) => {
+                InstanceDef::FnPtrAddrShim(..) => {
+                    continue;
+                }
+                InstanceDef::DropGlue(..) | InstanceDef::AsyncDropGlueCtorShim(..) => {
                     // FIXME: A not fully instantiated drop shim can cause ICEs if one attempts to
                     // have its MIR built. Likely oli-obk just screwed up the `ParamEnv`s, so this
                     // needs some more analysis.
diff --git a/compiler/rustc_mir_transform/src/instsimplify.rs b/compiler/rustc_mir_transform/src/instsimplify.rs
index ff786d44d6a..fd768cc96ae 100644
--- a/compiler/rustc_mir_transform/src/instsimplify.rs
+++ b/compiler/rustc_mir_transform/src/instsimplify.rs
@@ -36,6 +36,7 @@ impl<'tcx> MirPass<'tcx> for InstSimplify {
                         ctx.simplify_bool_cmp(&statement.source_info, rvalue);
                         ctx.simplify_ref_deref(&statement.source_info, rvalue);
                         ctx.simplify_len(&statement.source_info, rvalue);
+                        ctx.simplify_ptr_aggregate(&statement.source_info, rvalue);
                         ctx.simplify_cast(rvalue);
                     }
                     _ => {}
@@ -58,8 +59,17 @@ struct InstSimplifyContext<'tcx, 'a> {
 
 impl<'tcx> InstSimplifyContext<'tcx, '_> {
     fn should_simplify(&self, source_info: &SourceInfo, rvalue: &Rvalue<'tcx>) -> bool {
+        self.should_simplify_custom(source_info, "Rvalue", rvalue)
+    }
+
+    fn should_simplify_custom(
+        &self,
+        source_info: &SourceInfo,
+        label: &str,
+        value: impl std::fmt::Debug,
+    ) -> bool {
         self.tcx.consider_optimizing(|| {
-            format!("InstSimplify - Rvalue: {rvalue:?} SourceInfo: {source_info:?}")
+            format!("InstSimplify - {label}: {value:?} SourceInfo: {source_info:?}")
         })
     }
 
@@ -111,7 +121,7 @@ impl<'tcx> InstSimplifyContext<'tcx, '_> {
         if a.const_.ty().is_bool() { a.const_.try_to_bool() } else { None }
     }
 
-    /// Transform "&(*a)" ==> "a".
+    /// Transform `&(*a)` ==> `a`.
     fn simplify_ref_deref(&self, source_info: &SourceInfo, rvalue: &mut Rvalue<'tcx>) {
         if let Rvalue::Ref(_, _, place) = rvalue {
             if let Some((base, ProjectionElem::Deref)) = place.as_ref().last_projection() {
@@ -131,7 +141,7 @@ impl<'tcx> InstSimplifyContext<'tcx, '_> {
         }
     }
 
-    /// Transform "Len([_; N])" ==> "N".
+    /// Transform `Len([_; N])` ==> `N`.
     fn simplify_len(&self, source_info: &SourceInfo, rvalue: &mut Rvalue<'tcx>) {
         if let Rvalue::Len(ref place) = *rvalue {
             let place_ty = place.ty(self.local_decls, self.tcx).ty;
@@ -147,6 +157,30 @@ impl<'tcx> InstSimplifyContext<'tcx, '_> {
         }
     }
 
+    /// Transform `Aggregate(RawPtr, [p, ()])` ==> `Cast(PtrToPtr, p)`.
+    fn simplify_ptr_aggregate(&self, source_info: &SourceInfo, rvalue: &mut Rvalue<'tcx>) {
+        if let Rvalue::Aggregate(box AggregateKind::RawPtr(pointee_ty, mutability), fields) = rvalue
+        {
+            let meta_ty = fields.raw[1].ty(self.local_decls, self.tcx);
+            if meta_ty.is_unit() {
+                // The mutable borrows we're holding prevent printing `rvalue` here
+                if !self.should_simplify_custom(
+                    source_info,
+                    "Aggregate::RawPtr",
+                    (&pointee_ty, *mutability, &fields),
+                ) {
+                    return;
+                }
+
+                let mut fields = std::mem::take(fields);
+                let _meta = fields.pop().unwrap();
+                let data = fields.pop().unwrap();
+                let ptr_ty = Ty::new_ptr(self.tcx, *pointee_ty, *mutability);
+                *rvalue = Rvalue::Cast(CastKind::PtrToPtr, data, ptr_ty);
+            }
+        }
+    }
+
     fn simplify_ub_check(&self, source_info: &SourceInfo, rvalue: &mut Rvalue<'tcx>) {
         if let Rvalue::NullaryOp(NullOp::UbChecks, _) = *rvalue {
             let const_ = Const::from_bool(self.tcx, self.tcx.sess.ub_checks());
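
`simplify_ptr_aggregate` above rewrites `Aggregate(RawPtr, [data, ()])` into a plain `PtrToPtr` cast whenever the metadata type is unit, i.e. when the resulting pointer is thin. A source-level sketch of why that is sound (hypothetical helper; the pass rewrites the MIR rvalue directly):

// A thin raw pointer carries unit metadata, so building one from
// `(data, ())` adds no information beyond a pointer-to-pointer cast.
fn thin_ptr_from_parts(data: *const ()) -> *const u32 {
    // What `Aggregate(RawPtr, [data, ()])` reduces to when the metadata is `()`:
    data as *const u32
}

fn main() {
    let v = 5_u32;
    let p = thin_ptr_from_parts(&v as *const u32 as *const ());
    assert_eq!(unsafe { *p }, 5);
}
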
diff --git a/compiler/rustc_mir_transform/src/known_panics_lint.rs b/compiler/rustc_mir_transform/src/known_panics_lint.rs
index 2218154ea5e..90c1c7ba63b 100644
--- a/compiler/rustc_mir_transform/src/known_panics_lint.rs
+++ b/compiler/rustc_mir_transform/src/known_panics_lint.rs
@@ -583,32 +583,21 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
                 val.into()
             }
 
-            Aggregate(ref kind, ref fields) => {
-                // Do not const prop union fields as they can be
-                // made to produce values that don't match their
-                // underlying layout's type (see ICE #121534).
-                // If the last element of the `Adt` tuple
-                // is `Some` it indicates the ADT is a union
-                if let AggregateKind::Adt(_, _, _, _, Some(_)) = **kind {
-                    return None;
-                };
-                Value::Aggregate {
-                    fields: fields
-                        .iter()
-                        .map(|field| {
-                            self.eval_operand(field).map_or(Value::Uninit, Value::Immediate)
-                        })
-                        .collect(),
-                    variant: match **kind {
-                        AggregateKind::Adt(_, variant, _, _, _) => variant,
-                        AggregateKind::Array(_)
-                        | AggregateKind::Tuple
-                        | AggregateKind::Closure(_, _)
-                        | AggregateKind::Coroutine(_, _)
-                        | AggregateKind::CoroutineClosure(_, _) => VariantIdx::ZERO,
-                    },
-                }
-            }
+            Aggregate(ref kind, ref fields) => Value::Aggregate {
+                fields: fields
+                    .iter()
+                    .map(|field| self.eval_operand(field).map_or(Value::Uninit, Value::Immediate))
+                    .collect(),
+                variant: match **kind {
+                    AggregateKind::Adt(_, variant, _, _, _) => variant,
+                    AggregateKind::Array(_)
+                    | AggregateKind::Tuple
+                    | AggregateKind::RawPtr(_, _)
+                    | AggregateKind::Closure(_, _)
+                    | AggregateKind::Coroutine(_, _)
+                    | AggregateKind::CoroutineClosure(_, _) => VariantIdx::ZERO,
+                },
+            },
 
             Repeat(ref op, n) => {
                 trace!(?op, ?n);
@@ -796,7 +785,7 @@ impl<'tcx> Visitor<'tcx> for ConstPropagator<'_, 'tcx> {
                 if let Some(ref value) = self.eval_operand(discr)
                     && let Some(value_const) = self.use_ecx(|this| this.ecx.read_scalar(value))
                     && let Ok(constant) = value_const.try_to_int()
-                    && let Ok(constant) = constant.to_bits(constant.size())
+                    && let Ok(constant) = constant.try_to_bits(constant.size())
                 {
                     // We managed to evaluate the discriminant, so we know we only need to visit
                     // one target.
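
A minimal sketch of the situation the comment above describes, assuming the
propagator has resolved the discriminant to a constant:

    fn main() {
        let x = 1u8; // known constant
        let y = match x {
            1 => 10,
            _ => 20, // unreachable once `x` is known, so the lint skips it
        };
        assert_eq!(y, 10);
    }
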
@@ -895,13 +884,20 @@ impl CanConstProp {
         };
         for (local, val) in cpv.can_const_prop.iter_enumerated_mut() {
             let ty = body.local_decls[local].ty;
-            match tcx.layout_of(param_env.and(ty)) {
-                Ok(layout) if layout.size < Size::from_bytes(MAX_ALLOC_LIMIT) => {}
-                // Either the layout fails to compute, then we can't use this local anyway
-                // or the local is too large, then we don't want to.
-                _ => {
-                    *val = ConstPropMode::NoPropagation;
-                    continue;
+            if ty.is_union() {
+                // Unions are incompatible with the current implementation of
+                // const prop, because Rust has no concept of an active
+                // variant of a union.
+                *val = ConstPropMode::NoPropagation;
+            } else {
+                match tcx.layout_of(param_env.and(ty)) {
+                    Ok(layout) if layout.size < Size::from_bytes(MAX_ALLOC_LIMIT) => {}
+                    // Either the layout fails to compute, then we can't use this local anyway
+                    // or the local is too large, then we don't want to.
+                    _ => {
+                        *val = ConstPropMode::NoPropagation;
+                        continue;
+                    }
                 }
             }
         }
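
A sketch of the hazard behind both hunks above (cf. ICE #121534): a union's
bytes need not be a valid value of whichever field is read, and Rust tracks no
"active variant", so the propagator has nothing sound to record for such a
local. The types here are illustrative:

    union Bits {
        int: u32,
        float: f32,
    }

    fn main() {
        let u = Bits { int: 0x3f80_0000 };
        // Reads the bytes written through `int` reinterpreted as `f32`.
        let f = unsafe { u.float };
        assert_eq!(f, 1.0);
    }
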
diff --git a/compiler/rustc_mir_transform/src/lib.rs b/compiler/rustc_mir_transform/src/lib.rs
index e477c068229..e69c5da757e 100644
--- a/compiler/rustc_mir_transform/src/lib.rs
+++ b/compiler/rustc_mir_transform/src/lib.rs
@@ -4,7 +4,6 @@
 #![feature(cow_is_borrowed)]
 #![feature(decl_macro)]
 #![feature(impl_trait_in_assoc_type)]
-#![feature(inline_const)]
 #![feature(is_sorted)]
 #![feature(let_chains)]
 #![feature(map_try_insert)]
@@ -333,6 +332,8 @@ fn mir_promoted(
         body.tainted_by_errors = Some(error_reported);
     }
 
+    // Collect `required_consts` *before* promotion, so if there are any consts being promoted,
+    // we still add them to the list in the outer MIR body.
     let mut required_consts = Vec::new();
     let mut required_consts_visitor = RequiredConstsVisitor::new(&mut required_consts);
     for (bb, bb_data) in traversal::reverse_postorder(&body) {
diff --git a/compiler/rustc_mir_transform/src/lower_intrinsics.rs b/compiler/rustc_mir_transform/src/lower_intrinsics.rs
index 7e8920604c1..da63fcf23d9 100644
--- a/compiler/rustc_mir_transform/src/lower_intrinsics.rs
+++ b/compiler/rustc_mir_transform/src/lower_intrinsics.rs
@@ -287,6 +287,34 @@ impl<'tcx> MirPass<'tcx> for LowerIntrinsics {
                             terminator.kind = TerminatorKind::Unreachable;
                         }
                     }
+                    sym::aggregate_raw_ptr => {
+                        let Ok([data, meta]) = <[_; 2]>::try_from(std::mem::take(args)) else {
+                            span_bug!(
+                                terminator.source_info.span,
+                                "Wrong number of arguments for aggregate_raw_ptr intrinsic",
+                            );
+                        };
+                        let target = target.unwrap();
+                        let pointer_ty = generic_args.type_at(0);
+                        let kind = if let ty::RawPtr(pointee_ty, mutability) = pointer_ty.kind() {
+                            AggregateKind::RawPtr(*pointee_ty, *mutability)
+                        } else {
+                            span_bug!(
+                                terminator.source_info.span,
+                                "Return type of aggregate_raw_ptr intrinsic must be a raw pointer",
+                            );
+                        };
+                        let fields = [data.node, meta.node];
+                        block.statements.push(Statement {
+                            source_info: terminator.source_info,
+                            kind: StatementKind::Assign(Box::new((
+                                *destination,
+                                Rvalue::Aggregate(Box::new(kind), fields.into()),
+                            ))),
+                        });
+
+                        terminator.kind = TerminatorKind::Goto { target };
+                    }
                     _ => {}
                 }
             }
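
For context, a sketch of the surface API that bottoms out in this intrinsic.
`ptr::from_raw_parts` is unstable (`ptr_metadata`), so this is a nightly-only
illustration of the call shape, not a stable recipe:

    #![feature(ptr_metadata)]
    use std::ptr;

    fn main() {
        let xs = [1u8, 2, 3];
        // Calls the `aggregate_raw_ptr` intrinsic, which this pass now rewrites
        // into an `Rvalue::Aggregate(AggregateKind::RawPtr(..), [data, meta])`
        // assignment followed by a plain `Goto`.
        let p: *const [u8] = ptr::from_raw_parts(xs.as_ptr().cast::<()>(), xs.len());
        assert_eq!(unsafe { &*p }, &xs[..]);
    }
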
diff --git a/compiler/rustc_mir_transform/src/lower_slice_len.rs b/compiler/rustc_mir_transform/src/lower_slice_len.rs
index 8137525a332..2267a621a83 100644
--- a/compiler/rustc_mir_transform/src/lower_slice_len.rs
+++ b/compiler/rustc_mir_transform/src/lower_slice_len.rs
@@ -21,7 +21,7 @@ impl<'tcx> MirPass<'tcx> for LowerSliceLenCalls {
 pub fn lower_slice_len_calls<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
     let language_items = tcx.lang_items();
     let Some(slice_len_fn_item_def_id) = language_items.slice_len_fn() else {
-        // there is no language item to compare to :)
+        // there is no lang item to compare to :)
         return;
     };
 
diff --git a/compiler/rustc_mir_transform/src/match_branches.rs b/compiler/rustc_mir_transform/src/match_branches.rs
index 4d9a198eeb2..1411d9be223 100644
--- a/compiler/rustc_mir_transform/src/match_branches.rs
+++ b/compiler/rustc_mir_transform/src/match_branches.rs
@@ -41,7 +41,10 @@ impl<'tcx> MirPass<'tcx> for MatchBranchSimplification {
                 should_cleanup = true;
                 continue;
             }
-            if SimplifyToExp::default().simplify(tcx, body, bb_idx, param_env).is_some() {
+            // unsound: https://github.com/rust-lang/rust/issues/124150
+            if tcx.sess.opts.unstable_opts.unsound_mir_opts
+                && SimplifyToExp::default().simplify(tcx, body, bb_idx, param_env).is_some()
+            {
                 should_cleanup = true;
                 continue;
             }
@@ -369,8 +372,7 @@ impl<'tcx> SimplifyMatch<'tcx> for SimplifyToExp {
         }
 
         fn int_equal(l: ScalarInt, r: impl Into<u128>, size: Size) -> bool {
-            l.try_to_int(l.size()).unwrap()
-                == ScalarInt::try_from_uint(r, size).unwrap().try_to_int(size).unwrap()
+            l.assert_int(l.size()) == ScalarInt::try_from_uint(r, size).unwrap().assert_int(size)
         }
 
         // We first compare the two branches, and then the other branches need to fulfill the same conditions.
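
To make the gated rewrite concrete, a source-level sketch of the pattern
`SimplifyToExp` targets (the pass itself operates on MIR `SwitchInt`s):

    #[derive(Clone, Copy)]
    enum Flag {
        Off = 0,
        On = 1,
    }

    // Every arm yields exactly its variant's discriminant, so the whole match
    // is equivalent to the branch-free `f as u8` -- the form SimplifyToExp
    // produces, now only under `-Zunsound-mir-opts` because of #124150.
    fn to_int(f: Flag) -> u8 {
        match f {
            Flag::Off => 0,
            Flag::On => 1,
        }
    }

    fn main() {
        assert_eq!(to_int(Flag::On), 1);
    }
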
diff --git a/compiler/rustc_mir_transform/src/normalize_array_len.rs b/compiler/rustc_mir_transform/src/normalize_array_len.rs
index 128634bd7f2..c26a5461633 100644
--- a/compiler/rustc_mir_transform/src/normalize_array_len.rs
+++ b/compiler/rustc_mir_transform/src/normalize_array_len.rs
@@ -22,7 +22,8 @@ impl<'tcx> MirPass<'tcx> for NormalizeArrayLen {
 }
 
 fn normalize_array_len_calls<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
-    let ssa = SsaLocals::new(body);
+    let param_env = tcx.param_env_reveal_all_normalized(body.source.def_id());
+    let ssa = SsaLocals::new(tcx, body, param_env);
 
     let slice_lengths = compute_slice_length(tcx, &ssa, body);
     debug!(?slice_lengths);
diff --git a/compiler/rustc_mir_transform/src/promote_consts.rs b/compiler/rustc_mir_transform/src/promote_consts.rs
index a9d4b860b7a..689a547689a 100644
--- a/compiler/rustc_mir_transform/src/promote_consts.rs
+++ b/compiler/rustc_mir_transform/src/promote_consts.rs
@@ -13,6 +13,7 @@
 //! move analysis runs after promotion on broken MIR.
 
 use either::{Left, Right};
+use rustc_data_structures::fx::FxHashSet;
 use rustc_hir as hir;
 use rustc_middle::mir;
 use rustc_middle::mir::visit::{MutVisitor, MutatingUseContext, PlaceContext, Visitor};
@@ -175,6 +176,12 @@ fn collect_temps_and_candidates<'tcx>(
 struct Validator<'a, 'tcx> {
     ccx: &'a ConstCx<'a, 'tcx>,
     temps: &'a mut IndexSlice<Local, TempState>,
+    /// For backwards compatibility, we promote function calls in `const`/`static`
+    /// initializers. But we want to avoid evaluating code that might panic and that otherwise
+    /// would not have been evaluated, so we only promote such calls in basic blocks that are
+    /// guaranteed to execute, i.e. that are definitely not dead code. Here we cache the result
+    /// of computing that set of basic blocks.
+    promotion_safe_blocks: Option<FxHashSet<BasicBlock>>,
 }
 
 impl<'a, 'tcx> std::ops::Deref for Validator<'a, 'tcx> {
@@ -260,7 +267,9 @@ impl<'tcx> Validator<'_, 'tcx> {
                     self.validate_rvalue(rhs)
                 }
                 Right(terminator) => match &terminator.kind {
-                    TerminatorKind::Call { func, args, .. } => self.validate_call(func, args),
+                    TerminatorKind::Call { func, args, .. } => {
+                        self.validate_call(func, args, loc.block)
+                    }
                     TerminatorKind::Yield { .. } => Err(Unpromotable),
                     kind => {
                         span_bug!(terminator.source_info.span, "{:?} not promotable", kind);
@@ -384,7 +393,7 @@ impl<'tcx> Validator<'_, 'tcx> {
         match kind {
             // Reject these borrow types just to be safe.
             // FIXME(RalfJung): could we allow them? Should we? No point in it until we have a usecase.
-            BorrowKind::Fake | BorrowKind::Mut { kind: MutBorrowKind::ClosureCapture } => {
+            BorrowKind::Fake(_) | BorrowKind::Mut { kind: MutBorrowKind::ClosureCapture } => {
                 return Err(Unpromotable);
             }
 
@@ -490,14 +499,14 @@ impl<'tcx> Validator<'_, 'tcx> {
                                 }
                                 _ => None,
                             };
-                            match rhs_val.map(|x| x.try_to_uint(sz).unwrap()) {
+                            match rhs_val.map(|x| x.assert_uint(sz)) {
                                 // for the zero test, int vs uint does not matter
                                 Some(x) if x != 0 => {}        // okay
                                 _ => return Err(Unpromotable), // value not known or 0 -- not okay
                             }
                             // Furthermore, for signed division, we also have to exclude `int::MIN / -1`.
                             if lhs_ty.is_signed() {
-                                match rhs_val.map(|x| x.try_to_int(sz).unwrap()) {
+                                match rhs_val.map(|x| x.assert_int(sz)) {
                                     Some(-1) | None => {
                                         // The RHS is -1 or unknown, so we have to be careful.
                                         // But is the LHS int::MIN?
@@ -508,7 +517,7 @@ impl<'tcx> Validator<'_, 'tcx> {
                                             _ => None,
                                         };
                                         let lhs_min = sz.signed_int_min();
-                                        match lhs_val.map(|x| x.try_to_int(sz).unwrap()) {
+                                        match lhs_val.map(|x| x.assert_int(sz)) {
                                             Some(x) if x != lhs_min => {}  // okay
                                             _ => return Err(Unpromotable), // value not known or int::MIN -- not okay
                                         }
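
A sketch of what these checks admit and reject when validating a division for
promotion:

    fn main() {
        // Promotable: the divisor is statically known to be nonzero, and the
        // signed `int::MIN / -1` overflow case is excluded by the checks above.
        let ok: &'static i32 = &(10 / 2);
        assert_eq!(*ok, 5);
        // `&(10 / 0)` and `&(i32::MIN / -1)` would both be rejected as
        // Unpromotable, since promoting them would guarantee an evaluation error.
    }
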
@@ -588,29 +597,79 @@ impl<'tcx> Validator<'_, 'tcx> {
         Ok(())
     }
 
+    /// Computes the set of basic blocks of this MIR that are definitely going to be executed
+    /// if the function returns successfully. That makes it safe to promote calls in them
+    /// that might fail.
+    fn promotion_safe_blocks(body: &mir::Body<'tcx>) -> FxHashSet<BasicBlock> {
+        let mut safe_blocks = FxHashSet::default();
+        let mut safe_block = START_BLOCK;
+        loop {
+            safe_blocks.insert(safe_block);
+            // Let's see if we can find another safe block.
+            safe_block = match body.basic_blocks[safe_block].terminator().kind {
+                TerminatorKind::Goto { target } => target,
+                TerminatorKind::Call { target: Some(target), .. }
+                | TerminatorKind::Drop { target, .. } => {
+                    // This calls a function or the destructor. `target` does not get executed if
+                    // the callee loops or panics. But in both cases the const already fails to
+                    // evaluate, so we are fine considering `target` a safe block for promotion.
+                    target
+                }
+                TerminatorKind::Assert { target, .. } => {
+                    // Similar to above, we only consider successful execution.
+                    target
+                }
+                _ => {
+                    // No next safe block.
+                    break;
+                }
+            };
+        }
+        safe_blocks
+    }
+
+    /// Returns whether the block is "safe" for promotion, which means it cannot be dead code.
+    /// We use this to avoid promoting operations that can fail in dead code.
+    fn is_promotion_safe_block(&mut self, block: BasicBlock) -> bool {
+        let body = self.body;
+        let safe_blocks =
+            self.promotion_safe_blocks.get_or_insert_with(|| Self::promotion_safe_blocks(body));
+        safe_blocks.contains(&block)
+    }
+
     fn validate_call(
         &mut self,
         callee: &Operand<'tcx>,
         args: &[Spanned<Operand<'tcx>>],
+        block: BasicBlock,
     ) -> Result<(), Unpromotable> {
+        // Validate the operands. If they fail, there's no question -- we cannot promote.
+        self.validate_operand(callee)?;
+        for arg in args {
+            self.validate_operand(&arg.node)?;
+        }
+
+        // Functions marked `#[rustc_promotable]` are explicitly allowed to be promoted, so we can
+        // accept them at this point.
         let fn_ty = callee.ty(self.body, self.tcx);
+        if let ty::FnDef(def_id, _) = *fn_ty.kind() {
+            if self.tcx.is_promotable_const_fn(def_id) {
+                return Ok(());
+            }
+        }
 
-        // Inside const/static items, we promote all (eligible) function calls.
-        // Everywhere else, we require `#[rustc_promotable]` on the callee.
-        let promote_all_const_fn = matches!(
+        // Ideally, we'd stop here and reject the rest.
+        // But for backward compatibility, we have to accept some promotion in const/static
+        // initializers. Inline consts are explicitly excluded: they are more recent, so we have
+        // no backwards-compatibility reason to allow more promotion inside of them.
+        let promote_all_fn = matches!(
             self.const_kind,
             Some(hir::ConstContext::Static(_) | hir::ConstContext::Const { inline: false })
         );
-        if !promote_all_const_fn {
-            if let ty::FnDef(def_id, _) = *fn_ty.kind() {
-                // Never promote runtime `const fn` calls of
-                // functions without `#[rustc_promotable]`.
-                if !self.tcx.is_promotable_const_fn(def_id) {
-                    return Err(Unpromotable);
-                }
-            }
+        if !promote_all_fn {
+            return Err(Unpromotable);
         }
-
+        // Make sure the callee is a `const fn`.
         let is_const_fn = match *fn_ty.kind() {
             ty::FnDef(def_id, _) => self.tcx.is_const_fn_raw(def_id),
             _ => false,
@@ -618,23 +677,23 @@ impl<'tcx> Validator<'_, 'tcx> {
         if !is_const_fn {
             return Err(Unpromotable);
         }
-
-        self.validate_operand(callee)?;
-        for arg in args {
-            self.validate_operand(&arg.node)?;
+        // The problem is that this may promote calls to functions that panic.
+        // We don't want to introduce compilation errors if there's a panic in a call in dead code.
+        // So we ensure that this is not dead code.
+        if !self.is_promotion_safe_block(block) {
+            return Err(Unpromotable);
         }
-
+        // This passed all checks, so let's accept.
         Ok(())
     }
 }
 
-// FIXME(eddyb) remove the differences for promotability in `static`, `const`, `const fn`.
 fn validate_candidates(
     ccx: &ConstCx<'_, '_>,
     temps: &mut IndexSlice<Local, TempState>,
     candidates: &[Candidate],
 ) -> Vec<Candidate> {
-    let mut validator = Validator { ccx, temps };
+    let mut validator = Validator { ccx, temps, promotion_safe_blocks: None };
 
     candidates
         .iter()
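
A sketch of the backwards-compatibility hazard the block check avoids; `panics`
is an illustrative `const fn` without `#[rustc_promotable]`:

    const fn panics() -> u32 {
        panic!("boom")
    }

    // Previously, the call below was promoted even though its block is dead
    // code, eagerly evaluating `panics()` and turning a latent panic into a
    // hard compile error. With the promotion-safe-block check, the dead block
    // is left alone and the constant evaluates fine.
    const C: u32 = {
        if false {
            let _v = &panics();
        }
        1
    };

    fn main() {
        assert_eq!(C, 1);
    }
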
@@ -653,6 +712,10 @@ struct Promoter<'a, 'tcx> {
     /// If true, all nested temps are also kept in the
     /// source MIR, not moved to the promoted MIR.
     keep_original: bool,
+
+    /// If true, add the new const (the promoted) to the required_consts of the parent MIR.
+    /// This is initially false and then set by the visitor when it encounters a `Call` terminator.
+    add_to_required: bool,
 }
 
 impl<'a, 'tcx> Promoter<'a, 'tcx> {
@@ -755,6 +818,10 @@ impl<'a, 'tcx> Promoter<'a, 'tcx> {
                 TerminatorKind::Call {
                     mut func, mut args, call_source: desugar, fn_span, ..
                 } => {
+                    // This promoted involves a function call, so it may fail to evaluate.
+                    // Let's make sure it is added to `required_consts` so that this failure cannot get lost.
+                    self.add_to_required = true;
+
                     self.visit_operand(&mut func, loc);
                     for arg in &mut args {
                         self.visit_operand(&mut arg.node, loc);
@@ -789,7 +856,7 @@ impl<'a, 'tcx> Promoter<'a, 'tcx> {
 
     fn promote_candidate(mut self, candidate: Candidate, next_promoted_id: usize) -> Body<'tcx> {
         let def = self.source.source.def_id();
-        let mut rvalue = {
+        let (mut rvalue, promoted_op) = {
             let promoted = &mut self.promoted;
             let promoted_id = Promoted::new(next_promoted_id);
             let tcx = self.tcx;
@@ -799,11 +866,7 @@ impl<'a, 'tcx> Promoter<'a, 'tcx> {
                 let args = tcx.erase_regions(GenericArgs::identity_for_item(tcx, def));
                 let uneval = mir::UnevaluatedConst { def, args, promoted: Some(promoted_id) };
 
-                Operand::Constant(Box::new(ConstOperand {
-                    span,
-                    user_ty: None,
-                    const_: Const::Unevaluated(uneval, ty),
-                }))
+                ConstOperand { span, user_ty: None, const_: Const::Unevaluated(uneval, ty) }
             };
 
             let blocks = self.source.basic_blocks.as_mut();
@@ -836,22 +899,26 @@ impl<'a, 'tcx> Promoter<'a, 'tcx> {
             let promoted_ref = local_decls.push(promoted_ref);
             assert_eq!(self.temps.push(TempState::Unpromotable), promoted_ref);
 
+            let promoted_operand = promoted_operand(ref_ty, span);
             let promoted_ref_statement = Statement {
                 source_info: statement.source_info,
                 kind: StatementKind::Assign(Box::new((
                     Place::from(promoted_ref),
-                    Rvalue::Use(promoted_operand(ref_ty, span)),
+                    Rvalue::Use(Operand::Constant(Box::new(promoted_operand))),
                 ))),
             };
             self.extra_statements.push((loc, promoted_ref_statement));
 
-            Rvalue::Ref(
-                tcx.lifetimes.re_erased,
-                *borrow_kind,
-                Place {
-                    local: mem::replace(&mut place.local, promoted_ref),
-                    projection: List::empty(),
-                },
+            (
+                Rvalue::Ref(
+                    tcx.lifetimes.re_erased,
+                    *borrow_kind,
+                    Place {
+                        local: mem::replace(&mut place.local, promoted_ref),
+                        projection: List::empty(),
+                    },
+                ),
+                promoted_operand,
             )
         };
 
@@ -863,6 +930,12 @@ impl<'a, 'tcx> Promoter<'a, 'tcx> {
 
         let span = self.promoted.span;
         self.assign(RETURN_PLACE, rvalue, span);
+
+        // Now that we did promotion, we know whether we'll want to add this to `required_consts`.
+        if self.add_to_required {
+            self.source.required_consts.push(promoted_op);
+        }
+
         self.promoted
     }
 }
@@ -878,6 +951,14 @@ impl<'a, 'tcx> MutVisitor<'tcx> for Promoter<'a, 'tcx> {
             *local = self.promote_temp(*local);
         }
     }
+
+    fn visit_constant(&mut self, constant: &mut ConstOperand<'tcx>, _location: Location) {
+        if constant.const_.is_required_const() {
+            self.promoted.required_consts.push(*constant);
+        }
+
+        // Skipping `super_constant` as the visitor is otherwise only looking for locals.
+    }
 }
 
 fn promote_candidates<'tcx>(
@@ -931,8 +1012,10 @@ fn promote_candidates<'tcx>(
             temps: &mut temps,
             extra_statements: &mut extra_statements,
             keep_original: false,
+            add_to_required: false,
         };
 
+        // `required_consts` of the promoted itself gets filled while building the MIR body.
         let mut promoted = promoter.promote_candidate(candidate, promotions.len());
         promoted.source.promoted = Some(promotions.next_index());
         promotions.push(promoted);
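
A hedged example of the bookkeeping above; `make` is illustrative:

    const fn make() -> u32 {
        41 + 1
    }

    // The borrowed call is lifted into a separate promoted MIR body. Because a
    // call can fail to evaluate, the promoted's `ConstOperand` is now pushed
    // into the parent body's `required_consts`, so any evaluation failure is
    // reported rather than lost.
    static S: &u32 = &make();

    fn main() {
        assert_eq!(*S, 42);
    }
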
diff --git a/compiler/rustc_mir_transform/src/ref_prop.rs b/compiler/rustc_mir_transform/src/ref_prop.rs
index d5642be5513..044ae32c1d4 100644
--- a/compiler/rustc_mir_transform/src/ref_prop.rs
+++ b/compiler/rustc_mir_transform/src/ref_prop.rs
@@ -82,7 +82,8 @@ impl<'tcx> MirPass<'tcx> for ReferencePropagation {
 }
 
 fn propagate_ssa<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) -> bool {
-    let ssa = SsaLocals::new(body);
+    let param_env = tcx.param_env_reveal_all_normalized(body.source.def_id());
+    let ssa = SsaLocals::new(tcx, body, param_env);
 
     let mut replacer = compute_replacement(tcx, body, &ssa);
     debug!(?replacer.targets);
diff --git a/compiler/rustc_mir_transform/src/required_consts.rs b/compiler/rustc_mir_transform/src/required_consts.rs
index abde6a47e83..71ac929d35e 100644
--- a/compiler/rustc_mir_transform/src/required_consts.rs
+++ b/compiler/rustc_mir_transform/src/required_consts.rs
@@ -1,6 +1,5 @@
 use rustc_middle::mir::visit::Visitor;
-use rustc_middle::mir::{Const, ConstOperand, Location};
-use rustc_middle::ty::ConstKind;
+use rustc_middle::mir::{ConstOperand, Location};
 
 pub struct RequiredConstsVisitor<'a, 'tcx> {
     required_consts: &'a mut Vec<ConstOperand<'tcx>>,
@@ -14,14 +13,8 @@ impl<'a, 'tcx> RequiredConstsVisitor<'a, 'tcx> {
 
 impl<'tcx> Visitor<'tcx> for RequiredConstsVisitor<'_, 'tcx> {
     fn visit_constant(&mut self, constant: &ConstOperand<'tcx>, _: Location) {
-        let const_ = constant.const_;
-        match const_ {
-            Const::Ty(c) => match c.kind() {
-                ConstKind::Param(_) | ConstKind::Error(_) | ConstKind::Value(_) => {}
-                _ => bug!("only ConstKind::Param/Value should be encountered here, got {:#?}", c),
-            },
-            Const::Unevaluated(..) => self.required_consts.push(*constant),
-            Const::Val(..) => {}
+        if constant.const_.is_required_const() {
+            self.required_consts.push(*constant);
         }
     }
 }
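
For intuition, a sketch of the kind of constant `is_required_const` keeps: an
unevaluated constant that can only fail once generic parameters are known:

    use std::mem::size_of;

    fn check<T>() -> usize {
        // Stays `Const::Unevaluated` in MIR and is recorded as a required
        // const, so instantiating e.g. `check::<()>` reports the assertion
        // failure when that instance is evaluated instead of losing it.
        const { assert!(size_of::<T>() != 0) };
        size_of::<T>()
    }

    fn main() {
        assert_eq!(check::<u32>(), 4);
    }
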
diff --git a/compiler/rustc_mir_transform/src/shim.rs b/compiler/rustc_mir_transform/src/shim.rs
index fa6906bdd55..1c85a604053 100644
--- a/compiler/rustc_mir_transform/src/shim.rs
+++ b/compiler/rustc_mir_transform/src/shim.rs
@@ -22,6 +22,8 @@ use crate::{
 use rustc_middle::mir::patch::MirPatch;
 use rustc_mir_dataflow::elaborate_drops::{self, DropElaborator, DropFlagMode, DropStyle};
 
+mod async_destructor_ctor;
+
 pub fn provide(providers: &mut Providers) {
     providers.mir_shims = make_shim;
 }
@@ -127,6 +129,9 @@ fn make_shim<'tcx>(tcx: TyCtxt<'tcx>, instance: ty::InstanceDef<'tcx>) -> Body<'
         ty::InstanceDef::ThreadLocalShim(..) => build_thread_local_shim(tcx, instance),
         ty::InstanceDef::CloneShim(def_id, ty) => build_clone_shim(tcx, def_id, ty),
         ty::InstanceDef::FnPtrAddrShim(def_id, ty) => build_fn_ptr_addr_shim(tcx, def_id, ty),
+        ty::InstanceDef::AsyncDropGlueCtorShim(def_id, ty) => {
+            async_destructor_ctor::build_async_destructor_ctor_shim(tcx, def_id, ty)
+        }
         ty::InstanceDef::Virtual(..) => {
             bug!("InstanceDef::Virtual ({:?}) is for direct calls only", instance)
         }
diff --git a/compiler/rustc_mir_transform/src/shim/async_destructor_ctor.rs b/compiler/rustc_mir_transform/src/shim/async_destructor_ctor.rs
new file mode 100644
index 00000000000..80eadb9abdc
--- /dev/null
+++ b/compiler/rustc_mir_transform/src/shim/async_destructor_ctor.rs
@@ -0,0 +1,618 @@
+use std::iter;
+
+use itertools::Itertools;
+use rustc_ast::Mutability;
+use rustc_const_eval::interpret;
+use rustc_hir::def_id::DefId;
+use rustc_hir::lang_items::LangItem;
+use rustc_index::{Idx, IndexVec};
+use rustc_middle::mir::{
+    BasicBlock, BasicBlockData, Body, CallSource, CastKind, Const, ConstOperand, ConstValue, Local,
+    LocalDecl, MirSource, Operand, Place, PlaceElem, Rvalue, SourceInfo, Statement, StatementKind,
+    Terminator, TerminatorKind, UnwindAction, UnwindTerminateReason, RETURN_PLACE,
+};
+use rustc_middle::ty::adjustment::PointerCoercion;
+use rustc_middle::ty::util::Discr;
+use rustc_middle::ty::{self, Ty, TyCtxt};
+use rustc_span::source_map::respan;
+use rustc_span::{Span, Symbol};
+use rustc_target::abi::{FieldIdx, VariantIdx};
+use rustc_target::spec::PanicStrategy;
+
+use super::{local_decls_for_sig, new_body};
+
+pub fn build_async_destructor_ctor_shim<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    def_id: DefId,
+    ty: Option<Ty<'tcx>>,
+) -> Body<'tcx> {
+    debug!("build_drop_shim(def_id={:?}, ty={:?})", def_id, ty);
+
+    AsyncDestructorCtorShimBuilder::new(tcx, def_id, ty).build()
+}
+
+/// Builder for the `async_drop_in_place` shim. Functions as a stack
+/// machine to build up an expression using combinators. The stack
+/// contains pairs of locals and types. A combinator is a not yet
+/// instantiated pair of a function and a type; it acts as an operator
+/// that consumes operands from the stack by instantiating its function
+/// and its type with the operand types and moving the locals into the
+/// function call. The top pair is considered to be the last operand.
+// FIXME: add mir-opt tests
+struct AsyncDestructorCtorShimBuilder<'tcx> {
+    tcx: TyCtxt<'tcx>,
+    def_id: DefId,
+    self_ty: Option<Ty<'tcx>>,
+    span: Span,
+    source_info: SourceInfo,
+    param_env: ty::ParamEnv<'tcx>,
+
+    stack: Vec<Operand<'tcx>>,
+    last_bb: BasicBlock,
+    top_cleanup_bb: Option<BasicBlock>,
+
+    locals: IndexVec<Local, LocalDecl<'tcx>>,
+    bbs: IndexVec<BasicBlock, BasicBlockData<'tcx>>,
+}
+
+#[derive(Clone, Copy)]
+enum SurfaceDropKind {
+    Async,
+    Sync,
+}
+
+impl<'tcx> AsyncDestructorCtorShimBuilder<'tcx> {
+    const SELF_PTR: Local = Local::from_u32(1);
+    const INPUT_COUNT: usize = 1;
+    const MAX_STACK_LEN: usize = 2;
+
+    fn new(tcx: TyCtxt<'tcx>, def_id: DefId, self_ty: Option<Ty<'tcx>>) -> Self {
+        let args = if let Some(ty) = self_ty {
+            tcx.mk_args(&[ty.into()])
+        } else {
+            ty::GenericArgs::identity_for_item(tcx, def_id)
+        };
+        let sig = tcx.fn_sig(def_id).instantiate(tcx, args);
+        let sig = tcx.instantiate_bound_regions_with_erased(sig);
+        let span = tcx.def_span(def_id);
+
+        let source_info = SourceInfo::outermost(span);
+
+        debug_assert_eq!(sig.inputs().len(), Self::INPUT_COUNT);
+        let locals = local_decls_for_sig(&sig, span);
+
+        // Usual case: noop() + unwind resume + return
+        let mut bbs = IndexVec::with_capacity(3);
+        let param_env = tcx.param_env_reveal_all_normalized(def_id);
+        AsyncDestructorCtorShimBuilder {
+            tcx,
+            def_id,
+            self_ty,
+            span,
+            source_info,
+            param_env,
+
+            stack: Vec::with_capacity(Self::MAX_STACK_LEN),
+            last_bb: bbs.push(BasicBlockData::new(None)),
+            top_cleanup_bb: match tcx.sess.panic_strategy() {
+                PanicStrategy::Unwind => {
+                    // Don't drop input arg because it's just a pointer
+                    Some(bbs.push(BasicBlockData {
+                        statements: Vec::new(),
+                        terminator: Some(Terminator {
+                            source_info,
+                            kind: TerminatorKind::UnwindResume,
+                        }),
+                        is_cleanup: true,
+                    }))
+                }
+                PanicStrategy::Abort => None,
+            },
+
+            locals,
+            bbs,
+        }
+    }
+
+    fn build(self) -> Body<'tcx> {
+        let (tcx, def_id, Some(self_ty)) = (self.tcx, self.def_id, self.self_ty) else {
+            return self.build_zst_output();
+        };
+
+        let surface_drop_kind = || {
+            let param_env = tcx.param_env_reveal_all_normalized(def_id);
+            if self_ty.has_surface_async_drop(tcx, param_env) {
+                Some(SurfaceDropKind::Async)
+            } else if self_ty.has_surface_drop(tcx, param_env) {
+                Some(SurfaceDropKind::Sync)
+            } else {
+                None
+            }
+        };
+
+        match self_ty.kind() {
+            ty::Array(elem_ty, _) => self.build_slice(true, *elem_ty),
+            ty::Slice(elem_ty) => self.build_slice(false, *elem_ty),
+
+            ty::Tuple(elem_tys) => self.build_chain(None, elem_tys.iter()),
+            ty::Adt(adt_def, args) if adt_def.is_struct() => {
+                let field_tys = adt_def.non_enum_variant().fields.iter().map(|f| f.ty(tcx, args));
+                self.build_chain(surface_drop_kind(), field_tys)
+            }
+            ty::Closure(_, args) => self.build_chain(None, args.as_closure().upvar_tys().iter()),
+            ty::CoroutineClosure(_, args) => {
+                self.build_chain(None, args.as_coroutine_closure().upvar_tys().iter())
+            }
+
+            ty::Adt(adt_def, args) if adt_def.is_enum() => {
+                self.build_enum(*adt_def, *args, surface_drop_kind())
+            }
+
+            ty::Adt(adt_def, _) => {
+                assert!(adt_def.is_union());
+                match surface_drop_kind().unwrap() {
+                    SurfaceDropKind::Async => self.build_fused_async_surface(),
+                    SurfaceDropKind::Sync => self.build_fused_sync_surface(),
+                }
+            }
+
+            ty::Bound(..)
+            | ty::Foreign(_)
+            | ty::Placeholder(_)
+            | ty::Infer(ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_) | ty::TyVar(_))
+            | ty::Param(_)
+            | ty::Alias(..) => {
+                bug!("Building async destructor for unexpected type: {self_ty:?}")
+            }
+
+            _ => {
+                bug!(
+                    "Building async destructor constructor shim is not yet implemented for type: {self_ty:?}"
+                )
+            }
+        }
+    }
+
+    fn build_enum(
+        mut self,
+        adt_def: ty::AdtDef<'tcx>,
+        args: ty::GenericArgsRef<'tcx>,
+        surface_drop: Option<SurfaceDropKind>,
+    ) -> Body<'tcx> {
+        let tcx = self.tcx;
+
+        let surface = match surface_drop {
+            None => None,
+            Some(kind) => {
+                self.put_self();
+                Some(match kind {
+                    SurfaceDropKind::Async => self.combine_async_surface(),
+                    SurfaceDropKind::Sync => self.combine_sync_surface(),
+                })
+            }
+        };
+
+        let mut other = None;
+        for (variant_idx, discr) in adt_def.discriminants(tcx) {
+            let variant = adt_def.variant(variant_idx);
+
+            let mut chain = None;
+            for (field_idx, field) in variant.fields.iter_enumerated() {
+                let field_ty = field.ty(tcx, args);
+                self.put_variant_field(variant.name, variant_idx, field_idx, field_ty);
+                let defer = self.combine_defer(field_ty);
+                chain = Some(match chain {
+                    None => defer,
+                    Some(chain) => self.combine_chain(chain, defer),
+                })
+            }
+            let variant_dtor = chain.unwrap_or_else(|| self.put_noop());
+
+            other = Some(match other {
+                None => variant_dtor,
+                Some(other) => {
+                    self.put_self();
+                    self.put_discr(discr);
+                    self.combine_either(other, variant_dtor)
+                }
+            });
+        }
+        let variants_dtor = other.unwrap_or_else(|| self.put_noop());
+
+        let dtor = match surface {
+            None => variants_dtor,
+            Some(surface) => self.combine_chain(surface, variants_dtor),
+        };
+        self.combine_fuse(dtor);
+        self.return_()
+    }
+
+    fn build_chain<I>(mut self, surface_drop: Option<SurfaceDropKind>, elem_tys: I) -> Body<'tcx>
+    where
+        I: Iterator<Item = Ty<'tcx>> + ExactSizeIterator,
+    {
+        let surface = match surface_drop {
+            None => None,
+            Some(kind) => {
+                self.put_self();
+                Some(match kind {
+                    SurfaceDropKind::Async => self.combine_async_surface(),
+                    SurfaceDropKind::Sync => self.combine_sync_surface(),
+                })
+            }
+        };
+
+        let mut chain = None;
+        for (field_idx, field_ty) in elem_tys.enumerate().map(|(i, ty)| (FieldIdx::new(i), ty)) {
+            self.put_field(field_idx, field_ty);
+            let defer = self.combine_defer(field_ty);
+            chain = Some(match chain {
+                None => defer,
+                Some(chain) => self.combine_chain(chain, defer),
+            })
+        }
+        let chain = chain.unwrap_or_else(|| self.put_noop());
+
+        let dtor = match surface {
+            None => chain,
+            Some(surface) => self.combine_chain(surface, chain),
+        };
+        self.combine_fuse(dtor);
+        self.return_()
+    }
+
+    fn build_zst_output(mut self) -> Body<'tcx> {
+        self.put_zst_output();
+        self.return_()
+    }
+
+    fn build_fused_async_surface(mut self) -> Body<'tcx> {
+        self.put_self();
+        let surface = self.combine_async_surface();
+        self.combine_fuse(surface);
+        self.return_()
+    }
+
+    fn build_fused_sync_surface(mut self) -> Body<'tcx> {
+        self.put_self();
+        let surface = self.combine_sync_surface();
+        self.combine_fuse(surface);
+        self.return_()
+    }
+
+    fn build_slice(mut self, is_array: bool, elem_ty: Ty<'tcx>) -> Body<'tcx> {
+        if is_array {
+            self.put_array_as_slice(elem_ty)
+        } else {
+            self.put_self()
+        }
+        let dtor = self.combine_slice(elem_ty);
+        self.combine_fuse(dtor);
+        self.return_()
+    }
+
+    fn put_zst_output(&mut self) {
+        let return_ty = self.locals[RETURN_PLACE].ty;
+        self.put_operand(Operand::Constant(Box::new(ConstOperand {
+            span: self.span,
+            user_ty: None,
+            const_: Const::zero_sized(return_ty),
+        })));
+    }
+
+    /// Puts `to_drop: *mut Self` on top of the stack.
+    fn put_self(&mut self) {
+        self.put_operand(Operand::Copy(Self::SELF_PTR.into()))
+    }
+
+    /// Given that `Self` is `[ElemTy; N]`, puts `to_drop: *mut [ElemTy]`
+    /// on top of the stack.
+    fn put_array_as_slice(&mut self, elem_ty: Ty<'tcx>) {
+        let slice_ptr_ty = Ty::new_mut_ptr(self.tcx, Ty::new_slice(self.tcx, elem_ty));
+        self.put_temp_rvalue(Rvalue::Cast(
+            CastKind::PointerCoercion(PointerCoercion::Unsize),
+            Operand::Copy(Self::SELF_PTR.into()),
+            slice_ptr_ty,
+        ))
+    }
+
+    /// If the given `Self` is a struct, puts `to_drop: *mut FieldTy`
+    /// on top of the stack.
+    fn put_field(&mut self, field: FieldIdx, field_ty: Ty<'tcx>) {
+        let place = Place {
+            local: Self::SELF_PTR,
+            projection: self
+                .tcx
+                .mk_place_elems(&[PlaceElem::Deref, PlaceElem::Field(field, field_ty)]),
+        };
+        self.put_temp_rvalue(Rvalue::AddressOf(Mutability::Mut, place))
+    }
+
+    /// If the given `Self` is an enum, puts `to_drop: *mut FieldTy`
+    /// for the given variant's field on top of the stack.
+    fn put_variant_field(
+        &mut self,
+        variant_sym: Symbol,
+        variant: VariantIdx,
+        field: FieldIdx,
+        field_ty: Ty<'tcx>,
+    ) {
+        let place = Place {
+            local: Self::SELF_PTR,
+            projection: self.tcx.mk_place_elems(&[
+                PlaceElem::Deref,
+                PlaceElem::Downcast(Some(variant_sym), variant),
+                PlaceElem::Field(field, field_ty),
+            ]),
+        };
+        self.put_temp_rvalue(Rvalue::AddressOf(Mutability::Mut, place))
+    }
+
+    /// If the given `Self` is an enum, puts the constant value of the
+    /// matched variant's discriminant `discr` on top of the stack.
+    fn put_discr(&mut self, discr: Discr<'tcx>) {
+        let (size, _) = discr.ty.int_size_and_signed(self.tcx);
+        self.put_operand(Operand::const_from_scalar(
+            self.tcx,
+            discr.ty,
+            interpret::Scalar::from_uint(discr.val, size),
+            self.span,
+        ));
+    }
+
+    /// Puts `x: RvalueType` on top of the stack.
+    fn put_temp_rvalue(&mut self, rvalue: Rvalue<'tcx>) {
+        let last_bb = &mut self.bbs[self.last_bb];
+        debug_assert!(last_bb.terminator.is_none());
+        let source_info = self.source_info;
+
+        let local_ty = rvalue.ty(&self.locals, self.tcx);
+        // We need to create a new local to be able to "consume" it with
+        // a combinator
+        let local = self.locals.push(LocalDecl::with_source_info(local_ty, source_info));
+        last_bb.statements.extend_from_slice(&[
+            Statement { source_info, kind: StatementKind::StorageLive(local) },
+            Statement {
+                source_info,
+                kind: StatementKind::Assign(Box::new((local.into(), rvalue))),
+            },
+        ]);
+
+        self.put_operand(Operand::Move(local.into()));
+    }
+
+    /// Puts operand on top of the stack.
+    fn put_operand(&mut self, operand: Operand<'tcx>) {
+        if let Some(top_cleanup_bb) = &mut self.top_cleanup_bb {
+            let source_info = self.source_info;
+            match &operand {
+                Operand::Copy(_) | Operand::Constant(_) => {
+                    *top_cleanup_bb = self.bbs.push(BasicBlockData {
+                        statements: Vec::new(),
+                        terminator: Some(Terminator {
+                            source_info,
+                            kind: TerminatorKind::Goto { target: *top_cleanup_bb },
+                        }),
+                        is_cleanup: true,
+                    });
+                }
+                Operand::Move(place) => {
+                    let local = place.as_local().unwrap();
+                    *top_cleanup_bb = self.bbs.push(BasicBlockData {
+                        statements: Vec::new(),
+                        terminator: Some(Terminator {
+                            source_info,
+                            kind: if self.locals[local].ty.needs_drop(self.tcx, self.param_env) {
+                                TerminatorKind::Drop {
+                                    place: local.into(),
+                                    target: *top_cleanup_bb,
+                                    unwind: UnwindAction::Terminate(
+                                        UnwindTerminateReason::InCleanup,
+                                    ),
+                                    replace: false,
+                                }
+                            } else {
+                                TerminatorKind::Goto { target: *top_cleanup_bb }
+                            },
+                        }),
+                        is_cleanup: true,
+                    });
+                }
+            };
+        }
+        self.stack.push(operand);
+    }
+
+    /// Puts `noop: async_drop::Noop` on top of the stack.
+    fn put_noop(&mut self) -> Ty<'tcx> {
+        self.apply_combinator(0, LangItem::AsyncDropNoop, &[])
+    }
+
+    fn combine_async_surface(&mut self) -> Ty<'tcx> {
+        self.apply_combinator(1, LangItem::SurfaceAsyncDropInPlace, &[self.self_ty.unwrap().into()])
+    }
+
+    fn combine_sync_surface(&mut self) -> Ty<'tcx> {
+        self.apply_combinator(
+            1,
+            LangItem::AsyncDropSurfaceDropInPlace,
+            &[self.self_ty.unwrap().into()],
+        )
+    }
+
+    fn combine_fuse(&mut self, inner_future_ty: Ty<'tcx>) -> Ty<'tcx> {
+        self.apply_combinator(1, LangItem::AsyncDropFuse, &[inner_future_ty.into()])
+    }
+
+    fn combine_slice(&mut self, elem_ty: Ty<'tcx>) -> Ty<'tcx> {
+        self.apply_combinator(1, LangItem::AsyncDropSlice, &[elem_ty.into()])
+    }
+
+    fn combine_defer(&mut self, to_drop_ty: Ty<'tcx>) -> Ty<'tcx> {
+        self.apply_combinator(1, LangItem::AsyncDropDefer, &[to_drop_ty.into()])
+    }
+
+    fn combine_chain(&mut self, first: Ty<'tcx>, second: Ty<'tcx>) -> Ty<'tcx> {
+        self.apply_combinator(2, LangItem::AsyncDropChain, &[first.into(), second.into()])
+    }
+
+    fn combine_either(&mut self, other: Ty<'tcx>, matched: Ty<'tcx>) -> Ty<'tcx> {
+        self.apply_combinator(
+            4,
+            LangItem::AsyncDropEither,
+            &[other.into(), matched.into(), self.self_ty.unwrap().into()],
+        )
+    }
+
+    fn return_(mut self) -> Body<'tcx> {
+        let last_bb = &mut self.bbs[self.last_bb];
+        debug_assert!(last_bb.terminator.is_none());
+        let source_info = self.source_info;
+
+        let (1, Some(output)) = (self.stack.len(), self.stack.pop()) else {
+            span_bug!(
+                self.span,
+                "async destructor ctor shim builder finished with invalid number of stack items: expected 1 found {}",
+                self.stack.len(),
+            )
+        };
+        #[cfg(debug_assertions)]
+        if let Some(ty) = self.self_ty {
+            debug_assert_eq!(
+                output.ty(&self.locals, self.tcx),
+                ty.async_destructor_ty(self.tcx, self.param_env),
+                "output async destructor types did not match for type: {ty:?}",
+            );
+        }
+
+        let dead_storage = match &output {
+            Operand::Move(place) => Some(Statement {
+                source_info,
+                kind: StatementKind::StorageDead(place.as_local().unwrap()),
+            }),
+            _ => None,
+        };
+
+        last_bb.statements.extend(
+            iter::once(Statement {
+                source_info,
+                kind: StatementKind::Assign(Box::new((RETURN_PLACE.into(), Rvalue::Use(output)))),
+            })
+            .chain(dead_storage),
+        );
+
+        last_bb.terminator = Some(Terminator { source_info, kind: TerminatorKind::Return });
+
+        let source = MirSource::from_instance(ty::InstanceDef::AsyncDropGlueCtorShim(
+            self.def_id,
+            self.self_ty,
+        ));
+        new_body(source, self.bbs, self.locals, Self::INPUT_COUNT, self.span)
+    }
+
+    fn apply_combinator(
+        &mut self,
+        arity: usize,
+        function: LangItem,
+        args: &[ty::GenericArg<'tcx>],
+    ) -> Ty<'tcx> {
+        let function = self.tcx.require_lang_item(function, Some(self.span));
+        let operands_split = self
+            .stack
+            .len()
+            .checked_sub(arity)
+            .expect("async destructor ctor shim combinator tried to consume too many items");
+        let operands = &self.stack[operands_split..];
+
+        let func_ty = Ty::new_fn_def(self.tcx, function, args.iter().copied());
+        let func_sig = func_ty.fn_sig(self.tcx).no_bound_vars().unwrap();
+        #[cfg(debug_assertions)]
+        operands.iter().zip(func_sig.inputs()).for_each(|(operand, expected_ty)| {
+            let operand_ty = operand.ty(&self.locals, self.tcx);
+            if operand_ty == *expected_ty {
+                return;
+            }
+
+            // If projection of Discriminant then compare with `Ty::discriminant_ty`
+            if let ty::Alias(ty::AliasKind::Projection, ty::AliasTy { args, def_id, .. }) =
+                expected_ty.kind()
+                && Some(*def_id) == self.tcx.lang_items().discriminant_type()
+                && args.first().unwrap().as_type().unwrap().discriminant_ty(self.tcx) == operand_ty
+            {
+                return;
+            }
+
+            span_bug!(
+                self.span,
+                "Operand type and combinator argument type are not equal.
+    operand_ty: {:?}
+    argument_ty: {:?}
+",
+                operand_ty,
+                expected_ty
+            );
+        });
+
+        let target = self.bbs.push(BasicBlockData {
+            statements: operands
+                .iter()
+                .rev()
+                .filter_map(|o| {
+                    if let Operand::Move(Place { local, projection }) = o {
+                        assert!(projection.is_empty());
+                        Some(Statement {
+                            source_info: self.source_info,
+                            kind: StatementKind::StorageDead(*local),
+                        })
+                    } else {
+                        None
+                    }
+                })
+                .collect(),
+            terminator: None,
+            is_cleanup: false,
+        });
+
+        let dest_ty = func_sig.output();
+        let dest =
+            self.locals.push(LocalDecl::with_source_info(dest_ty, self.source_info).immutable());
+
+        let unwind = if let Some(top_cleanup_bb) = &mut self.top_cleanup_bb {
+            for _ in 0..arity {
+                *top_cleanup_bb =
+                    self.bbs[*top_cleanup_bb].terminator().successors().exactly_one().ok().unwrap();
+            }
+            UnwindAction::Cleanup(*top_cleanup_bb)
+        } else {
+            UnwindAction::Unreachable
+        };
+
+        let last_bb = &mut self.bbs[self.last_bb];
+        debug_assert!(last_bb.terminator.is_none());
+        last_bb.statements.push(Statement {
+            source_info: self.source_info,
+            kind: StatementKind::StorageLive(dest),
+        });
+        last_bb.terminator = Some(Terminator {
+            source_info: self.source_info,
+            kind: TerminatorKind::Call {
+                func: Operand::Constant(Box::new(ConstOperand {
+                    span: self.span,
+                    user_ty: None,
+                    const_: Const::Val(ConstValue::ZeroSized, func_ty),
+                })),
+                destination: dest.into(),
+                target: Some(target),
+                unwind,
+                call_source: CallSource::Misc,
+                fn_span: self.span,
+                args: self.stack.drain(operands_split..).map(|o| respan(self.span, o)).collect(),
+            },
+        });
+
+        self.put_operand(Operand::Move(dest.into()));
+        self.last_bb = target;
+
+        dest_ty
+    }
+}
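
A self-contained model of the stack discipline described in the builder's doc
comment, with illustrative strings standing in for MIR operands:

    struct StackMachine {
        stack: Vec<String>,
    }

    impl StackMachine {
        /// Push one operand, like the builder's `put_*` methods.
        fn put(&mut self, operand: &str) {
            self.stack.push(operand.to_string());
        }

        /// Pop `arity` operands and push one combined result, like the
        /// builder's `combine_*` methods (which emit a `Call` terminator).
        fn combine(&mut self, name: &str, arity: usize) {
            let split = self.stack.len().checked_sub(arity).expect("too few operands");
            let args = self.stack.split_off(split);
            self.stack.push(format!("{name}({})", args.join(", ")));
        }
    }

    fn main() {
        // Mirrors `build_chain` for a two-field struct: defer each field
        // pointer, chain the two defers, then fuse the result.
        let mut m = StackMachine { stack: Vec::new() };
        m.put("field_a_ptr");
        m.combine("defer", 1);
        m.put("field_b_ptr");
        m.combine("defer", 1);
        m.combine("chain", 2);
        m.combine("fuse", 1);
        assert_eq!(m.stack, ["fuse(chain(defer(field_a_ptr), defer(field_b_ptr)))"]);
    }
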
diff --git a/compiler/rustc_mir_transform/src/ssa.rs b/compiler/rustc_mir_transform/src/ssa.rs
index fddc62e6652..55fed7d9da2 100644
--- a/compiler/rustc_mir_transform/src/ssa.rs
+++ b/compiler/rustc_mir_transform/src/ssa.rs
@@ -2,8 +2,9 @@
 //! 1/ They are only assigned-to once, either as a function parameter, or in an assign statement;
 //! 2/ This single assignment dominates all uses;
 //!
-//! As a consequence of rule 2, we consider that borrowed locals are not SSA, even if they are
-//! `Freeze`, as we do not track that the assignment dominates all uses of the borrow.
+//! As we do not track indirect assignments, a local that has its address taken (either by
+//! AddressOf or by borrowing) is generally considered non-SSA. However, it is UB to modify
+//! through an immutable borrow of a `Freeze` local, so locals that are only shared-borrowed
+//! and have a `Freeze` type can still be considered SSA.
 
 use rustc_data_structures::graph::dominators::Dominators;
 use rustc_index::bit_set::BitSet;
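
A sketch of why the `Freeze` check is needed before trusting shared borrows:

    use std::cell::Cell;

    fn main() {
        let x = Cell::new(1); // single direct assignment to `x`...
        let r = &x; // ...but `Cell<u32>` is not `Freeze`, so a shared borrow
        r.set(2); // can still write through it -- `x` must not be treated as SSA
        assert_eq!(x.get(), 2);
    }
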
@@ -11,6 +12,7 @@ use rustc_index::{IndexSlice, IndexVec};
 use rustc_middle::middle::resolve_bound_vars::Set1;
 use rustc_middle::mir::visit::*;
 use rustc_middle::mir::*;
+use rustc_middle::ty::{ParamEnv, TyCtxt};
 
 pub struct SsaLocals {
     /// Assignments to each local. This defines whether the local is SSA.
@@ -24,6 +26,8 @@ pub struct SsaLocals {
     /// Number of "direct" uses of each local, ie. uses that are not dereferences.
     /// We ignore non-uses (Storage statements, debuginfo).
     direct_uses: IndexVec<Local, u32>,
+    /// Set of SSA locals that are immutably borrowed.
+    borrowed_locals: BitSet<Local>,
 }
 
 pub enum AssignedValue<'a, 'tcx> {
@@ -33,15 +37,22 @@ pub enum AssignedValue<'a, 'tcx> {
 }
 
 impl SsaLocals {
-    pub fn new<'tcx>(body: &Body<'tcx>) -> SsaLocals {
+    pub fn new<'tcx>(tcx: TyCtxt<'tcx>, body: &Body<'tcx>, param_env: ParamEnv<'tcx>) -> SsaLocals {
         let assignment_order = Vec::with_capacity(body.local_decls.len());
 
         let assignments = IndexVec::from_elem(Set1::Empty, &body.local_decls);
         let dominators = body.basic_blocks.dominators();
 
         let direct_uses = IndexVec::from_elem(0, &body.local_decls);
-        let mut visitor =
-            SsaVisitor { body, assignments, assignment_order, dominators, direct_uses };
+        let borrowed_locals = BitSet::new_empty(body.local_decls.len());
+        let mut visitor = SsaVisitor {
+            body,
+            assignments,
+            assignment_order,
+            dominators,
+            direct_uses,
+            borrowed_locals,
+        };
 
         for local in body.args_iter() {
             visitor.assignments[local] = Set1::One(DefLocation::Argument);
@@ -58,6 +69,16 @@ impl SsaLocals {
             visitor.visit_var_debug_info(var_debug_info);
         }
 
+        // The immutability of shared borrows only works on `Freeze` locals. If the visitor found
+        // borrows, we need to check the types. For raw pointers and mutable borrows, the locals
+        // have already been marked as non-SSA.
+        debug!(?visitor.borrowed_locals);
+        for local in visitor.borrowed_locals.iter() {
+            if !body.local_decls[local].ty.is_freeze(tcx, param_env) {
+                visitor.assignments[local] = Set1::Many;
+            }
+        }
+
         debug!(?visitor.assignments);
         debug!(?visitor.direct_uses);
 
@@ -70,6 +91,7 @@ impl SsaLocals {
             assignments: visitor.assignments,
             assignment_order: visitor.assignment_order,
             direct_uses: visitor.direct_uses,
+            borrowed_locals: visitor.borrowed_locals,
             // This is filled by `compute_copy_classes`.
             copy_classes: IndexVec::default(),
         };
@@ -174,6 +196,11 @@ impl SsaLocals {
         &self.copy_classes
     }
 
+    /// Set of SSA locals that are immutably borrowed.
+    pub fn borrowed_locals(&self) -> &BitSet<Local> {
+        &self.borrowed_locals
+    }
+
     /// Make a property uniform on a copy equivalence class by removing elements.
     pub fn meet_copy_equivalence(&self, property: &mut BitSet<Local>) {
         // Consolidate to have a local iff all its copies are.
@@ -208,6 +235,8 @@ struct SsaVisitor<'tcx, 'a> {
     assignments: IndexVec<Local, Set1<DefLocation>>,
     assignment_order: Vec<Local>,
     direct_uses: IndexVec<Local, u32>,
+    // Track locals that are immutably borrowed, so we can check their type is `Freeze` later.
+    borrowed_locals: BitSet<Local>,
 }
 
 impl SsaVisitor<'_, '_> {
@@ -232,16 +261,18 @@ impl<'tcx> Visitor<'tcx> for SsaVisitor<'tcx, '_> {
             PlaceContext::MutatingUse(MutatingUseContext::Projection)
             | PlaceContext::NonMutatingUse(NonMutatingUseContext::Projection) => bug!(),
             // Anything can happen with raw pointers, so remove them.
-            // We do not verify that all uses of the borrow dominate the assignment to `local`,
-            // so we have to remove them too.
-            PlaceContext::NonMutatingUse(
-                NonMutatingUseContext::SharedBorrow
-                | NonMutatingUseContext::FakeBorrow
-                | NonMutatingUseContext::AddressOf,
-            )
+            PlaceContext::NonMutatingUse(NonMutatingUseContext::AddressOf)
             | PlaceContext::MutatingUse(_) => {
                 self.assignments[local] = Set1::Many;
             }
+            // Immutable borrows are ok, but we need to delay a check that the type is `Freeze`.
+            PlaceContext::NonMutatingUse(
+                NonMutatingUseContext::SharedBorrow | NonMutatingUseContext::FakeBorrow,
+            ) => {
+                self.borrowed_locals.insert(local);
+                self.check_dominates(local, loc);
+                self.direct_uses[local] += 1;
+            }
             PlaceContext::NonMutatingUse(_) => {
                 self.check_dominates(local, loc);
                 self.direct_uses[local] += 1;