Diffstat (limited to 'compiler/rustc_mir_transform')
-rw-r--r--   compiler/rustc_mir_transform/Cargo.toml                      |   1
-rw-r--r--   compiler/rustc_mir_transform/messages.ftl                    |   2
-rw-r--r--   compiler/rustc_mir_transform/src/check_inline.rs             |   8
-rw-r--r--   compiler/rustc_mir_transform/src/coverage/hir_info.rs        | 128
-rw-r--r--   compiler/rustc_mir_transform/src/coverage/mappings.rs        | 262
-rw-r--r--   compiler/rustc_mir_transform/src/coverage/mod.rs             | 262
-rw-r--r--   compiler/rustc_mir_transform/src/coverage/query.rs           |  54
-rw-r--r--   compiler/rustc_mir_transform/src/coverage/spans.rs           |  55
-rw-r--r--   compiler/rustc_mir_transform/src/coverage/spans/from_mir.rs  |   6
-rw-r--r--   compiler/rustc_mir_transform/src/cross_crate_inline.rs       |   2
-rw-r--r--   compiler/rustc_mir_transform/src/ctfe_limit.rs               |   2
-rw-r--r--   compiler/rustc_mir_transform/src/elaborate_drop.rs           |  58
-rw-r--r--   compiler/rustc_mir_transform/src/errors.rs                   |   8
-rw-r--r--   compiler/rustc_mir_transform/src/gvn.rs                      |  15
-rw-r--r--   compiler/rustc_mir_transform/src/inline.rs                   |   2
-rw-r--r--   compiler/rustc_mir_transform/src/instsimplify.rs             |  31
-rw-r--r--   compiler/rustc_mir_transform/src/remove_unneeded_drops.rs    |  31
-rw-r--r--   compiler/rustc_mir_transform/src/simplify.rs                 |   1
-rw-r--r--   compiler/rustc_mir_transform/src/sroa.rs                     |   6
-rw-r--r--   compiler/rustc_mir_transform/src/validate.rs                 |   2
20 files changed, 335 insertions, 601 deletions
diff --git a/compiler/rustc_mir_transform/Cargo.toml b/compiler/rustc_mir_transform/Cargo.toml
index a7d0b3acbe4..08c43a4648c 100644
--- a/compiler/rustc_mir_transform/Cargo.toml
+++ b/compiler/rustc_mir_transform/Cargo.toml
@@ -10,7 +10,6 @@ itertools = "0.12"
 rustc_abi = { path = "../rustc_abi" }
 rustc_arena = { path = "../rustc_arena" }
 rustc_ast = { path = "../rustc_ast" }
-rustc_attr_data_structures = { path = "../rustc_attr_data_structures" }
 rustc_const_eval = { path = "../rustc_const_eval" }
 rustc_data_structures = { path = "../rustc_data_structures" }
 rustc_errors = { path = "../rustc_errors" }
diff --git a/compiler/rustc_mir_transform/messages.ftl b/compiler/rustc_mir_transform/messages.ftl
index ae3062f07de..2e08f50e8a9 100644
--- a/compiler/rustc_mir_transform/messages.ftl
+++ b/compiler/rustc_mir_transform/messages.ftl
@@ -9,8 +9,6 @@ mir_transform_const_mut_borrow = taking a mutable reference to a `const` item
     .note2 = the mutable reference will refer to this temporary, not the original `const` item
     .note3 = mutable reference created due to call to this method
 
-mir_transform_exceeds_mcdc_test_vector_limit = number of total test vectors in one function will exceed limit ({$max_num_test_vectors}) if this decision is instrumented, so MC/DC analysis ignores it
-
 mir_transform_ffi_unwind_call = call to {$foreign ->
     [true] foreign function
     *[false] function pointer
diff --git a/compiler/rustc_mir_transform/src/check_inline.rs b/compiler/rustc_mir_transform/src/check_inline.rs
index 14d9532894f..8d28cb3ca00 100644
--- a/compiler/rustc_mir_transform/src/check_inline.rs
+++ b/compiler/rustc_mir_transform/src/check_inline.rs
@@ -1,7 +1,7 @@
 //! Check that a body annotated with `#[rustc_force_inline]` will not fail to inline based on its
 //! definition alone (irrespective of any specific caller).
 
-use rustc_attr_data_structures::InlineAttr;
+use rustc_hir::attrs::InlineAttr;
 use rustc_hir::def_id::DefId;
 use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
 use rustc_middle::mir::{Body, TerminatorKind};
@@ -45,12 +45,6 @@ pub(super) fn is_inline_valid_on_fn<'tcx>(
         return Err("#[rustc_no_mir_inline]");
     }
 
-    // FIXME(#127234): Coverage instrumentation currently doesn't handle inlined
-    // MIR correctly when Modified Condition/Decision Coverage is enabled.
-    if tcx.sess.instrument_coverage_mcdc() {
-        return Err("incompatible with MC/DC coverage");
-    }
-
     let ty = tcx.type_of(def_id);
     if match ty.instantiate_identity().kind() {
         ty::FnDef(..) => tcx.fn_sig(def_id).instantiate_identity().c_variadic(),
diff --git a/compiler/rustc_mir_transform/src/coverage/hir_info.rs b/compiler/rustc_mir_transform/src/coverage/hir_info.rs
new file mode 100644
index 00000000000..28fdc52b06c
--- /dev/null
+++ b/compiler/rustc_mir_transform/src/coverage/hir_info.rs
@@ -0,0 +1,128 @@
+use rustc_hir as hir;
+use rustc_hir::intravisit::{Visitor, walk_expr};
+use rustc_middle::hir::nested_filter;
+use rustc_middle::ty::TyCtxt;
+use rustc_span::Span;
+use rustc_span::def_id::LocalDefId;
+
+/// Function information extracted from HIR by the coverage instrumentor.
+#[derive(Debug)]
+pub(crate) struct ExtractedHirInfo {
+    pub(crate) function_source_hash: u64,
+    pub(crate) is_async_fn: bool,
+    /// The span of the function's signature, if available.
+    /// Must have the same context and filename as the body span.
+    pub(crate) fn_sig_span: Option<Span>,
+    pub(crate) body_span: Span,
+    /// "Holes" are regions within the function body (or its expansions) that
+    /// should not be included in coverage spans for this function
+    /// (e.g. closures and nested items).
+    pub(crate) hole_spans: Vec<Span>,
+}
+
+pub(crate) fn extract_hir_info<'tcx>(tcx: TyCtxt<'tcx>, def_id: LocalDefId) -> ExtractedHirInfo {
+    // FIXME(#79625): Consider improving MIR to provide the information needed, to avoid going back
+    // to HIR for it.
+
+    // HACK: For synthetic MIR bodies (async closures), use the def id of the HIR body.
+    if tcx.is_synthetic_mir(def_id) {
+        return extract_hir_info(tcx, tcx.local_parent(def_id));
+    }
+
+    let hir_node = tcx.hir_node_by_def_id(def_id);
+    let fn_body_id = hir_node.body_id().expect("HIR node is a function with body");
+    let hir_body = tcx.hir_body(fn_body_id);
+
+    let maybe_fn_sig = hir_node.fn_sig();
+    let is_async_fn = maybe_fn_sig.is_some_and(|fn_sig| fn_sig.header.is_async());
+
+    let mut body_span = hir_body.value.span;
+
+    use hir::{Closure, Expr, ExprKind, Node};
+    // Unexpand a closure's body span back to the context of its declaration.
+    // This helps with closure bodies that consist of just a single bang-macro,
+    // and also with closure bodies produced by async desugaring.
+    if let Node::Expr(&Expr { kind: ExprKind::Closure(&Closure { fn_decl_span, .. }), .. }) =
+        hir_node
+    {
+        body_span = body_span.find_ancestor_in_same_ctxt(fn_decl_span).unwrap_or(body_span);
+    }
+
+    // The actual signature span is only used if it has the same context and
+    // filename as the body, and precedes the body.
+    let fn_sig_span = maybe_fn_sig.map(|fn_sig| fn_sig.span).filter(|&fn_sig_span| {
+        let source_map = tcx.sess.source_map();
+        let file_idx = |span: Span| source_map.lookup_source_file_idx(span.lo());
+
+        fn_sig_span.eq_ctxt(body_span)
+            && fn_sig_span.hi() <= body_span.lo()
+            && file_idx(fn_sig_span) == file_idx(body_span)
+    });
+
+    let function_source_hash = hash_mir_source(tcx, hir_body);
+
+    let hole_spans = extract_hole_spans_from_hir(tcx, hir_body);
+
+    ExtractedHirInfo { function_source_hash, is_async_fn, fn_sig_span, body_span, hole_spans }
+}
+
+fn hash_mir_source<'tcx>(tcx: TyCtxt<'tcx>, hir_body: &'tcx hir::Body<'tcx>) -> u64 {
+    let owner = hir_body.id().hir_id.owner;
+    tcx.hir_owner_nodes(owner)
+        .opt_hash_including_bodies
+        .expect("hash should be present when coverage instrumentation is enabled")
+        .to_smaller_hash()
+        .as_u64()
+}
+
+fn extract_hole_spans_from_hir<'tcx>(tcx: TyCtxt<'tcx>, hir_body: &hir::Body<'tcx>) -> Vec<Span> {
+    struct HolesVisitor<'tcx> {
+        tcx: TyCtxt<'tcx>,
+        hole_spans: Vec<Span>,
+    }
+
+    impl<'tcx> Visitor<'tcx> for HolesVisitor<'tcx> {
+        /// We have special handling for nested items, but we still want to
+        /// traverse into nested bodies of things that are not considered items,
+        /// such as "anon consts" (e.g. array lengths).
+        type NestedFilter = nested_filter::OnlyBodies;
+
+        fn maybe_tcx(&mut self) -> TyCtxt<'tcx> {
+            self.tcx
+        }
+
+        /// We override `visit_nested_item` instead of `visit_item` because we
+        /// only need the item's span, not the item itself.
+        fn visit_nested_item(&mut self, id: hir::ItemId) -> Self::Result {
+            let span = self.tcx.def_span(id.owner_id.def_id);
+            self.visit_hole_span(span);
+            // Having visited this item, we don't care about its children,
+            // so don't call `walk_item`.
+        }
+
+        // We override `visit_expr` instead of the more specific expression
+        // visitors, so that we have direct access to the expression span.
+        fn visit_expr(&mut self, expr: &'tcx hir::Expr<'tcx>) {
+            match expr.kind {
+                hir::ExprKind::Closure(_) | hir::ExprKind::ConstBlock(_) => {
+                    self.visit_hole_span(expr.span);
+                    // Having visited this expression, we don't care about its
+                    // children, so don't call `walk_expr`.
+                }
+
+                // For other expressions, recursively visit as normal.
+                _ => walk_expr(self, expr),
+            }
+        }
+    }
+    impl HolesVisitor<'_> {
+        fn visit_hole_span(&mut self, hole_span: Span) {
+            self.hole_spans.push(hole_span);
+        }
+    }
+
+    let mut visitor = HolesVisitor { tcx, hole_spans: vec![] };
+
+    visitor.visit_body(hir_body);
+    visitor.hole_spans
+}
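
For context: the "hole spans" collected above are nested regions that receive their own coverage info rather than being attributed to the enclosing function. A minimal sketch, not from this commit, of what `extract_hole_spans_from_hir` would treat as holes under an ordinary `-C instrument-coverage` build:

fn outer(xs: &[i32]) -> i32 {
    // Statements here belong to `outer`'s own coverage spans.
    let doubled: i32 = xs.iter().map(|x| {
        // Closure body: recorded as a hole in `outer` and instrumented
        // as a separate function.
        x * 2
    }).sum();

    // Nested item: also a hole; `helper` is instrumented on its own.
    fn helper(v: i32) -> i32 { v + 1 }

    // An anon const (array length) is not a hole; the visitor walks into it.
    let _buf = [0u8; 4];

    helper(doubled)
}

fn main() {
    assert_eq!(outer(&[1, 2, 3]), 13);
}
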
diff --git a/compiler/rustc_mir_transform/src/coverage/mappings.rs b/compiler/rustc_mir_transform/src/coverage/mappings.rs
index b4b4d0416fb..46fe7c40826 100644
--- a/compiler/rustc_mir_transform/src/coverage/mappings.rs
+++ b/compiler/rustc_mir_transform/src/coverage/mappings.rs
@@ -1,19 +1,13 @@
-use std::collections::BTreeSet;
-
-use rustc_data_structures::fx::FxIndexMap;
 use rustc_index::IndexVec;
-use rustc_middle::mir::coverage::{
-    BlockMarkerId, BranchSpan, ConditionId, ConditionInfo, CoverageInfoHi, CoverageKind,
-};
+use rustc_middle::mir::coverage::{BlockMarkerId, BranchSpan, CoverageInfoHi, CoverageKind};
 use rustc_middle::mir::{self, BasicBlock, StatementKind};
 use rustc_middle::ty::TyCtxt;
 use rustc_span::Span;
 
-use crate::coverage::ExtractedHirInfo;
-use crate::coverage::graph::{BasicCoverageBlock, CoverageGraph, START_BCB};
+use crate::coverage::graph::{BasicCoverageBlock, CoverageGraph};
+use crate::coverage::hir_info::ExtractedHirInfo;
 use crate::coverage::spans::extract_refined_covspans;
 use crate::coverage::unexpand::unexpand_into_body_span;
-use crate::errors::MCDCExceedsTestVectorLimit;
 
 /// Associates an ordinary executable code span with its corresponding BCB.
 #[derive(Debug)]
@@ -22,9 +16,6 @@ pub(super) struct CodeMapping {
     pub(super) bcb: BasicCoverageBlock,
 }
 
-/// This is separate from [`MCDCBranch`] to help prepare for larger changes
-/// that will be needed for improved branch coverage in the future.
-/// (See <https://github.com/rust-lang/rust/pull/124217>.)
 #[derive(Debug)]
 pub(super) struct BranchPair {
     pub(super) span: Span,
@@ -32,40 +23,10 @@ pub(super) struct BranchPair {
     pub(super) false_bcb: BasicCoverageBlock,
 }
 
-/// Associates an MC/DC branch span with condition info besides fields for normal branch.
-#[derive(Debug)]
-pub(super) struct MCDCBranch {
-    pub(super) span: Span,
-    pub(super) true_bcb: BasicCoverageBlock,
-    pub(super) false_bcb: BasicCoverageBlock,
-    pub(super) condition_info: ConditionInfo,
-    // Offset added to test vector idx if this branch is evaluated to true.
-    pub(super) true_index: usize,
-    // Offset added to test vector idx if this branch is evaluated to false.
-    pub(super) false_index: usize,
-}
-
-/// Associates an MC/DC decision with its join BCBs.
-#[derive(Debug)]
-pub(super) struct MCDCDecision {
-    pub(super) span: Span,
-    pub(super) end_bcbs: BTreeSet<BasicCoverageBlock>,
-    pub(super) bitmap_idx: usize,
-    pub(super) num_test_vectors: usize,
-    pub(super) decision_depth: u16,
-}
-
-// LLVM uses `i32` to index the bitmap. Thus `i32::MAX` is the hard limit for number of all test vectors
-// in a function.
-const MCDC_MAX_BITMAP_SIZE: usize = i32::MAX as usize;
-
 #[derive(Default)]
 pub(super) struct ExtractedMappings {
     pub(super) code_mappings: Vec<CodeMapping>,
     pub(super) branch_pairs: Vec<BranchPair>,
-    pub(super) mcdc_bitmap_bits: usize,
-    pub(super) mcdc_degraded_branches: Vec<MCDCBranch>,
-    pub(super) mcdc_mappings: Vec<(MCDCDecision, Vec<MCDCBranch>)>,
 }
 
 /// Extracts coverage-relevant spans from MIR, and associates them with
@@ -78,46 +39,13 @@ pub(super) fn extract_all_mapping_info_from_mir<'tcx>(
 ) -> ExtractedMappings {
     let mut code_mappings = vec![];
     let mut branch_pairs = vec![];
-    let mut mcdc_bitmap_bits = 0;
-    let mut mcdc_degraded_branches = vec![];
-    let mut mcdc_mappings = vec![];
 
-    if hir_info.is_async_fn || tcx.sess.coverage_no_mir_spans() {
-        // An async function desugars into a function that returns a future,
-        // with the user code wrapped in a closure. Any spans in the desugared
-        // outer function will be unhelpful, so just keep the signature span
-        // and ignore all of the spans in the MIR body.
-        //
-        // When debugging flag `-Zcoverage-options=no-mir-spans` is set, we need
-        // to give the same treatment to _all_ functions, because `llvm-cov`
-        // seems to ignore functions that don't have any ordinary code spans.
-        if let Some(span) = hir_info.fn_sig_span {
-            code_mappings.push(CodeMapping { span, bcb: START_BCB });
-        }
-    } else {
-        // Extract coverage spans from MIR statements/terminators as normal.
-        extract_refined_covspans(tcx, mir_body, hir_info, graph, &mut code_mappings);
-    }
+    // Extract ordinary code mappings from MIR statement/terminator spans.
+    extract_refined_covspans(tcx, mir_body, hir_info, graph, &mut code_mappings);
 
     branch_pairs.extend(extract_branch_pairs(mir_body, hir_info, graph));
 
-    extract_mcdc_mappings(
-        mir_body,
-        tcx,
-        hir_info.body_span,
-        graph,
-        &mut mcdc_bitmap_bits,
-        &mut mcdc_degraded_branches,
-        &mut mcdc_mappings,
-    );
-
-    ExtractedMappings {
-        code_mappings,
-        branch_pairs,
-        mcdc_bitmap_bits,
-        mcdc_degraded_branches,
-        mcdc_mappings,
-    }
+    ExtractedMappings { code_mappings, branch_pairs }
 }
 
 fn resolve_block_markers(
@@ -141,12 +69,6 @@ fn resolve_block_markers(
     block_markers
 }
 
-// FIXME: There is currently a lot of redundancy between
-// `extract_branch_pairs` and `extract_mcdc_mappings`. This is needed so
-// that they can each be modified without interfering with the other, but in
-// the long term we should try to bring them together again when branch coverage
-// and MC/DC coverage support are more mature.
-
 pub(super) fn extract_branch_pairs(
     mir_body: &mir::Body<'_>,
     hir_info: &ExtractedHirInfo,
@@ -176,175 +98,3 @@ pub(super) fn extract_branch_pairs(
         })
         .collect::<Vec<_>>()
 }
-
-pub(super) fn extract_mcdc_mappings(
-    mir_body: &mir::Body<'_>,
-    tcx: TyCtxt<'_>,
-    body_span: Span,
-    graph: &CoverageGraph,
-    mcdc_bitmap_bits: &mut usize,
-    mcdc_degraded_branches: &mut impl Extend<MCDCBranch>,
-    mcdc_mappings: &mut impl Extend<(MCDCDecision, Vec<MCDCBranch>)>,
-) {
-    let Some(coverage_info_hi) = mir_body.coverage_info_hi.as_deref() else { return };
-
-    let block_markers = resolve_block_markers(coverage_info_hi, mir_body);
-
-    let bcb_from_marker = |marker: BlockMarkerId| graph.bcb_from_bb(block_markers[marker]?);
-
-    let check_branch_bcb =
-        |raw_span: Span, true_marker: BlockMarkerId, false_marker: BlockMarkerId| {
-            // For now, ignore any branch span that was introduced by
-            // expansion. This makes things like assert macros less noisy.
-            if !raw_span.ctxt().outer_expn_data().is_root() {
-                return None;
-            }
-            let span = unexpand_into_body_span(raw_span, body_span)?;
-
-            let true_bcb = bcb_from_marker(true_marker)?;
-            let false_bcb = bcb_from_marker(false_marker)?;
-            Some((span, true_bcb, false_bcb))
-        };
-
-    let to_mcdc_branch = |&mir::coverage::MCDCBranchSpan {
-                              span: raw_span,
-                              condition_info,
-                              true_marker,
-                              false_marker,
-                          }| {
-        let (span, true_bcb, false_bcb) = check_branch_bcb(raw_span, true_marker, false_marker)?;
-        Some(MCDCBranch {
-            span,
-            true_bcb,
-            false_bcb,
-            condition_info,
-            true_index: usize::MAX,
-            false_index: usize::MAX,
-        })
-    };
-
-    let mut get_bitmap_idx = |num_test_vectors: usize| -> Option<usize> {
-        let bitmap_idx = *mcdc_bitmap_bits;
-        let next_bitmap_bits = bitmap_idx.saturating_add(num_test_vectors);
-        (next_bitmap_bits <= MCDC_MAX_BITMAP_SIZE).then(|| {
-            *mcdc_bitmap_bits = next_bitmap_bits;
-            bitmap_idx
-        })
-    };
-    mcdc_degraded_branches
-        .extend(coverage_info_hi.mcdc_degraded_branch_spans.iter().filter_map(to_mcdc_branch));
-
-    mcdc_mappings.extend(coverage_info_hi.mcdc_spans.iter().filter_map(|(decision, branches)| {
-        if branches.len() == 0 {
-            return None;
-        }
-        let decision_span = unexpand_into_body_span(decision.span, body_span)?;
-
-        let end_bcbs = decision
-            .end_markers
-            .iter()
-            .map(|&marker| bcb_from_marker(marker))
-            .collect::<Option<_>>()?;
-        let mut branch_mappings: Vec<_> = branches.into_iter().filter_map(to_mcdc_branch).collect();
-        if branch_mappings.len() != branches.len() {
-            mcdc_degraded_branches.extend(branch_mappings);
-            return None;
-        }
-        let num_test_vectors = calc_test_vectors_index(&mut branch_mappings);
-        let Some(bitmap_idx) = get_bitmap_idx(num_test_vectors) else {
-            tcx.dcx().emit_warn(MCDCExceedsTestVectorLimit {
-                span: decision_span,
-                max_num_test_vectors: MCDC_MAX_BITMAP_SIZE,
-            });
-            mcdc_degraded_branches.extend(branch_mappings);
-            return None;
-        };
-        // LLVM requires span of the decision contains all spans of its conditions.
-        // Usually the decision span meets the requirement well but in cases like macros it may not.
-        let span = branch_mappings
-            .iter()
-            .map(|branch| branch.span)
-            .reduce(|lhs, rhs| lhs.to(rhs))
-            .map(
-                |joint_span| {
-                    if decision_span.contains(joint_span) { decision_span } else { joint_span }
-                },
-            )
-            .expect("branch mappings are ensured to be non-empty as checked above");
-        Some((
-            MCDCDecision {
-                span,
-                end_bcbs,
-                bitmap_idx,
-                num_test_vectors,
-                decision_depth: decision.decision_depth,
-            },
-            branch_mappings,
-        ))
-    }));
-}
-
-// LLVM checks the executed test vector by accumulating indices of tested branches.
-// We calculate number of all possible test vectors of the decision and assign indices
-// to branches here.
-// See [the rfc](https://discourse.llvm.org/t/rfc-coverage-new-algorithm-and-file-format-for-mc-dc/76798/)
-// for more details about the algorithm.
-// This function is mostly like [`TVIdxBuilder::TvIdxBuilder`](https://github.com/llvm/llvm-project/blob/d594d9f7f4dc6eb748b3261917db689fdc348b96/llvm/lib/ProfileData/Coverage/CoverageMapping.cpp#L226)
-fn calc_test_vectors_index(conditions: &mut Vec<MCDCBranch>) -> usize {
-    let mut indegree_stats = IndexVec::<ConditionId, usize>::from_elem_n(0, conditions.len());
-    // `num_paths` is `width` described at the llvm rfc, which indicates how many paths reaching the condition node.
-    let mut num_paths_stats = IndexVec::<ConditionId, usize>::from_elem_n(0, conditions.len());
-    let mut next_conditions = conditions
-        .iter_mut()
-        .map(|branch| {
-            let ConditionInfo { condition_id, true_next_id, false_next_id } = branch.condition_info;
-            [true_next_id, false_next_id]
-                .into_iter()
-                .flatten()
-                .for_each(|next_id| indegree_stats[next_id] += 1);
-            (condition_id, branch)
-        })
-        .collect::<FxIndexMap<_, _>>();
-
-    let mut queue =
-        std::collections::VecDeque::from_iter(next_conditions.swap_remove(&ConditionId::START));
-    num_paths_stats[ConditionId::START] = 1;
-    let mut decision_end_nodes = Vec::new();
-    while let Some(branch) = queue.pop_front() {
-        let ConditionInfo { condition_id, true_next_id, false_next_id } = branch.condition_info;
-        let (false_index, true_index) = (&mut branch.false_index, &mut branch.true_index);
-        let this_paths_count = num_paths_stats[condition_id];
-        // Note. First check the false next to ensure conditions are touched in same order with llvm-cov.
-        for (next, index) in [(false_next_id, false_index), (true_next_id, true_index)] {
-            if let Some(next_id) = next {
-                let next_paths_count = &mut num_paths_stats[next_id];
-                *index = *next_paths_count;
-                *next_paths_count = next_paths_count.saturating_add(this_paths_count);
-                let next_indegree = &mut indegree_stats[next_id];
-                *next_indegree -= 1;
-                if *next_indegree == 0 {
-                    queue.push_back(next_conditions.swap_remove(&next_id).expect(
-                        "conditions with non-zero indegree before must be in next_conditions",
-                    ));
-                }
-            } else {
-                decision_end_nodes.push((this_paths_count, condition_id, index));
-            }
-        }
-    }
-    assert!(next_conditions.is_empty(), "the decision tree has untouched nodes");
-    let mut cur_idx = 0;
-    // LLVM hopes the end nodes are sorted in descending order by `num_paths` so that it can
-    // optimize bitmap size for decisions in tree form such as `a && b && c && d && ...`.
-    decision_end_nodes.sort_by_key(|(num_paths, _, _)| usize::MAX - *num_paths);
-    for (num_paths, condition_id, index) in decision_end_nodes {
-        assert_eq!(
-            num_paths, num_paths_stats[condition_id],
-            "end nodes should not be updated since they were visited"
-        );
-        assert_eq!(*index, usize::MAX, "end nodes should not be assigned index before");
-        *index = cur_idx;
-        cur_idx += num_paths;
-    }
-    cur_idx
-}
diff --git a/compiler/rustc_mir_transform/src/coverage/mod.rs b/compiler/rustc_mir_transform/src/coverage/mod.rs
index f253d1662ca..47a87a2e94d 100644
--- a/compiler/rustc_mir_transform/src/coverage/mod.rs
+++ b/compiler/rustc_mir_transform/src/coverage/mod.rs
@@ -1,5 +1,15 @@
+use rustc_middle::mir::coverage::{CoverageKind, FunctionCoverageInfo, Mapping, MappingKind};
+use rustc_middle::mir::{self, BasicBlock, Statement, StatementKind, TerminatorKind};
+use rustc_middle::ty::TyCtxt;
+use tracing::{debug, debug_span, trace};
+
+use crate::coverage::counters::BcbCountersData;
+use crate::coverage::graph::CoverageGraph;
+use crate::coverage::mappings::ExtractedMappings;
+
 mod counters;
 mod graph;
+mod hir_info;
 mod mappings;
 pub(super) mod query;
 mod spans;
@@ -7,22 +17,6 @@ mod spans;
 mod tests;
 mod unexpand;
 
-use rustc_hir as hir;
-use rustc_hir::intravisit::{Visitor, walk_expr};
-use rustc_middle::hir::nested_filter;
-use rustc_middle::mir::coverage::{
-    CoverageKind, DecisionInfo, FunctionCoverageInfo, Mapping, MappingKind,
-};
-use rustc_middle::mir::{self, BasicBlock, Statement, StatementKind, TerminatorKind};
-use rustc_middle::ty::TyCtxt;
-use rustc_span::Span;
-use rustc_span::def_id::LocalDefId;
-use tracing::{debug, debug_span, trace};
-
-use crate::coverage::counters::BcbCountersData;
-use crate::coverage::graph::CoverageGraph;
-use crate::coverage::mappings::ExtractedMappings;
-
 /// Inserts `StatementKind::Coverage` statements that either instrument the binary with injected
 /// counters, via intrinsic `llvm.instrprof.increment`, and/or inject metadata used during codegen
 /// to construct the coverage map.
@@ -69,7 +63,7 @@ fn instrument_function_for_coverage<'tcx>(tcx: TyCtxt<'tcx>, mir_body: &mut mir:
     let def_id = mir_body.source.def_id();
     let _span = debug_span!("instrument_function_for_coverage", ?def_id).entered();
 
-    let hir_info = extract_hir_info(tcx, def_id.expect_local());
+    let hir_info = hir_info::extract_hir_info(tcx, def_id.expect_local());
 
     // Build the coverage graph, which is a simplified view of the MIR control-flow
     // graph that ignores some details not relevant to coverage instrumentation.
@@ -95,14 +89,6 @@ fn instrument_function_for_coverage<'tcx>(tcx: TyCtxt<'tcx>, mir_body: &mut mir:
 
     // Inject coverage statements into MIR.
     inject_coverage_statements(mir_body, &graph);
-    inject_mcdc_statements(mir_body, &graph, &extracted_mappings);
-
-    let mcdc_num_condition_bitmaps = extracted_mappings
-        .mcdc_mappings
-        .iter()
-        .map(|&(mappings::MCDCDecision { decision_depth, .. }, _)| decision_depth)
-        .max()
-        .map_or(0, |max| usize::from(max) + 1);
 
     mir_body.function_coverage_info = Some(Box::new(FunctionCoverageInfo {
         function_source_hash: hir_info.function_source_hash,
@@ -111,9 +97,6 @@ fn instrument_function_for_coverage<'tcx>(tcx: TyCtxt<'tcx>, mir_body: &mut mir:
         priority_list,
 
         mappings,
-
-        mcdc_bitmap_bits: extracted_mappings.mcdc_bitmap_bits,
-        mcdc_num_condition_bitmaps,
     }));
 }
 
@@ -124,13 +107,7 @@ fn instrument_function_for_coverage<'tcx>(tcx: TyCtxt<'tcx>, mir_body: &mut mir:
 /// function can potentially be simplified even further.
 fn create_mappings(extracted_mappings: &ExtractedMappings) -> Vec<Mapping> {
     // Fully destructure the mappings struct to make sure we don't miss any kinds.
-    let ExtractedMappings {
-        code_mappings,
-        branch_pairs,
-        mcdc_bitmap_bits: _,
-        mcdc_degraded_branches,
-        mcdc_mappings,
-    } = extracted_mappings;
+    let ExtractedMappings { code_mappings, branch_pairs } = extracted_mappings;
     let mut mappings = Vec::new();
 
     mappings.extend(code_mappings.iter().map(
@@ -148,57 +125,6 @@ fn create_mappings(extracted_mappings: &ExtractedMappings) -> Vec<Mapping> {
         },
     ));
 
-    // MCDC branch mappings are appended with their decisions in case decisions were ignored.
-    mappings.extend(mcdc_degraded_branches.iter().map(
-        |&mappings::MCDCBranch {
-             span,
-             true_bcb,
-             false_bcb,
-             condition_info: _,
-             true_index: _,
-             false_index: _,
-         }| { Mapping { kind: MappingKind::Branch { true_bcb, false_bcb }, span } },
-    ));
-
-    for (decision, branches) in mcdc_mappings {
-        // FIXME(#134497): Previously it was possible for some of these branch
-        // conversions to fail, in which case the remaining branches in the
-        // decision would be degraded to plain `MappingKind::Branch`.
-        // The changes in #134497 made that failure impossible, because the
-        // fallible step was deferred to codegen. But the corresponding code
-        // in codegen wasn't updated to detect the need for a degrade step.
-        let conditions = branches
-            .into_iter()
-            .map(
-                |&mappings::MCDCBranch {
-                     span,
-                     true_bcb,
-                     false_bcb,
-                     condition_info,
-                     true_index: _,
-                     false_index: _,
-                 }| {
-                    Mapping {
-                        kind: MappingKind::MCDCBranch {
-                            true_bcb,
-                            false_bcb,
-                            mcdc_params: condition_info,
-                        },
-                        span,
-                    }
-                },
-            )
-            .collect::<Vec<_>>();
-
-        // LLVM requires end index for counter mapping regions.
-        let kind = MappingKind::MCDCDecision(DecisionInfo {
-            bitmap_idx: (decision.bitmap_idx + decision.num_test_vectors) as u32,
-            num_conditions: u16::try_from(conditions.len()).unwrap(),
-        });
-        let span = decision.span;
-        mappings.extend(std::iter::once(Mapping { kind, span }).chain(conditions.into_iter()));
-    }
-
     mappings
 }
 
@@ -210,51 +136,6 @@ fn inject_coverage_statements<'tcx>(mir_body: &mut mir::Body<'tcx>, graph: &Cove
     }
 }
 
-/// For each conditions inject statements to update condition bitmap after it has been evaluated.
-/// For each decision inject statements to update test vector bitmap after it has been evaluated.
-fn inject_mcdc_statements<'tcx>(
-    mir_body: &mut mir::Body<'tcx>,
-    graph: &CoverageGraph,
-    extracted_mappings: &ExtractedMappings,
-) {
-    for (decision, conditions) in &extracted_mappings.mcdc_mappings {
-        // Inject test vector update first because `inject_statement` always insert new statement at head.
-        for &end in &decision.end_bcbs {
-            let end_bb = graph[end].leader_bb();
-            inject_statement(
-                mir_body,
-                CoverageKind::TestVectorBitmapUpdate {
-                    bitmap_idx: decision.bitmap_idx as u32,
-                    decision_depth: decision.decision_depth,
-                },
-                end_bb,
-            );
-        }
-
-        for &mappings::MCDCBranch {
-            span: _,
-            true_bcb,
-            false_bcb,
-            condition_info: _,
-            true_index,
-            false_index,
-        } in conditions
-        {
-            for (index, bcb) in [(false_index, false_bcb), (true_index, true_bcb)] {
-                let bb = graph[bcb].leader_bb();
-                inject_statement(
-                    mir_body,
-                    CoverageKind::CondBitmapUpdate {
-                        index: index as u32,
-                        decision_depth: decision.decision_depth,
-                    },
-                    bb,
-                );
-            }
-        }
-    }
-}
-
 fn inject_statement(mir_body: &mut mir::Body<'_>, counter_kind: CoverageKind, bb: BasicBlock) {
     debug!("  injecting statement {counter_kind:?} for {bb:?}");
     let data = &mut mir_body[bb];
@@ -262,122 +143,3 @@ fn inject_statement(mir_body: &mut mir::Body<'_>, counter_kind: CoverageKind, bb
     let statement = Statement::new(source_info, StatementKind::Coverage(counter_kind));
     data.statements.insert(0, statement);
 }
-
-/// Function information extracted from HIR by the coverage instrumentor.
-#[derive(Debug)]
-struct ExtractedHirInfo {
-    function_source_hash: u64,
-    is_async_fn: bool,
-    /// The span of the function's signature, if available.
-    /// Must have the same context and filename as the body span.
-    fn_sig_span: Option<Span>,
-    body_span: Span,
-    /// "Holes" are regions within the function body (or its expansions) that
-    /// should not be included in coverage spans for this function
-    /// (e.g. closures and nested items).
-    hole_spans: Vec<Span>,
-}
-
-fn extract_hir_info<'tcx>(tcx: TyCtxt<'tcx>, def_id: LocalDefId) -> ExtractedHirInfo {
-    // FIXME(#79625): Consider improving MIR to provide the information needed, to avoid going back
-    // to HIR for it.
-
-    // HACK: For synthetic MIR bodies (async closures), use the def id of the HIR body.
-    if tcx.is_synthetic_mir(def_id) {
-        return extract_hir_info(tcx, tcx.local_parent(def_id));
-    }
-
-    let hir_node = tcx.hir_node_by_def_id(def_id);
-    let fn_body_id = hir_node.body_id().expect("HIR node is a function with body");
-    let hir_body = tcx.hir_body(fn_body_id);
-
-    let maybe_fn_sig = hir_node.fn_sig();
-    let is_async_fn = maybe_fn_sig.is_some_and(|fn_sig| fn_sig.header.is_async());
-
-    let mut body_span = hir_body.value.span;
-
-    use hir::{Closure, Expr, ExprKind, Node};
-    // Unexpand a closure's body span back to the context of its declaration.
-    // This helps with closure bodies that consist of just a single bang-macro,
-    // and also with closure bodies produced by async desugaring.
-    if let Node::Expr(&Expr { kind: ExprKind::Closure(&Closure { fn_decl_span, .. }), .. }) =
-        hir_node
-    {
-        body_span = body_span.find_ancestor_in_same_ctxt(fn_decl_span).unwrap_or(body_span);
-    }
-
-    // The actual signature span is only used if it has the same context and
-    // filename as the body, and precedes the body.
-    let fn_sig_span = maybe_fn_sig.map(|fn_sig| fn_sig.span).filter(|&fn_sig_span| {
-        let source_map = tcx.sess.source_map();
-        let file_idx = |span: Span| source_map.lookup_source_file_idx(span.lo());
-
-        fn_sig_span.eq_ctxt(body_span)
-            && fn_sig_span.hi() <= body_span.lo()
-            && file_idx(fn_sig_span) == file_idx(body_span)
-    });
-
-    let function_source_hash = hash_mir_source(tcx, hir_body);
-
-    let hole_spans = extract_hole_spans_from_hir(tcx, hir_body);
-
-    ExtractedHirInfo { function_source_hash, is_async_fn, fn_sig_span, body_span, hole_spans }
-}
-
-fn hash_mir_source<'tcx>(tcx: TyCtxt<'tcx>, hir_body: &'tcx hir::Body<'tcx>) -> u64 {
-    // FIXME(cjgillot) Stop hashing HIR manually here.
-    let owner = hir_body.id().hir_id.owner;
-    tcx.hir_owner_nodes(owner).opt_hash_including_bodies.unwrap().to_smaller_hash().as_u64()
-}
-
-fn extract_hole_spans_from_hir<'tcx>(tcx: TyCtxt<'tcx>, hir_body: &hir::Body<'tcx>) -> Vec<Span> {
-    struct HolesVisitor<'tcx> {
-        tcx: TyCtxt<'tcx>,
-        hole_spans: Vec<Span>,
-    }
-
-    impl<'tcx> Visitor<'tcx> for HolesVisitor<'tcx> {
-        /// We have special handling for nested items, but we still want to
-        /// traverse into nested bodies of things that are not considered items,
-        /// such as "anon consts" (e.g. array lengths).
-        type NestedFilter = nested_filter::OnlyBodies;
-
-        fn maybe_tcx(&mut self) -> TyCtxt<'tcx> {
-            self.tcx
-        }
-
-        /// We override `visit_nested_item` instead of `visit_item` because we
-        /// only need the item's span, not the item itself.
-        fn visit_nested_item(&mut self, id: hir::ItemId) -> Self::Result {
-            let span = self.tcx.def_span(id.owner_id.def_id);
-            self.visit_hole_span(span);
-            // Having visited this item, we don't care about its children,
-            // so don't call `walk_item`.
-        }
-
-        // We override `visit_expr` instead of the more specific expression
-        // visitors, so that we have direct access to the expression span.
-        fn visit_expr(&mut self, expr: &'tcx hir::Expr<'tcx>) {
-            match expr.kind {
-                hir::ExprKind::Closure(_) | hir::ExprKind::ConstBlock(_) => {
-                    self.visit_hole_span(expr.span);
-                    // Having visited this expression, we don't care about its
-                    // children, so don't call `walk_expr`.
-                }
-
-                // For other expressions, recursively visit as normal.
-                _ => walk_expr(self, expr),
-            }
-        }
-    }
-    impl HolesVisitor<'_> {
-        fn visit_hole_span(&mut self, hole_span: Span) {
-            self.hole_spans.push(hole_span);
-        }
-    }
-
-    let mut visitor = HolesVisitor { tcx, hole_spans: vec![] };
-
-    visitor.visit_body(hir_body);
-    visitor.hole_spans
-}
diff --git a/compiler/rustc_mir_transform/src/coverage/query.rs b/compiler/rustc_mir_transform/src/coverage/query.rs
index 551f720c869..63c550c27fe 100644
--- a/compiler/rustc_mir_transform/src/coverage/query.rs
+++ b/compiler/rustc_mir_transform/src/coverage/query.rs
@@ -1,4 +1,5 @@
-use rustc_attr_data_structures::{AttributeKind, CoverageStatus, find_attr};
+use rustc_hir::attrs::{AttributeKind, CoverageAttrKind};
+use rustc_hir::find_attr;
 use rustc_index::bit_set::DenseBitSet;
 use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
 use rustc_middle::mir::coverage::{BasicCoverageBlock, CoverageIdsInfo, CoverageKind, MappingKind};
@@ -32,16 +33,6 @@ fn is_eligible_for_coverage(tcx: TyCtxt<'_>, def_id: LocalDefId) -> bool {
         return false;
     }
 
-    // Don't instrument functions with `#[automatically_derived]` on their
-    // enclosing impl block, on the assumption that most users won't care about
-    // coverage for derived impls.
-    if let Some(impl_of) = tcx.impl_of_assoc(def_id.to_def_id())
-        && tcx.is_automatically_derived(impl_of)
-    {
-        trace!("InstrumentCoverage skipped for {def_id:?} (automatically derived)");
-        return false;
-    }
-
     if tcx.codegen_fn_attrs(def_id).flags.contains(CodegenFnAttrFlags::NAKED) {
         trace!("InstrumentCoverage skipped for {def_id:?} (`#[naked]`)");
         return false;
@@ -57,20 +48,32 @@ fn is_eligible_for_coverage(tcx: TyCtxt<'_>, def_id: LocalDefId) -> bool {
 
 /// Query implementation for `coverage_attr_on`.
 fn coverage_attr_on(tcx: TyCtxt<'_>, def_id: LocalDefId) -> bool {
-    // Check for annotations directly on this def.
-    if let Some(coverage_status) =
-        find_attr!(tcx.get_all_attrs(def_id), AttributeKind::Coverage(_, status) => status)
+    // Check for a `#[coverage(..)]` attribute on this def.
+    if let Some(kind) =
+        find_attr!(tcx.get_all_attrs(def_id), AttributeKind::Coverage(_sp, kind) => kind)
     {
-        *coverage_status == CoverageStatus::On
-    } else {
-        match tcx.opt_local_parent(def_id) {
-            // Check the parent def (and so on recursively) until we find an
-            // enclosing attribute or reach the crate root.
-            Some(parent) => tcx.coverage_attr_on(parent),
-            // We reached the crate root without seeing a coverage attribute, so
-            // allow coverage instrumentation by default.
-            None => true,
+        match kind {
+            CoverageAttrKind::On => return true,
+            CoverageAttrKind::Off => return false,
         }
+    };
+
+    // Treat `#[automatically_derived]` as an implied `#[coverage(off)]`, on
+    // the assumption that most users won't want coverage for derived impls.
+    //
+    // This affects not just the associated items of an impl block, but also
+    // any closures and other nested functions within those associated items.
+    if tcx.is_automatically_derived(def_id.to_def_id()) {
+        return false;
+    }
+
+    // Check the parent def (and so on recursively) until we find an
+    // enclosing attribute or reach the crate root.
+    match tcx.opt_local_parent(def_id) {
+        Some(parent) => tcx.coverage_attr_on(parent),
+        // We reached the crate root without seeing a coverage attribute, so
+        // allow coverage instrumentation by default.
+        None => true,
     }
 }
 
@@ -108,11 +111,6 @@ fn coverage_ids_info<'tcx>(
                 bcb_needs_counter.insert(true_bcb);
                 bcb_needs_counter.insert(false_bcb);
             }
-            MappingKind::MCDCBranch { true_bcb, false_bcb, mcdc_params: _ } => {
-                bcb_needs_counter.insert(true_bcb);
-                bcb_needs_counter.insert(false_bcb);
-            }
-            MappingKind::MCDCDecision(_) => {}
         }
     }
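
For context: the rewritten `coverage_attr_on` first checks the def itself for `#[coverage(..)]`, then treats `#[automatically_derived]` as an implied `#[coverage(off)]`, and otherwise recurses to the parent def. A hedged source-level sketch of that resolution order, not from this commit, assuming the nightly `coverage_attribute` feature:

#![feature(coverage_attribute)]

#[coverage(off)]
mod uninstrumented {
    // No attribute of its own: inherits `off` from the enclosing module.
    pub fn helper() {}

    // An explicit attribute on the def itself takes priority.
    #[coverage(on)]
    pub fn measured() {}
}

// `#[derive(Clone)]` expands to an `#[automatically_derived]` impl, which the
// query now treats as an implied `#[coverage(off)]` for the impl's items and
// anything nested inside them.
#[derive(Clone)]
struct Point(i32);

fn main() {
    uninstrumented::helper();
    uninstrumented::measured();
    let _ = Point(1).clone();
}
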
 
diff --git a/compiler/rustc_mir_transform/src/coverage/spans.rs b/compiler/rustc_mir_transform/src/coverage/spans.rs
index ec76076020e..ae9459dee84 100644
--- a/compiler/rustc_mir_transform/src/coverage/spans.rs
+++ b/compiler/rustc_mir_transform/src/coverage/spans.rs
@@ -1,12 +1,15 @@
 use rustc_data_structures::fx::FxHashSet;
 use rustc_middle::mir;
+use rustc_middle::mir::coverage::START_BCB;
 use rustc_middle::ty::TyCtxt;
-use rustc_span::{DesugaringKind, ExpnKind, MacroKind, Span};
+use rustc_span::source_map::SourceMap;
+use rustc_span::{BytePos, DesugaringKind, ExpnKind, MacroKind, Span};
 use tracing::instrument;
 
 use crate::coverage::graph::{BasicCoverageBlock, CoverageGraph};
+use crate::coverage::hir_info::ExtractedHirInfo;
 use crate::coverage::spans::from_mir::{Hole, RawSpanFromMir, SpanFromMir};
-use crate::coverage::{ExtractedHirInfo, mappings, unexpand};
+use crate::coverage::{mappings, unexpand};
 
 mod from_mir;
 
@@ -15,8 +18,19 @@ pub(super) fn extract_refined_covspans<'tcx>(
     mir_body: &mir::Body<'tcx>,
     hir_info: &ExtractedHirInfo,
     graph: &CoverageGraph,
-    code_mappings: &mut impl Extend<mappings::CodeMapping>,
+    code_mappings: &mut Vec<mappings::CodeMapping>,
 ) {
+    if hir_info.is_async_fn {
+        // An async function desugars into a function that returns a future,
+        // with the user code wrapped in a closure. Any spans in the desugared
+        // outer function will be unhelpful, so just keep the signature span
+        // and ignore all of the spans in the MIR body.
+        if let Some(span) = hir_info.fn_sig_span {
+            code_mappings.push(mappings::CodeMapping { span, bcb: START_BCB });
+        }
+        return;
+    }
+
     let &ExtractedHirInfo { body_span, .. } = hir_info;
 
     let raw_spans = from_mir::extract_raw_spans_from_mir(mir_body, graph);
@@ -83,8 +97,18 @@ pub(super) fn extract_refined_covspans<'tcx>(
     // Discard any span that overlaps with a hole.
     discard_spans_overlapping_holes(&mut covspans, &holes);
 
-    // Perform more refinement steps after holes have been dealt with.
+    // Discard spans that overlap in unwanted ways.
     let mut covspans = remove_unwanted_overlapping_spans(covspans);
+
+    // For all empty spans, either enlarge them to be non-empty, or discard them.
+    let source_map = tcx.sess.source_map();
+    covspans.retain_mut(|covspan| {
+        let Some(span) = ensure_non_empty_span(source_map, covspan.span) else { return false };
+        covspan.span = span;
+        true
+    });
+
+    // Merge covspans that can be merged.
     covspans.dedup_by(|b, a| a.merge_if_eligible(b));
 
     code_mappings.extend(covspans.into_iter().map(|Covspan { span, bcb }| {
@@ -230,3 +254,26 @@ fn compare_spans(a: Span, b: Span) -> std::cmp::Ordering {
         // - Both have the same start and span A extends further right
         .then_with(|| Ord::cmp(&a.hi(), &b.hi()).reverse())
 }
+
+fn ensure_non_empty_span(source_map: &SourceMap, span: Span) -> Option<Span> {
+    if !span.is_empty() {
+        return Some(span);
+    }
+
+    // The span is empty, so try to enlarge it to cover an adjacent '{' or '}'.
+    source_map
+        .span_to_source(span, |src, start, end| try {
+            // Adjusting span endpoints by `BytePos(1)` is normally a bug,
+            // but in this case we have specifically checked that the character
+            // we're skipping over is one of two specific ASCII characters, so
+            // adjusting by exactly 1 byte is correct.
+            if src.as_bytes().get(end).copied() == Some(b'{') {
+                Some(span.with_hi(span.hi() + BytePos(1)))
+            } else if start > 0 && src.as_bytes()[start - 1] == b'}' {
+                Some(span.with_lo(span.lo() - BytePos(1)))
+            } else {
+                None
+            }
+        })
+        .ok()?
+}
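
For context: `ensure_non_empty_span` widens an empty covspan to swallow an adjacent `{` or `}` so it still maps to at least one byte of source, and discards it otherwise. A standalone sketch of the same rule on a plain string, with byte offsets standing in for the compiler's `BytePos`/`SourceMap` machinery (not compiler code):

fn widen_empty_range(src: &str, start: usize, end: usize) -> Option<(usize, usize)> {
    if start != end {
        return Some((start, end)); // already non-empty: keep as-is
    }
    if src.as_bytes().get(end).copied() == Some(b'{') {
        Some((start, end + 1)) // grow one byte to the right, over '{'
    } else if start > 0 && src.as_bytes()[start - 1] == b'}' {
        Some((start - 1, end)) // grow one byte to the left, over '}'
    } else {
        None // nothing adjacent to claim: discard the empty span
    }
}

fn main() {
    let src = "fn f() {}";
    assert_eq!(widen_empty_range(src, 7, 7), Some((7, 8))); // covers the '{'
    assert_eq!(widen_empty_range(src, 9, 9), Some((8, 9))); // covers the '}'
    assert_eq!(widen_empty_range(src, 2, 2), None);         // dropped
}
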
diff --git a/compiler/rustc_mir_transform/src/coverage/spans/from_mir.rs b/compiler/rustc_mir_transform/src/coverage/spans/from_mir.rs
index 804cd8ab3f7..7985e1c0798 100644
--- a/compiler/rustc_mir_transform/src/coverage/spans/from_mir.rs
+++ b/compiler/rustc_mir_transform/src/coverage/spans/from_mir.rs
@@ -101,11 +101,7 @@ fn filtered_statement_span(statement: &Statement<'_>) -> Option<Span> {
         StatementKind::Coverage(CoverageKind::BlockMarker { .. }) => None,
 
         // These coverage statements should not exist prior to coverage instrumentation.
-        StatementKind::Coverage(
-            CoverageKind::VirtualCounter { .. }
-            | CoverageKind::CondBitmapUpdate { .. }
-            | CoverageKind::TestVectorBitmapUpdate { .. },
-        ) => bug!(
+        StatementKind::Coverage(CoverageKind::VirtualCounter { .. }) => bug!(
             "Unexpected coverage statement found during coverage instrumentation: {statement:?}"
         ),
     }
diff --git a/compiler/rustc_mir_transform/src/cross_crate_inline.rs b/compiler/rustc_mir_transform/src/cross_crate_inline.rs
index 6d7b7e10ef6..b186c2bd775 100644
--- a/compiler/rustc_mir_transform/src/cross_crate_inline.rs
+++ b/compiler/rustc_mir_transform/src/cross_crate_inline.rs
@@ -1,4 +1,4 @@
-use rustc_attr_data_structures::InlineAttr;
+use rustc_hir::attrs::InlineAttr;
 use rustc_hir::def::DefKind;
 use rustc_hir::def_id::LocalDefId;
 use rustc_middle::mir::visit::Visitor;
diff --git a/compiler/rustc_mir_transform/src/ctfe_limit.rs b/compiler/rustc_mir_transform/src/ctfe_limit.rs
index fb17cca30f4..ac46336b834 100644
--- a/compiler/rustc_mir_transform/src/ctfe_limit.rs
+++ b/compiler/rustc_mir_transform/src/ctfe_limit.rs
@@ -18,7 +18,7 @@ impl<'tcx> crate::MirPass<'tcx> for CtfeLimit {
             .basic_blocks
             .iter_enumerated()
             .filter_map(|(node, node_data)| {
-                if matches!(node_data.terminator().kind, TerminatorKind::Call { .. })
+                if matches!(node_data.terminator().kind, TerminatorKind::Call { .. } | TerminatorKind::TailCall { .. })
                     // Back edges in a CFG indicate loops
                     || has_back_edge(doms, node, node_data)
                 {
diff --git a/compiler/rustc_mir_transform/src/elaborate_drop.rs b/compiler/rustc_mir_transform/src/elaborate_drop.rs
index de96b1f255a..4f3c53d761f 100644
--- a/compiler/rustc_mir_transform/src/elaborate_drop.rs
+++ b/compiler/rustc_mir_transform/src/elaborate_drop.rs
@@ -611,6 +611,7 @@ where
     ///
     /// For example, with 3 fields, the drop ladder is
     ///
+    /// ```text
     /// .d0:
     ///     ELAB(drop location.0 [target=.d1, unwind=.c1])
     /// .d1:
@@ -621,8 +622,10 @@ where
     ///     ELAB(drop location.1 [target=.c2])
     /// .c2:
     ///     ELAB(drop location.2 [target=`self.unwind`])
+    /// ```
     ///
     /// For possible-async drops in coroutines we also need dropline ladder
+    /// ```text
     /// .d0 (mainline):
     ///     ELAB(drop location.0 [target=.d1, unwind=.c1, drop=.e1])
     /// .d1 (mainline):
@@ -637,6 +640,7 @@ where
     ///     ELAB(drop location.1 [target=.e2, unwind=.c2])
     /// .e2 (dropline):
     ///     ELAB(drop location.2 [target=`self.drop`, unwind=`self.unwind`])
+    /// ```
     ///
     /// NOTE: this does not clear the master drop flag, so you need
     /// to point succ/unwind on a `drop_ladder_bottom`.
@@ -761,24 +765,37 @@ where
 
         let skip_contents = adt.is_union() || adt.is_manually_drop();
         let contents_drop = if skip_contents {
+            if adt.has_dtor(self.tcx()) && self.elaborator.get_drop_flag(self.path).is_some() {
+                // the top-level drop flag is usually cleared by open_drop_for_adt_contents
+                // types with destructors would still need an empty drop ladder to clear it
+
+                // however, these types are only open dropped in `DropShimElaborator`
+                // which does not have drop flags
+                // a future box-like "DerefMove" trait would allow for this case to happen
+                span_bug!(self.source_info.span, "open dropping partially moved union");
+            }
+
             (self.succ, self.unwind, self.dropline)
         } else {
             self.open_drop_for_adt_contents(adt, args)
         };
 
-        if adt.is_box() {
-            // we need to drop the inside of the box before running the destructor
-            let succ = self.destructor_call_block_sync((contents_drop.0, contents_drop.1));
-            let unwind = contents_drop
-                .1
-                .map(|unwind| self.destructor_call_block_sync((unwind, Unwind::InCleanup)));
-            let dropline = contents_drop
-                .2
-                .map(|dropline| self.destructor_call_block_sync((dropline, contents_drop.1)));
-
-            self.open_drop_for_box_contents(adt, args, succ, unwind, dropline)
-        } else if adt.has_dtor(self.tcx()) {
-            self.destructor_call_block(contents_drop)
+        if adt.has_dtor(self.tcx()) {
+            let destructor_block = if adt.is_box() {
+                // we need to drop the inside of the box before running the destructor
+                let succ = self.destructor_call_block_sync((contents_drop.0, contents_drop.1));
+                let unwind = contents_drop
+                    .1
+                    .map(|unwind| self.destructor_call_block_sync((unwind, Unwind::InCleanup)));
+                let dropline = contents_drop
+                    .2
+                    .map(|dropline| self.destructor_call_block_sync((dropline, contents_drop.1)));
+                self.open_drop_for_box_contents(adt, args, succ, unwind, dropline)
+            } else {
+                self.destructor_call_block(contents_drop)
+            };
+
+            self.drop_flag_test_block(destructor_block, contents_drop.0, contents_drop.1)
         } else {
             contents_drop.0
         }
@@ -982,12 +999,7 @@ where
             unwind.is_cleanup(),
         );
 
-        let destructor_block = self.elaborator.patch().new_block(result);
-
-        let block_start = Location { block: destructor_block, statement_index: 0 };
-        self.elaborator.clear_drop_flag(block_start, self.path, DropFlagMode::Shallow);
-
-        self.drop_flag_test_block(destructor_block, succ, unwind)
+        self.elaborator.patch().new_block(result)
     }
 
     fn destructor_call_block(
@@ -1002,13 +1014,7 @@ where
             && !unwind.is_cleanup()
             && ty.is_async_drop(self.tcx(), self.elaborator.typing_env())
         {
-            let destructor_block =
-                self.build_async_drop(self.place, ty, None, succ, unwind, dropline, true);
-
-            let block_start = Location { block: destructor_block, statement_index: 0 };
-            self.elaborator.clear_drop_flag(block_start, self.path, DropFlagMode::Shallow);
-
-            self.drop_flag_test_block(destructor_block, succ, unwind)
+            self.build_async_drop(self.place, ty, None, succ, unwind, dropline, true)
         } else {
             self.destructor_call_block_sync((succ, unwind))
         }
diff --git a/compiler/rustc_mir_transform/src/errors.rs b/compiler/rustc_mir_transform/src/errors.rs
index cffa0183fa7..ad9635aae33 100644
--- a/compiler/rustc_mir_transform/src/errors.rs
+++ b/compiler/rustc_mir_transform/src/errors.rs
@@ -117,14 +117,6 @@ pub(crate) struct FnItemRef {
     pub ident: Ident,
 }
 
-#[derive(Diagnostic)]
-#[diag(mir_transform_exceeds_mcdc_test_vector_limit)]
-pub(crate) struct MCDCExceedsTestVectorLimit {
-    #[primary_span]
-    pub(crate) span: Span,
-    pub(crate) max_num_test_vectors: usize,
-}
-
 pub(crate) struct MustNotSupend<'a, 'tcx> {
     pub tcx: TyCtxt<'tcx>,
     pub yield_sp: Span,
diff --git a/compiler/rustc_mir_transform/src/gvn.rs b/compiler/rustc_mir_transform/src/gvn.rs
index dc99b67a1e8..952da2cdf72 100644
--- a/compiler/rustc_mir_transform/src/gvn.rs
+++ b/compiler/rustc_mir_transform/src/gvn.rs
@@ -756,7 +756,13 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
                 && let Some(v) = self.simplify_place_value(&mut pointee, location)
             {
                 value = v;
-                place_ref = pointee.project_deeper(&place.projection[index..], self.tcx).as_ref();
+                // `pointee` holds a `Place`, so `ProjectionElem::Index` holds a `Local`.
+                // That local is SSA, but we otherwise have no guarantee on that local's value at
+                // the current location compared to its value where `pointee` was borrowed.
+                if pointee.projection.iter().all(|elem| !matches!(elem, ProjectionElem::Index(_))) {
+                    place_ref =
+                        pointee.project_deeper(&place.projection[index..], self.tcx).as_ref();
+                }
             }
             if let Some(local) = self.try_as_local(value, location) {
                 // Both `local` and `Place { local: place.local, projection: projection[..index] }`
@@ -774,7 +780,12 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
             && let Some(v) = self.simplify_place_value(&mut pointee, location)
         {
             value = v;
-            place_ref = pointee.project_deeper(&[], self.tcx).as_ref();
+            // `pointee` holds a `Place`, so `ProjectionElem::Index` holds a `Local`.
+            // That local is SSA, but we otherwise have no guarantee on that local's value at
+            // the current location compared to its value where `pointee` was borrowed.
+            if pointee.projection.iter().all(|elem| !matches!(elem, ProjectionElem::Index(_))) {
+                place_ref = pointee.project_deeper(&[], self.tcx).as_ref();
+            }
         }
         if let Some(new_local) = self.try_as_local(value, location) {
             place_ref = PlaceRef { local: new_local, projection: &[] };
diff --git a/compiler/rustc_mir_transform/src/inline.rs b/compiler/rustc_mir_transform/src/inline.rs
index 1c0fc774867..3d49eb4e8ef 100644
--- a/compiler/rustc_mir_transform/src/inline.rs
+++ b/compiler/rustc_mir_transform/src/inline.rs
@@ -5,7 +5,7 @@ use std::iter;
 use std::ops::{Range, RangeFrom};
 
 use rustc_abi::{ExternAbi, FieldIdx};
-use rustc_attr_data_structures::{InlineAttr, OptimizeAttr};
+use rustc_hir::attrs::{InlineAttr, OptimizeAttr};
 use rustc_hir::def::DefKind;
 use rustc_hir::def_id::DefId;
 use rustc_index::Idx;
diff --git a/compiler/rustc_mir_transform/src/instsimplify.rs b/compiler/rustc_mir_transform/src/instsimplify.rs
index dbcaed20953..c83bd25c663 100644
--- a/compiler/rustc_mir_transform/src/instsimplify.rs
+++ b/compiler/rustc_mir_transform/src/instsimplify.rs
@@ -55,6 +55,7 @@ impl<'tcx> crate::MirPass<'tcx> for InstSimplify {
 
             let terminator = block.terminator.as_mut().unwrap();
             ctx.simplify_primitive_clone(terminator, &mut block.statements);
+            ctx.simplify_align_of_slice_val(terminator, &mut block.statements);
             ctx.simplify_intrinsic_assert(terminator);
             ctx.simplify_nounwind_call(terminator);
             simplify_duplicate_switch_targets(terminator);
@@ -252,6 +253,36 @@ impl<'tcx> InstSimplifyContext<'_, 'tcx> {
         terminator.kind = TerminatorKind::Goto { target: *destination_block };
     }
 
+    // Convert `align_of_val::<[T]>(ptr)` to `align_of::<T>()`, since the
+    // alignment of a slice doesn't actually depend on metadata at all
+    // and the element type is always `Sized`.
+    //
+    // This is here so it can run after inlining, where it's more useful.
+    // (LowerIntrinsics is done in cleanup, before the optimization passes.)
+    fn simplify_align_of_slice_val(
+        &self,
+        terminator: &mut Terminator<'tcx>,
+        statements: &mut Vec<Statement<'tcx>>,
+    ) {
+        if let TerminatorKind::Call {
+            func, args, destination, target: Some(destination_block), ..
+        } = &terminator.kind
+            && args.len() == 1
+            && let Some((fn_def_id, generics)) = func.const_fn_def()
+            && self.tcx.is_intrinsic(fn_def_id, sym::align_of_val)
+            && let ty::Slice(elem_ty) = *generics.type_at(0).kind()
+        {
+            statements.push(Statement::new(
+                terminator.source_info,
+                StatementKind::Assign(Box::new((
+                    *destination,
+                    Rvalue::NullaryOp(NullOp::AlignOf, elem_ty),
+                ))),
+            ));
+            terminator.kind = TerminatorKind::Goto { target: *destination_block };
+        }
+    }
+
     fn simplify_nounwind_call(&self, terminator: &mut Terminator<'tcx>) {
         let TerminatorKind::Call { ref func, ref mut unwind, .. } = terminator.kind else {
             return;
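
For context: the new `simplify_align_of_slice_val` peephole relies on a plain language fact, namely that the alignment of `[T]` is the alignment of `T`, independent of the slice's length metadata. A small check of that equivalence (assumed setup for illustration, not compiler code):

use std::mem::{align_of, align_of_val};

fn main() {
    let xs: &[u64] = &[1, 2, 3];
    // The fat-pointer metadata (the length) never matters for alignment,
    // so `align_of_val` on a slice is just `align_of` of the element type.
    assert_eq!(align_of_val(xs), align_of::<u64>());

    let empty: &[u16] = &[];
    assert_eq!(align_of_val(empty), align_of::<u16>());
}
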
diff --git a/compiler/rustc_mir_transform/src/remove_unneeded_drops.rs b/compiler/rustc_mir_transform/src/remove_unneeded_drops.rs
index 43f80508e4a..6c2dfc59da2 100644
--- a/compiler/rustc_mir_transform/src/remove_unneeded_drops.rs
+++ b/compiler/rustc_mir_transform/src/remove_unneeded_drops.rs
@@ -4,7 +4,13 @@
 //! useful because (unlike MIR building) it runs after type checking, so it can make use of
 //! `TypingMode::PostAnalysis` to provide more precise type information, especially about opaque
 //! types.
+//!
+//! When we're optimizing, we also remove calls to `drop_in_place<T>` when `T` isn't `needs_drop`,
+//! as those are essentially equivalent to `Drop` terminators. While the compiler doesn't insert
+//! them automatically, preferring the built-in instead, they're common in generic code (such as
+//! `Vec::truncate`) so removing them from things like inlined `Vec<u8>` is helpful.
 
+use rustc_hir::LangItem;
 use rustc_middle::mir::*;
 use rustc_middle::ty::TyCtxt;
 use tracing::{debug, trace};
@@ -21,15 +27,26 @@ impl<'tcx> crate::MirPass<'tcx> for RemoveUnneededDrops {
         let mut should_simplify = false;
         for block in body.basic_blocks.as_mut() {
             let terminator = block.terminator_mut();
-            if let TerminatorKind::Drop { place, target, .. } = terminator.kind {
-                let ty = place.ty(&body.local_decls, tcx);
-                if ty.ty.needs_drop(tcx, typing_env) {
-                    continue;
+            let (ty, target) = match terminator.kind {
+                TerminatorKind::Drop { place, target, .. } => {
+                    (place.ty(&body.local_decls, tcx).ty, target)
+                }
+                TerminatorKind::Call { ref func, target: Some(target), .. }
+                    if tcx.sess.mir_opt_level() > 0
+                        && let Some((def_id, generics)) = func.const_fn_def()
+                        && tcx.is_lang_item(def_id, LangItem::DropInPlace) =>
+                {
+                    (generics.type_at(0), target)
                 }
-                debug!("SUCCESS: replacing `drop` with goto({:?})", target);
-                terminator.kind = TerminatorKind::Goto { target };
-                should_simplify = true;
+                _ => continue,
+            };
+
+            if ty.needs_drop(tcx, typing_env) {
+                continue;
             }
+            debug!("SUCCESS: replacing `drop` with goto({:?})", target);
+            terminator.kind = TerminatorKind::Goto { target };
+            should_simplify = true;
         }
 
         // if we applied optimizations, we potentially have some cfg to cleanup to
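
For context: the extended pass keys off `needs_drop`. A (typically inlined) call to `drop_in_place::<T>` where `T` has nothing to drop behaves like an empty `Drop` terminator and can be replaced with a plain goto. A small source-level illustration of the property being exploited, not from this commit:

use std::mem::needs_drop;

fn main() {
    // `Vec::truncate` internally calls `drop_in_place` on the removed tail.
    // For `Vec<u8>` that call has nothing to do, which is exactly what the
    // pass can now prove and delete after inlining:
    assert!(!needs_drop::<u8>());
    assert!(needs_drop::<String>()); // ...whereas a `Vec<String>` tail drop must stay.

    let mut bytes: Vec<u8> = (0u8..16).collect();
    bytes.truncate(4);
    assert_eq!(bytes.len(), 4);
}
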
diff --git a/compiler/rustc_mir_transform/src/simplify.rs b/compiler/rustc_mir_transform/src/simplify.rs
index db933da6413..468ef742dfb 100644
--- a/compiler/rustc_mir_transform/src/simplify.rs
+++ b/compiler/rustc_mir_transform/src/simplify.rs
@@ -225,6 +225,7 @@ impl<'a, 'tcx> CfgSimplifier<'a, 'tcx> {
             current = target;
         }
         let last = current;
+        *changed |= *start != last;
         *start = last;
         while let Some((current, mut terminator)) = terminators.pop() {
             let Terminator { kind: TerminatorKind::Goto { ref mut target }, .. } = terminator
diff --git a/compiler/rustc_mir_transform/src/sroa.rs b/compiler/rustc_mir_transform/src/sroa.rs
index 80c4b58a574..38769885f36 100644
--- a/compiler/rustc_mir_transform/src/sroa.rs
+++ b/compiler/rustc_mir_transform/src/sroa.rs
@@ -72,8 +72,12 @@ fn escaping_locals<'tcx>(
             return true;
         }
         if let ty::Adt(def, _args) = ty.kind()
-            && tcx.is_lang_item(def.did(), LangItem::DynMetadata)
+            && (def.repr().simd() || tcx.is_lang_item(def.did(), LangItem::DynMetadata))
         {
+            // Exclude #[repr(simd)] types so that they are not de-optimized into an array
+            // (MCP#838 banned projections into SIMD types, but if the value is unused
+            // this pass sees "all the uses are of the fields" and expands it.)
+
             // codegen wants to see the `DynMetadata<T>`,
             // not the inner reference-to-opaque-type.
             return true;
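
For context: the kind of type the new SROA exclusion targets is a `#[repr(simd)]` value, which must stay whole because MIR no longer permits projecting into its lanes (MCP#838). A nightly-only sketch for illustration; the `repr_simd` feature gate and the `F32x4` type are assumptions, not part of this commit:

#![feature(repr_simd)]

// SROA must not split this into a plain array of lanes, even if the value
// appears used only through its field.
#[repr(simd)]
struct F32x4([f32; 4]);

fn main() {
    let _v = F32x4([1.0, 2.0, 3.0, 4.0]);
}
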
diff --git a/compiler/rustc_mir_transform/src/validate.rs b/compiler/rustc_mir_transform/src/validate.rs
index 98d12bf0a38..99e4782e470 100644
--- a/compiler/rustc_mir_transform/src/validate.rs
+++ b/compiler/rustc_mir_transform/src/validate.rs
@@ -1,9 +1,9 @@
 //! Validates the MIR to ensure that invariants are upheld.
 
 use rustc_abi::{ExternAbi, FIRST_VARIANT, Size};
-use rustc_attr_data_structures::InlineAttr;
 use rustc_data_structures::fx::{FxHashMap, FxHashSet};
 use rustc_hir::LangItem;
+use rustc_hir::attrs::InlineAttr;
 use rustc_index::IndexVec;
 use rustc_index::bit_set::DenseBitSet;
 use rustc_infer::infer::TyCtxtInferExt;