Diffstat (limited to 'compiler/rustc_mir_transform/src')
-rw-r--r--  compiler/rustc_mir_transform/src/add_retag.rs | 2
-rw-r--r--  compiler/rustc_mir_transform/src/check_call_recursion.rs | 6
-rw-r--r--  compiler/rustc_mir_transform/src/check_enums.rs | 15
-rw-r--r--  compiler/rustc_mir_transform/src/check_inline.rs | 8
-rw-r--r--  compiler/rustc_mir_transform/src/check_packed_ref.rs | 2
-rw-r--r--  compiler/rustc_mir_transform/src/coroutine/drop.rs | 8
-rw-r--r--  compiler/rustc_mir_transform/src/coverage/counters.rs | 1
-rw-r--r--  compiler/rustc_mir_transform/src/coverage/counters/node_flow.rs | 3
-rw-r--r--  compiler/rustc_mir_transform/src/coverage/counters/union_find.rs | 96
-rw-r--r--  compiler/rustc_mir_transform/src/coverage/counters/union_find/tests.rs | 32
-rw-r--r--  compiler/rustc_mir_transform/src/coverage/hir_info.rs | 128
-rw-r--r--  compiler/rustc_mir_transform/src/coverage/mappings.rs | 310
-rw-r--r--  compiler/rustc_mir_transform/src/coverage/mod.rs | 294
-rw-r--r--  compiler/rustc_mir_transform/src/coverage/query.rs | 48
-rw-r--r--  compiler/rustc_mir_transform/src/coverage/spans.rs | 59
-rw-r--r--  compiler/rustc_mir_transform/src/coverage/spans/from_mir.rs | 6
-rw-r--r--  compiler/rustc_mir_transform/src/cross_crate_inline.rs | 2
-rw-r--r--  compiler/rustc_mir_transform/src/ctfe_limit.rs | 2
-rw-r--r--  compiler/rustc_mir_transform/src/elaborate_drop.rs | 58
-rw-r--r--  compiler/rustc_mir_transform/src/elaborate_drops.rs | 4
-rw-r--r--  compiler/rustc_mir_transform/src/errors.rs | 8
-rw-r--r--  compiler/rustc_mir_transform/src/gvn.rs | 614
-rw-r--r--  compiler/rustc_mir_transform/src/impossible_predicates.rs | 25
-rw-r--r--  compiler/rustc_mir_transform/src/inline.rs | 2
-rw-r--r--  compiler/rustc_mir_transform/src/inline/cycle.rs | 44
-rw-r--r--  compiler/rustc_mir_transform/src/instsimplify.rs | 31
-rw-r--r--  compiler/rustc_mir_transform/src/pass_manager.rs | 45
-rw-r--r--  compiler/rustc_mir_transform/src/promote_consts.rs | 19
-rw-r--r--  compiler/rustc_mir_transform/src/ref_prop.rs | 39
-rw-r--r--  compiler/rustc_mir_transform/src/remove_noop_landing_pads.rs | 15
-rw-r--r--  compiler/rustc_mir_transform/src/remove_unneeded_drops.rs | 31
-rw-r--r--  compiler/rustc_mir_transform/src/shim.rs | 6
-rw-r--r--  compiler/rustc_mir_transform/src/simplify.rs | 1
-rw-r--r--  compiler/rustc_mir_transform/src/sroa.rs | 40
-rw-r--r--  compiler/rustc_mir_transform/src/ssa.rs | 26
-rw-r--r--  compiler/rustc_mir_transform/src/unreachable_prop.rs | 2
-rw-r--r--  compiler/rustc_mir_transform/src/validate.rs | 46
37 files changed, 805 insertions(+), 1273 deletions(-)
diff --git a/compiler/rustc_mir_transform/src/add_retag.rs b/compiler/rustc_mir_transform/src/add_retag.rs
index 3c29d4624b7..fc08c1df870 100644
--- a/compiler/rustc_mir_transform/src/add_retag.rs
+++ b/compiler/rustc_mir_transform/src/add_retag.rs
@@ -4,7 +4,6 @@
 //! of MIR building, and only after this pass we think of the program as having the
 //! normal MIR semantics.
 
-use rustc_hir::LangItem;
 use rustc_middle::mir::*;
 use rustc_middle::ty::{self, Ty, TyCtxt};
 
@@ -28,7 +27,6 @@ fn may_contain_reference<'tcx>(ty: Ty<'tcx>, depth: u32, tcx: TyCtxt<'tcx>) -> b
         // References and Boxes (`noalias` sources)
         ty::Ref(..) => true,
         ty::Adt(..) if ty.is_box() => true,
-        ty::Adt(adt, _) if tcx.is_lang_item(adt.did(), LangItem::PtrUnique) => true,
         // Compound types: recurse
         ty::Array(ty, _) | ty::Slice(ty) => {
             // This does not branch so we keep the depth the same.
diff --git a/compiler/rustc_mir_transform/src/check_call_recursion.rs b/compiler/rustc_mir_transform/src/check_call_recursion.rs
index cace4cd6bba..a9acb1da5a3 100644
--- a/compiler/rustc_mir_transform/src/check_call_recursion.rs
+++ b/compiler/rustc_mir_transform/src/check_call_recursion.rs
@@ -21,7 +21,7 @@ impl<'tcx> MirLint<'tcx> for CheckCallRecursion {
 
         if let DefKind::Fn | DefKind::AssocFn = tcx.def_kind(def_id) {
             // If this is trait/impl method, extract the trait's args.
-            let trait_args = match tcx.trait_of_item(def_id.to_def_id()) {
+            let trait_args = match tcx.trait_of_assoc(def_id.to_def_id()) {
                 Some(trait_def_id) => {
                     let trait_args_count = tcx.generics_of(trait_def_id).count();
                     &GenericArgs::identity_for_item(tcx, def_id)[..trait_args_count]
@@ -43,8 +43,8 @@ impl<'tcx> MirLint<'tcx> for CheckDropRecursion {
 
         // First check if `body` is an `fn drop()` of `Drop`
         if let DefKind::AssocFn = tcx.def_kind(def_id)
-        && let Some(trait_ref) =
-            tcx.impl_of_method(def_id.to_def_id()).and_then(|def_id| tcx.impl_trait_ref(def_id))
+        && let Some(impl_id) = tcx.trait_impl_of_assoc(def_id.to_def_id())
+        && let trait_ref = tcx.impl_trait_ref(impl_id).unwrap()
         && tcx.is_lang_item(trait_ref.instantiate_identity().def_id, LangItem::Drop)
         // avoid erroneous `Drop` impls from causing ICEs below
         && let sig = tcx.fn_sig(def_id).instantiate_identity()
diff --git a/compiler/rustc_mir_transform/src/check_enums.rs b/compiler/rustc_mir_transform/src/check_enums.rs
index 33a87cb9873..12447dc7cbb 100644
--- a/compiler/rustc_mir_transform/src/check_enums.rs
+++ b/compiler/rustc_mir_transform/src/check_enums.rs
@@ -48,6 +48,21 @@ impl<'tcx> crate::MirPass<'tcx> for CheckEnums {
                     let new_block = split_block(basic_blocks, location);
 
                     match check {
+                        EnumCheckType::Direct { op_size, .. }
+                        | EnumCheckType::WithNiche { op_size, .. }
+                            if op_size.bytes() == 0 =>
+                        {
+                            // It is never valid to use a ZST as a discriminant for an inhabited enum, but that will
+                            // have been caught by the type checker. Do nothing but ensure that a bug has been signaled.
+                            tcx.dcx().span_delayed_bug(
+                                source_info.span,
+                                "cannot build enum discriminant from zero-sized type",
+                            );
+                            basic_blocks[block].terminator = Some(Terminator {
+                                source_info,
+                                kind: TerminatorKind::Goto { target: new_block },
+                            });
+                        }
                         EnumCheckType::Direct { source_op, discr, op_size, valid_discrs } => {
                             insert_direct_enum_check(
                                 tcx,
diff --git a/compiler/rustc_mir_transform/src/check_inline.rs b/compiler/rustc_mir_transform/src/check_inline.rs
index 14d9532894f..8d28cb3ca00 100644
--- a/compiler/rustc_mir_transform/src/check_inline.rs
+++ b/compiler/rustc_mir_transform/src/check_inline.rs
@@ -1,7 +1,7 @@
 //! Check that a body annotated with `#[rustc_force_inline]` will not fail to inline based on its
 //! definition alone (irrespective of any specific caller).
 
-use rustc_attr_data_structures::InlineAttr;
+use rustc_hir::attrs::InlineAttr;
 use rustc_hir::def_id::DefId;
 use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
 use rustc_middle::mir::{Body, TerminatorKind};
@@ -45,12 +45,6 @@ pub(super) fn is_inline_valid_on_fn<'tcx>(
         return Err("#[rustc_no_mir_inline]");
     }
 
-    // FIXME(#127234): Coverage instrumentation currently doesn't handle inlined
-    // MIR correctly when Modified Condition/Decision Coverage is enabled.
-    if tcx.sess.instrument_coverage_mcdc() {
-        return Err("incompatible with MC/DC coverage");
-    }
-
     let ty = tcx.type_of(def_id);
     if match ty.instantiate_identity().kind() {
         ty::FnDef(..) => tcx.fn_sig(def_id).instantiate_identity().c_variadic(),
diff --git a/compiler/rustc_mir_transform/src/check_packed_ref.rs b/compiler/rustc_mir_transform/src/check_packed_ref.rs
index e9b85ba6e9d..100104e9de0 100644
--- a/compiler/rustc_mir_transform/src/check_packed_ref.rs
+++ b/compiler/rustc_mir_transform/src/check_packed_ref.rs
@@ -40,7 +40,7 @@ impl<'tcx> Visitor<'tcx> for PackedRefChecker<'_, 'tcx> {
         if context.is_borrow() && util::is_disaligned(self.tcx, self.body, self.typing_env, *place)
         {
             let def_id = self.body.source.instance.def_id();
-            if let Some(impl_def_id) = self.tcx.impl_of_method(def_id)
+            if let Some(impl_def_id) = self.tcx.trait_impl_of_assoc(def_id)
                 && self.tcx.is_builtin_derived(impl_def_id)
             {
                 // If we ever reach here it means that the generated derive
diff --git a/compiler/rustc_mir_transform/src/coroutine/drop.rs b/compiler/rustc_mir_transform/src/coroutine/drop.rs
index 406575c4f43..1a314e029f4 100644
--- a/compiler/rustc_mir_transform/src/coroutine/drop.rs
+++ b/compiler/rustc_mir_transform/src/coroutine/drop.rs
@@ -23,10 +23,10 @@ impl<'tcx> MutVisitor<'tcx> for FixReturnPendingVisitor<'tcx> {
         }
 
         // Converting `_0 = Poll::<Rv>::Pending` to `_0 = Poll::<()>::Pending`
-        if let Rvalue::Aggregate(kind, _) = rvalue {
-            if let AggregateKind::Adt(_, _, ref mut args, _, _) = **kind {
-                *args = self.tcx.mk_args(&[self.tcx.types.unit.into()]);
-            }
+        if let Rvalue::Aggregate(kind, _) = rvalue
+            && let AggregateKind::Adt(_, _, ref mut args, _, _) = **kind
+        {
+            *args = self.tcx.mk_args(&[self.tcx.types.unit.into()]);
         }
     }
 }
diff --git a/compiler/rustc_mir_transform/src/coverage/counters.rs b/compiler/rustc_mir_transform/src/coverage/counters.rs
index 5568d42ab8f..879a20e771d 100644
--- a/compiler/rustc_mir_transform/src/coverage/counters.rs
+++ b/compiler/rustc_mir_transform/src/coverage/counters.rs
@@ -16,7 +16,6 @@ use crate::coverage::graph::{BasicCoverageBlock, CoverageGraph};
 
 mod balanced_flow;
 pub(crate) mod node_flow;
-mod union_find;
 
 /// Struct containing the results of [`prepare_bcb_counters_data`].
 pub(crate) struct BcbCountersData {
diff --git a/compiler/rustc_mir_transform/src/coverage/counters/node_flow.rs b/compiler/rustc_mir_transform/src/coverage/counters/node_flow.rs
index 91ed54b8b59..e063f75887b 100644
--- a/compiler/rustc_mir_transform/src/coverage/counters/node_flow.rs
+++ b/compiler/rustc_mir_transform/src/coverage/counters/node_flow.rs
@@ -7,13 +7,12 @@
 //! (Knuth & Stevenson, 1973).
 
 use rustc_data_structures::graph;
+use rustc_data_structures::union_find::UnionFind;
 use rustc_index::bit_set::DenseBitSet;
 use rustc_index::{Idx, IndexSlice, IndexVec};
 pub(crate) use rustc_middle::mir::coverage::NodeFlowData;
 use rustc_middle::mir::coverage::Op;
 
-use crate::coverage::counters::union_find::UnionFind;
-
 #[cfg(test)]
 mod tests;
 
diff --git a/compiler/rustc_mir_transform/src/coverage/counters/union_find.rs b/compiler/rustc_mir_transform/src/coverage/counters/union_find.rs
deleted file mode 100644
index a826a953fa6..00000000000
--- a/compiler/rustc_mir_transform/src/coverage/counters/union_find.rs
+++ /dev/null
@@ -1,96 +0,0 @@
-use std::cmp::Ordering;
-use std::mem;
-
-use rustc_index::{Idx, IndexVec};
-
-#[cfg(test)]
-mod tests;
-
-/// Simple implementation of a union-find data structure, i.e. a disjoint-set
-/// forest.
-#[derive(Debug)]
-pub(crate) struct UnionFind<Key: Idx> {
-    table: IndexVec<Key, UnionFindEntry<Key>>,
-}
-
-#[derive(Debug)]
-struct UnionFindEntry<Key> {
-    /// Transitively points towards the "root" of the set containing this key.
-    ///
-    /// Invariant: A root key is its own parent.
-    parent: Key,
-    /// When merging two "root" keys, their ranks determine which key becomes
-    /// the new root, to prevent the parent tree from becoming unnecessarily
-    /// tall. See [`UnionFind::unify`] for details.
-    rank: u32,
-}
-
-impl<Key: Idx> UnionFind<Key> {
-    /// Creates a new disjoint-set forest containing the keys `0..num_keys`.
-    /// Initially, every key is part of its own one-element set.
-    pub(crate) fn new(num_keys: usize) -> Self {
-        // Initially, every key is the root of its own set, so its parent is itself.
-        Self { table: IndexVec::from_fn_n(|key| UnionFindEntry { parent: key, rank: 0 }, num_keys) }
-    }
-
-    /// Returns the "root" key of the disjoint-set containing the given key.
-    /// If two keys have the same root, they belong to the same set.
-    ///
-    /// Also updates internal data structures to make subsequent `find`
-    /// operations faster.
-    pub(crate) fn find(&mut self, key: Key) -> Key {
-        // Loop until we find a key that is its own parent.
-        let mut curr = key;
-        while let parent = self.table[curr].parent
-            && curr != parent
-        {
-            // Perform "path compression" by peeking one layer ahead, and
-            // setting the current key's parent to that value.
-            // (This works even when `parent` is the root of its set, because
-            // of the invariant that a root is its own parent.)
-            let parent_parent = self.table[parent].parent;
-            self.table[curr].parent = parent_parent;
-
-            // Advance by one step and continue.
-            curr = parent;
-        }
-        curr
-    }
-
-    /// Merges the set containing `a` and the set containing `b` into one set.
-    ///
-    /// Returns the common root of both keys, after the merge.
-    pub(crate) fn unify(&mut self, a: Key, b: Key) -> Key {
-        let mut a = self.find(a);
-        let mut b = self.find(b);
-
-        // If both keys have the same root, they're already in the same set,
-        // so there's nothing more to do.
-        if a == b {
-            return a;
-        };
-
-        // Ensure that `a` has strictly greater rank, swapping if necessary.
-        // If both keys have the same rank, increment the rank of `a` so that
-        // future unifications will also prefer `a`, leading to flatter trees.
-        match Ord::cmp(&self.table[a].rank, &self.table[b].rank) {
-            Ordering::Less => mem::swap(&mut a, &mut b),
-            Ordering::Equal => self.table[a].rank += 1,
-            Ordering::Greater => {}
-        }
-
-        debug_assert!(self.table[a].rank > self.table[b].rank);
-        debug_assert_eq!(self.table[b].parent, b);
-
-        // Make `a` the parent of `b`.
-        self.table[b].parent = a;
-
-        a
-    }
-
-    /// Takes a "snapshot" of the current state of this disjoint-set forest, in
-    /// the form of a vector that directly maps each key to its current root.
-    pub(crate) fn snapshot(&mut self) -> IndexVec<Key, Key> {
-        self.table.indices().map(|key| self.find(key)).collect()
-    }
-}
diff --git a/compiler/rustc_mir_transform/src/coverage/counters/union_find/tests.rs b/compiler/rustc_mir_transform/src/coverage/counters/union_find/tests.rs
deleted file mode 100644
index 34a4e4f8e6e..00000000000
--- a/compiler/rustc_mir_transform/src/coverage/counters/union_find/tests.rs
+++ /dev/null
@@ -1,32 +0,0 @@
-use super::UnionFind;
-
-#[test]
-fn empty() {
-    let mut sets = UnionFind::<u32>::new(10);
-
-    for i in 1..10 {
-        assert_eq!(sets.find(i), i);
-    }
-}
-
-#[test]
-fn transitive() {
-    let mut sets = UnionFind::<u32>::new(10);
-
-    sets.unify(3, 7);
-    sets.unify(4, 2);
-
-    assert_eq!(sets.find(7), sets.find(3));
-    assert_eq!(sets.find(2), sets.find(4));
-    assert_ne!(sets.find(3), sets.find(4));
-
-    sets.unify(7, 4);
-
-    assert_eq!(sets.find(7), sets.find(3));
-    assert_eq!(sets.find(2), sets.find(4));
-    assert_eq!(sets.find(3), sets.find(4));
-
-    for i in [0, 1, 5, 6, 8, 9] {
-        assert_eq!(sets.find(i), i);
-    }
-}
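
The two deletions above move union-find out of this crate: per the import change in node_flow.rs, the type now lives in `rustc_data_structures::union_find`. A minimal usage sketch, assuming the relocated `UnionFind` keeps the `new`/`find`/`unify`/`snapshot` API shown in the deleted code:

    use rustc_data_structures::union_find::UnionFind;

    #[test]
    fn union_find_smoke() {
        // Ten singleton sets: initially every key is its own root.
        let mut sets = UnionFind::<u32>::new(10);

        // Unification is transitive: 3, 7, and 4 end up sharing one root.
        sets.unify(3, 7);
        sets.unify(7, 4);
        assert_eq!(sets.find(3), sets.find(4));

        // `snapshot` maps every key directly to its current root.
        let roots = sets.snapshot();
        assert_eq!(roots[3u32], roots[4u32]);
    }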
diff --git a/compiler/rustc_mir_transform/src/coverage/hir_info.rs b/compiler/rustc_mir_transform/src/coverage/hir_info.rs
new file mode 100644
index 00000000000..28fdc52b06c
--- /dev/null
+++ b/compiler/rustc_mir_transform/src/coverage/hir_info.rs
@@ -0,0 +1,128 @@
+use rustc_hir as hir;
+use rustc_hir::intravisit::{Visitor, walk_expr};
+use rustc_middle::hir::nested_filter;
+use rustc_middle::ty::TyCtxt;
+use rustc_span::Span;
+use rustc_span::def_id::LocalDefId;
+
+/// Function information extracted from HIR by the coverage instrumentor.
+#[derive(Debug)]
+pub(crate) struct ExtractedHirInfo {
+    pub(crate) function_source_hash: u64,
+    pub(crate) is_async_fn: bool,
+    /// The span of the function's signature, if available.
+    /// Must have the same context and filename as the body span.
+    pub(crate) fn_sig_span: Option<Span>,
+    pub(crate) body_span: Span,
+    /// "Holes" are regions within the function body (or its expansions) that
+    /// should not be included in coverage spans for this function
+    /// (e.g. closures and nested items).
+    pub(crate) hole_spans: Vec<Span>,
+}
+
+pub(crate) fn extract_hir_info<'tcx>(tcx: TyCtxt<'tcx>, def_id: LocalDefId) -> ExtractedHirInfo {
+    // FIXME(#79625): Consider improving MIR to provide the information needed, to avoid going back
+    // to HIR for it.
+
+    // HACK: For synthetic MIR bodies (async closures), use the def id of the HIR body.
+    if tcx.is_synthetic_mir(def_id) {
+        return extract_hir_info(tcx, tcx.local_parent(def_id));
+    }
+
+    let hir_node = tcx.hir_node_by_def_id(def_id);
+    let fn_body_id = hir_node.body_id().expect("HIR node is a function with body");
+    let hir_body = tcx.hir_body(fn_body_id);
+
+    let maybe_fn_sig = hir_node.fn_sig();
+    let is_async_fn = maybe_fn_sig.is_some_and(|fn_sig| fn_sig.header.is_async());
+
+    let mut body_span = hir_body.value.span;
+
+    use hir::{Closure, Expr, ExprKind, Node};
+    // Unexpand a closure's body span back to the context of its declaration.
+    // This helps with closure bodies that consist of just a single bang-macro,
+    // and also with closure bodies produced by async desugaring.
+    if let Node::Expr(&Expr { kind: ExprKind::Closure(&Closure { fn_decl_span, .. }), .. }) =
+        hir_node
+    {
+        body_span = body_span.find_ancestor_in_same_ctxt(fn_decl_span).unwrap_or(body_span);
+    }
+
+    // The actual signature span is only used if it has the same context and
+    // filename as the body, and precedes the body.
+    let fn_sig_span = maybe_fn_sig.map(|fn_sig| fn_sig.span).filter(|&fn_sig_span| {
+        let source_map = tcx.sess.source_map();
+        let file_idx = |span: Span| source_map.lookup_source_file_idx(span.lo());
+
+        fn_sig_span.eq_ctxt(body_span)
+            && fn_sig_span.hi() <= body_span.lo()
+            && file_idx(fn_sig_span) == file_idx(body_span)
+    });
+
+    let function_source_hash = hash_mir_source(tcx, hir_body);
+
+    let hole_spans = extract_hole_spans_from_hir(tcx, hir_body);
+
+    ExtractedHirInfo { function_source_hash, is_async_fn, fn_sig_span, body_span, hole_spans }
+}
+
+fn hash_mir_source<'tcx>(tcx: TyCtxt<'tcx>, hir_body: &'tcx hir::Body<'tcx>) -> u64 {
+    let owner = hir_body.id().hir_id.owner;
+    tcx.hir_owner_nodes(owner)
+        .opt_hash_including_bodies
+        .expect("hash should be present when coverage instrumentation is enabled")
+        .to_smaller_hash()
+        .as_u64()
+}
+
+fn extract_hole_spans_from_hir<'tcx>(tcx: TyCtxt<'tcx>, hir_body: &hir::Body<'tcx>) -> Vec<Span> {
+    struct HolesVisitor<'tcx> {
+        tcx: TyCtxt<'tcx>,
+        hole_spans: Vec<Span>,
+    }
+
+    impl<'tcx> Visitor<'tcx> for HolesVisitor<'tcx> {
+        /// We have special handling for nested items, but we still want to
+        /// traverse into nested bodies of things that are not considered items,
+        /// such as "anon consts" (e.g. array lengths).
+        type NestedFilter = nested_filter::OnlyBodies;
+
+        fn maybe_tcx(&mut self) -> TyCtxt<'tcx> {
+            self.tcx
+        }
+
+        /// We override `visit_nested_item` instead of `visit_item` because we
+        /// only need the item's span, not the item itself.
+        fn visit_nested_item(&mut self, id: hir::ItemId) -> Self::Result {
+            let span = self.tcx.def_span(id.owner_id.def_id);
+            self.visit_hole_span(span);
+            // Having visited this item, we don't care about its children,
+            // so don't call `walk_item`.
+        }
+
+        // We override `visit_expr` instead of the more specific expression
+        // visitors, so that we have direct access to the expression span.
+        fn visit_expr(&mut self, expr: &'tcx hir::Expr<'tcx>) {
+            match expr.kind {
+                hir::ExprKind::Closure(_) | hir::ExprKind::ConstBlock(_) => {
+                    self.visit_hole_span(expr.span);
+                    // Having visited this expression, we don't care about its
+                    // children, so don't call `walk_expr`.
+                }
+
+                // For other expressions, recursively visit as normal.
+                _ => walk_expr(self, expr),
+            }
+        }
+    }
+    impl HolesVisitor<'_> {
+        fn visit_hole_span(&mut self, hole_span: Span) {
+            self.hole_spans.push(hole_span);
+        }
+    }
+
+    let mut visitor = HolesVisitor { tcx, hole_spans: vec![] };
+
+    visitor.visit_body(hir_body);
+    visitor.hole_spans
+}
diff --git a/compiler/rustc_mir_transform/src/coverage/mappings.rs b/compiler/rustc_mir_transform/src/coverage/mappings.rs
index b4b4d0416fb..8dbe564f517 100644
--- a/compiler/rustc_mir_transform/src/coverage/mappings.rs
+++ b/compiler/rustc_mir_transform/src/coverage/mappings.rs
@@ -1,123 +1,36 @@
-use std::collections::BTreeSet;
-
-use rustc_data_structures::fx::FxIndexMap;
 use rustc_index::IndexVec;
 use rustc_middle::mir::coverage::{
-    BlockMarkerId, BranchSpan, ConditionId, ConditionInfo, CoverageInfoHi, CoverageKind,
+    BlockMarkerId, BranchSpan, CoverageInfoHi, CoverageKind, Mapping, MappingKind,
 };
 use rustc_middle::mir::{self, BasicBlock, StatementKind};
 use rustc_middle::ty::TyCtxt;
-use rustc_span::Span;
 
-use crate::coverage::ExtractedHirInfo;
-use crate::coverage::graph::{BasicCoverageBlock, CoverageGraph, START_BCB};
+use crate::coverage::graph::CoverageGraph;
+use crate::coverage::hir_info::ExtractedHirInfo;
 use crate::coverage::spans::extract_refined_covspans;
 use crate::coverage::unexpand::unexpand_into_body_span;
-use crate::errors::MCDCExceedsTestVectorLimit;
-
-/// Associates an ordinary executable code span with its corresponding BCB.
-#[derive(Debug)]
-pub(super) struct CodeMapping {
-    pub(super) span: Span,
-    pub(super) bcb: BasicCoverageBlock,
-}
-
-/// This is separate from [`MCDCBranch`] to help prepare for larger changes
-/// that will be needed for improved branch coverage in the future.
-/// (See <https://github.com/rust-lang/rust/pull/124217>.)
-#[derive(Debug)]
-pub(super) struct BranchPair {
-    pub(super) span: Span,
-    pub(super) true_bcb: BasicCoverageBlock,
-    pub(super) false_bcb: BasicCoverageBlock,
-}
-
-/// Associates an MC/DC branch span with condition info besides fields for normal branch.
-#[derive(Debug)]
-pub(super) struct MCDCBranch {
-    pub(super) span: Span,
-    pub(super) true_bcb: BasicCoverageBlock,
-    pub(super) false_bcb: BasicCoverageBlock,
-    pub(super) condition_info: ConditionInfo,
-    // Offset added to test vector idx if this branch is evaluated to true.
-    pub(super) true_index: usize,
-    // Offset added to test vector idx if this branch is evaluated to false.
-    pub(super) false_index: usize,
-}
-
-/// Associates an MC/DC decision with its join BCBs.
-#[derive(Debug)]
-pub(super) struct MCDCDecision {
-    pub(super) span: Span,
-    pub(super) end_bcbs: BTreeSet<BasicCoverageBlock>,
-    pub(super) bitmap_idx: usize,
-    pub(super) num_test_vectors: usize,
-    pub(super) decision_depth: u16,
-}
-
-// LLVM uses `i32` to index the bitmap. Thus `i32::MAX` is the hard limit for number of all test vectors
-// in a function.
-const MCDC_MAX_BITMAP_SIZE: usize = i32::MAX as usize;
 
 #[derive(Default)]
-pub(super) struct ExtractedMappings {
-    pub(super) code_mappings: Vec<CodeMapping>,
-    pub(super) branch_pairs: Vec<BranchPair>,
-    pub(super) mcdc_bitmap_bits: usize,
-    pub(super) mcdc_degraded_branches: Vec<MCDCBranch>,
-    pub(super) mcdc_mappings: Vec<(MCDCDecision, Vec<MCDCBranch>)>,
+pub(crate) struct ExtractedMappings {
+    pub(crate) mappings: Vec<Mapping>,
 }
 
-/// Extracts coverage-relevant spans from MIR, and associates them with
-/// their corresponding BCBs.
-pub(super) fn extract_all_mapping_info_from_mir<'tcx>(
+/// Extracts coverage-relevant spans from MIR, and uses them to create
+/// coverage mapping data for inclusion in MIR.
+pub(crate) fn extract_mappings_from_mir<'tcx>(
     tcx: TyCtxt<'tcx>,
     mir_body: &mir::Body<'tcx>,
     hir_info: &ExtractedHirInfo,
     graph: &CoverageGraph,
 ) -> ExtractedMappings {
-    let mut code_mappings = vec![];
-    let mut branch_pairs = vec![];
-    let mut mcdc_bitmap_bits = 0;
-    let mut mcdc_degraded_branches = vec![];
-    let mut mcdc_mappings = vec![];
+    let mut mappings = vec![];
 
-    if hir_info.is_async_fn || tcx.sess.coverage_no_mir_spans() {
-        // An async function desugars into a function that returns a future,
-        // with the user code wrapped in a closure. Any spans in the desugared
-        // outer function will be unhelpful, so just keep the signature span
-        // and ignore all of the spans in the MIR body.
-        //
-        // When debugging flag `-Zcoverage-options=no-mir-spans` is set, we need
-        // to give the same treatment to _all_ functions, because `llvm-cov`
-        // seems to ignore functions that don't have any ordinary code spans.
-        if let Some(span) = hir_info.fn_sig_span {
-            code_mappings.push(CodeMapping { span, bcb: START_BCB });
-        }
-    } else {
-        // Extract coverage spans from MIR statements/terminators as normal.
-        extract_refined_covspans(tcx, mir_body, hir_info, graph, &mut code_mappings);
-    }
+    // Extract ordinary code mappings from MIR statement/terminator spans.
+    extract_refined_covspans(tcx, mir_body, hir_info, graph, &mut mappings);
 
-    branch_pairs.extend(extract_branch_pairs(mir_body, hir_info, graph));
+    extract_branch_mappings(mir_body, hir_info, graph, &mut mappings);
 
-    extract_mcdc_mappings(
-        mir_body,
-        tcx,
-        hir_info.body_span,
-        graph,
-        &mut mcdc_bitmap_bits,
-        &mut mcdc_degraded_branches,
-        &mut mcdc_mappings,
-    );
-
-    ExtractedMappings {
-        code_mappings,
-        branch_pairs,
-        mcdc_bitmap_bits,
-        mcdc_degraded_branches,
-        mcdc_mappings,
-    }
+    ExtractedMappings { mappings }
 }
 
 fn resolve_block_markers(
@@ -141,25 +54,18 @@ fn resolve_block_markers(
     block_markers
 }
 
-// FIXME: There is currently a lot of redundancy between
-// `extract_branch_pairs` and `extract_mcdc_mappings`. This is needed so
-// that they can each be modified without interfering with the other, but in
-// the long term we should try to bring them together again when branch coverage
-// and MC/DC coverage support are more mature.
-
-pub(super) fn extract_branch_pairs(
+pub(super) fn extract_branch_mappings(
     mir_body: &mir::Body<'_>,
     hir_info: &ExtractedHirInfo,
     graph: &CoverageGraph,
-) -> Vec<BranchPair> {
-    let Some(coverage_info_hi) = mir_body.coverage_info_hi.as_deref() else { return vec![] };
+    mappings: &mut Vec<Mapping>,
+) {
+    let Some(coverage_info_hi) = mir_body.coverage_info_hi.as_deref() else { return };
 
     let block_markers = resolve_block_markers(coverage_info_hi, mir_body);
 
-    coverage_info_hi
-        .branch_spans
-        .iter()
-        .filter_map(|&BranchSpan { span: raw_span, true_marker, false_marker }| {
+    mappings.extend(coverage_info_hi.branch_spans.iter().filter_map(
+        |&BranchSpan { span: raw_span, true_marker, false_marker }| try {
             // For now, ignore any branch span that was introduced by
             // expansion. This makes things like assert macros less noisy.
             if !raw_span.ctxt().outer_expn_data().is_root() {
@@ -172,179 +78,7 @@ pub(super) fn extract_branch_pairs(
             let true_bcb = bcb_from_marker(true_marker)?;
             let false_bcb = bcb_from_marker(false_marker)?;
 
-            Some(BranchPair { span, true_bcb, false_bcb })
-        })
-        .collect::<Vec<_>>()
-}
-
-pub(super) fn extract_mcdc_mappings(
-    mir_body: &mir::Body<'_>,
-    tcx: TyCtxt<'_>,
-    body_span: Span,
-    graph: &CoverageGraph,
-    mcdc_bitmap_bits: &mut usize,
-    mcdc_degraded_branches: &mut impl Extend<MCDCBranch>,
-    mcdc_mappings: &mut impl Extend<(MCDCDecision, Vec<MCDCBranch>)>,
-) {
-    let Some(coverage_info_hi) = mir_body.coverage_info_hi.as_deref() else { return };
-
-    let block_markers = resolve_block_markers(coverage_info_hi, mir_body);
-
-    let bcb_from_marker = |marker: BlockMarkerId| graph.bcb_from_bb(block_markers[marker]?);
-
-    let check_branch_bcb =
-        |raw_span: Span, true_marker: BlockMarkerId, false_marker: BlockMarkerId| {
-            // For now, ignore any branch span that was introduced by
-            // expansion. This makes things like assert macros less noisy.
-            if !raw_span.ctxt().outer_expn_data().is_root() {
-                return None;
-            }
-            let span = unexpand_into_body_span(raw_span, body_span)?;
-
-            let true_bcb = bcb_from_marker(true_marker)?;
-            let false_bcb = bcb_from_marker(false_marker)?;
-            Some((span, true_bcb, false_bcb))
-        };
-
-    let to_mcdc_branch = |&mir::coverage::MCDCBranchSpan {
-                              span: raw_span,
-                              condition_info,
-                              true_marker,
-                              false_marker,
-                          }| {
-        let (span, true_bcb, false_bcb) = check_branch_bcb(raw_span, true_marker, false_marker)?;
-        Some(MCDCBranch {
-            span,
-            true_bcb,
-            false_bcb,
-            condition_info,
-            true_index: usize::MAX,
-            false_index: usize::MAX,
-        })
-    };
-
-    let mut get_bitmap_idx = |num_test_vectors: usize| -> Option<usize> {
-        let bitmap_idx = *mcdc_bitmap_bits;
-        let next_bitmap_bits = bitmap_idx.saturating_add(num_test_vectors);
-        (next_bitmap_bits <= MCDC_MAX_BITMAP_SIZE).then(|| {
-            *mcdc_bitmap_bits = next_bitmap_bits;
-            bitmap_idx
-        })
-    };
-    mcdc_degraded_branches
-        .extend(coverage_info_hi.mcdc_degraded_branch_spans.iter().filter_map(to_mcdc_branch));
-
-    mcdc_mappings.extend(coverage_info_hi.mcdc_spans.iter().filter_map(|(decision, branches)| {
-        if branches.len() == 0 {
-            return None;
-        }
-        let decision_span = unexpand_into_body_span(decision.span, body_span)?;
-
-        let end_bcbs = decision
-            .end_markers
-            .iter()
-            .map(|&marker| bcb_from_marker(marker))
-            .collect::<Option<_>>()?;
-        let mut branch_mappings: Vec<_> = branches.into_iter().filter_map(to_mcdc_branch).collect();
-        if branch_mappings.len() != branches.len() {
-            mcdc_degraded_branches.extend(branch_mappings);
-            return None;
-        }
-        let num_test_vectors = calc_test_vectors_index(&mut branch_mappings);
-        let Some(bitmap_idx) = get_bitmap_idx(num_test_vectors) else {
-            tcx.dcx().emit_warn(MCDCExceedsTestVectorLimit {
-                span: decision_span,
-                max_num_test_vectors: MCDC_MAX_BITMAP_SIZE,
-            });
-            mcdc_degraded_branches.extend(branch_mappings);
-            return None;
-        };
-        // LLVM requires span of the decision contains all spans of its conditions.
-        // Usually the decision span meets the requirement well but in cases like macros it may not.
-        let span = branch_mappings
-            .iter()
-            .map(|branch| branch.span)
-            .reduce(|lhs, rhs| lhs.to(rhs))
-            .map(
-                |joint_span| {
-                    if decision_span.contains(joint_span) { decision_span } else { joint_span }
-                },
-            )
-            .expect("branch mappings are ensured to be non-empty as checked above");
-        Some((
-            MCDCDecision {
-                span,
-                end_bcbs,
-                bitmap_idx,
-                num_test_vectors,
-                decision_depth: decision.decision_depth,
-            },
-            branch_mappings,
-        ))
-    }));
-}
-
-// LLVM checks the executed test vector by accumulating indices of tested branches.
-// We calculate number of all possible test vectors of the decision and assign indices
-// to branches here.
-// See [the rfc](https://discourse.llvm.org/t/rfc-coverage-new-algorithm-and-file-format-for-mc-dc/76798/)
-// for more details about the algorithm.
-// This function is mostly like [`TVIdxBuilder::TvIdxBuilder`](https://github.com/llvm/llvm-project/blob/d594d9f7f4dc6eb748b3261917db689fdc348b96/llvm/lib/ProfileData/Coverage/CoverageMapping.cpp#L226)
-fn calc_test_vectors_index(conditions: &mut Vec<MCDCBranch>) -> usize {
-    let mut indegree_stats = IndexVec::<ConditionId, usize>::from_elem_n(0, conditions.len());
-    // `num_paths` is `width` described at the llvm rfc, which indicates how many paths reaching the condition node.
-    let mut num_paths_stats = IndexVec::<ConditionId, usize>::from_elem_n(0, conditions.len());
-    let mut next_conditions = conditions
-        .iter_mut()
-        .map(|branch| {
-            let ConditionInfo { condition_id, true_next_id, false_next_id } = branch.condition_info;
-            [true_next_id, false_next_id]
-                .into_iter()
-                .flatten()
-                .for_each(|next_id| indegree_stats[next_id] += 1);
-            (condition_id, branch)
-        })
-        .collect::<FxIndexMap<_, _>>();
-
-    let mut queue =
-        std::collections::VecDeque::from_iter(next_conditions.swap_remove(&ConditionId::START));
-    num_paths_stats[ConditionId::START] = 1;
-    let mut decision_end_nodes = Vec::new();
-    while let Some(branch) = queue.pop_front() {
-        let ConditionInfo { condition_id, true_next_id, false_next_id } = branch.condition_info;
-        let (false_index, true_index) = (&mut branch.false_index, &mut branch.true_index);
-        let this_paths_count = num_paths_stats[condition_id];
-        // Note. First check the false next to ensure conditions are touched in same order with llvm-cov.
-        for (next, index) in [(false_next_id, false_index), (true_next_id, true_index)] {
-            if let Some(next_id) = next {
-                let next_paths_count = &mut num_paths_stats[next_id];
-                *index = *next_paths_count;
-                *next_paths_count = next_paths_count.saturating_add(this_paths_count);
-                let next_indegree = &mut indegree_stats[next_id];
-                *next_indegree -= 1;
-                if *next_indegree == 0 {
-                    queue.push_back(next_conditions.swap_remove(&next_id).expect(
-                        "conditions with non-zero indegree before must be in next_conditions",
-                    ));
-                }
-            } else {
-                decision_end_nodes.push((this_paths_count, condition_id, index));
-            }
-        }
-    }
-    assert!(next_conditions.is_empty(), "the decision tree has untouched nodes");
-    let mut cur_idx = 0;
-    // LLVM hopes the end nodes are sorted in descending order by `num_paths` so that it can
-    // optimize bitmap size for decisions in tree form such as `a && b && c && d && ...`.
-    decision_end_nodes.sort_by_key(|(num_paths, _, _)| usize::MAX - *num_paths);
-    for (num_paths, condition_id, index) in decision_end_nodes {
-        assert_eq!(
-            num_paths, num_paths_stats[condition_id],
-            "end nodes should not be updated since they were visited"
-        );
-        assert_eq!(*index, usize::MAX, "end nodes should not be assigned index before");
-        *index = cur_idx;
-        cur_idx += num_paths;
-    }
-    cur_idx
+            Mapping { span, kind: MappingKind::Branch { true_bcb, false_bcb } }
+        },
+    ));
 }
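
For context on the MC/DC machinery deleted above, a worked trace of the removed `calc_test_vectors_index` on the two-condition decision `a && b`, under my reading of the deleted code:

    // Conditions: a (id 0, true_next = b, false_next = None),
    //             b (id 1, both nexts = None). num_paths[a] = 1 (START).
    // Visit a: false edge ends the decision -> end node (width 1, a.false_index);
    //          true edge reaches b          -> a.true_index = 0, num_paths[b] = 1.
    // Visit b: both edges end the decision  -> end nodes for b.false_index
    //          and b.true_index, each with width 1.
    // Assign indices to end nodes in visit order:
    //   a.false_index = 0, b.false_index = 1, b.true_index = 2; total = 3 test vectors.
    // The executed test vector is the sum of the taken branches' indices:
    //   a=F -> 0;  a=T, b=F -> 0 + 1 = 1;  a=T, b=T -> 0 + 2 = 2.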
diff --git a/compiler/rustc_mir_transform/src/coverage/mod.rs b/compiler/rustc_mir_transform/src/coverage/mod.rs
index f253d1662ca..c5fef299244 100644
--- a/compiler/rustc_mir_transform/src/coverage/mod.rs
+++ b/compiler/rustc_mir_transform/src/coverage/mod.rs
@@ -1,5 +1,15 @@
+use rustc_middle::mir::coverage::{CoverageKind, FunctionCoverageInfo};
+use rustc_middle::mir::{self, BasicBlock, Statement, StatementKind, TerminatorKind};
+use rustc_middle::ty::TyCtxt;
+use tracing::{debug, debug_span, trace};
+
+use crate::coverage::counters::BcbCountersData;
+use crate::coverage::graph::CoverageGraph;
+use crate::coverage::mappings::ExtractedMappings;
+
 mod counters;
 mod graph;
+mod hir_info;
 mod mappings;
 pub(super) mod query;
 mod spans;
@@ -7,22 +17,6 @@ mod spans;
 mod tests;
 mod unexpand;
 
-use rustc_hir as hir;
-use rustc_hir::intravisit::{Visitor, walk_expr};
-use rustc_middle::hir::nested_filter;
-use rustc_middle::mir::coverage::{
-    CoverageKind, DecisionInfo, FunctionCoverageInfo, Mapping, MappingKind,
-};
-use rustc_middle::mir::{self, BasicBlock, Statement, StatementKind, TerminatorKind};
-use rustc_middle::ty::TyCtxt;
-use rustc_span::Span;
-use rustc_span::def_id::LocalDefId;
-use tracing::{debug, debug_span, trace};
-
-use crate::coverage::counters::BcbCountersData;
-use crate::coverage::graph::CoverageGraph;
-use crate::coverage::mappings::ExtractedMappings;
-
 /// Inserts `StatementKind::Coverage` statements that either instrument the binary with injected
 /// counters, via intrinsic `llvm.instrprof.increment`, and/or inject metadata used during codegen
 /// to construct the coverage map.
@@ -69,7 +63,7 @@ fn instrument_function_for_coverage<'tcx>(tcx: TyCtxt<'tcx>, mir_body: &mut mir:
     let def_id = mir_body.source.def_id();
     let _span = debug_span!("instrument_function_for_coverage", ?def_id).entered();
 
-    let hir_info = extract_hir_info(tcx, def_id.expect_local());
+    let hir_info = hir_info::extract_hir_info(tcx, def_id.expect_local());
 
     // Build the coverage graph, which is a simplified view of the MIR control-flow
     // graph that ignores some details not relevant to coverage instrumentation.
@@ -77,10 +71,8 @@ fn instrument_function_for_coverage<'tcx>(tcx: TyCtxt<'tcx>, mir_body: &mut mir:
 
     ////////////////////////////////////////////////////
     // Extract coverage spans and other mapping info from MIR.
-    let extracted_mappings =
-        mappings::extract_all_mapping_info_from_mir(tcx, mir_body, &hir_info, &graph);
-
-    let mappings = create_mappings(&extracted_mappings);
+    let ExtractedMappings { mappings } =
+        mappings::extract_mappings_from_mir(tcx, mir_body, &hir_info, &graph);
     if mappings.is_empty() {
         // No spans could be converted into valid mappings, so skip this function.
         debug!("no spans could be converted into valid mappings; skipping");
@@ -95,14 +87,6 @@ fn instrument_function_for_coverage<'tcx>(tcx: TyCtxt<'tcx>, mir_body: &mut mir:
 
     // Inject coverage statements into MIR.
     inject_coverage_statements(mir_body, &graph);
-    inject_mcdc_statements(mir_body, &graph, &extracted_mappings);
-
-    let mcdc_num_condition_bitmaps = extracted_mappings
-        .mcdc_mappings
-        .iter()
-        .map(|&(mappings::MCDCDecision { decision_depth, .. }, _)| decision_depth)
-        .max()
-        .map_or(0, |max| usize::from(max) + 1);
 
     mir_body.function_coverage_info = Some(Box::new(FunctionCoverageInfo {
         function_source_hash: hir_info.function_source_hash,
@@ -111,97 +95,9 @@ fn instrument_function_for_coverage<'tcx>(tcx: TyCtxt<'tcx>, mir_body: &mut mir:
         priority_list,
 
         mappings,
-
-        mcdc_bitmap_bits: extracted_mappings.mcdc_bitmap_bits,
-        mcdc_num_condition_bitmaps,
     }));
 }
 
-/// For each coverage span extracted from MIR, create a corresponding mapping.
-///
-/// FIXME(Zalathar): This used to be where BCBs in the extracted mappings were
-/// resolved to a `CovTerm`. But that is now handled elsewhere, so this
-/// function can potentially be simplified even further.
-fn create_mappings(extracted_mappings: &ExtractedMappings) -> Vec<Mapping> {
-    // Fully destructure the mappings struct to make sure we don't miss any kinds.
-    let ExtractedMappings {
-        code_mappings,
-        branch_pairs,
-        mcdc_bitmap_bits: _,
-        mcdc_degraded_branches,
-        mcdc_mappings,
-    } = extracted_mappings;
-    let mut mappings = Vec::new();
-
-    mappings.extend(code_mappings.iter().map(
-        // Ordinary code mappings are the simplest kind.
-        |&mappings::CodeMapping { span, bcb }| {
-            let kind = MappingKind::Code { bcb };
-            Mapping { kind, span }
-        },
-    ));
-
-    mappings.extend(branch_pairs.iter().map(
-        |&mappings::BranchPair { span, true_bcb, false_bcb }| {
-            let kind = MappingKind::Branch { true_bcb, false_bcb };
-            Mapping { kind, span }
-        },
-    ));
-
-    // MCDC branch mappings are appended with their decisions in case decisions were ignored.
-    mappings.extend(mcdc_degraded_branches.iter().map(
-        |&mappings::MCDCBranch {
-             span,
-             true_bcb,
-             false_bcb,
-             condition_info: _,
-             true_index: _,
-             false_index: _,
-         }| { Mapping { kind: MappingKind::Branch { true_bcb, false_bcb }, span } },
-    ));
-
-    for (decision, branches) in mcdc_mappings {
-        // FIXME(#134497): Previously it was possible for some of these branch
-        // conversions to fail, in which case the remaining branches in the
-        // decision would be degraded to plain `MappingKind::Branch`.
-        // The changes in #134497 made that failure impossible, because the
-        // fallible step was deferred to codegen. But the corresponding code
-        // in codegen wasn't updated to detect the need for a degrade step.
-        let conditions = branches
-            .into_iter()
-            .map(
-                |&mappings::MCDCBranch {
-                     span,
-                     true_bcb,
-                     false_bcb,
-                     condition_info,
-                     true_index: _,
-                     false_index: _,
-                 }| {
-                    Mapping {
-                        kind: MappingKind::MCDCBranch {
-                            true_bcb,
-                            false_bcb,
-                            mcdc_params: condition_info,
-                        },
-                        span,
-                    }
-                },
-            )
-            .collect::<Vec<_>>();
-
-        // LLVM requires end index for counter mapping regions.
-        let kind = MappingKind::MCDCDecision(DecisionInfo {
-            bitmap_idx: (decision.bitmap_idx + decision.num_test_vectors) as u32,
-            num_conditions: u16::try_from(conditions.len()).unwrap(),
-        });
-        let span = decision.span;
-        mappings.extend(std::iter::once(Mapping { kind, span }).chain(conditions.into_iter()));
-    }
-
-    mappings
-}
-
 /// Inject any necessary coverage statements into MIR, so that they influence codegen.
 fn inject_coverage_statements<'tcx>(mir_body: &mut mir::Body<'tcx>, graph: &CoverageGraph) {
     for (bcb, data) in graph.iter_enumerated() {
@@ -210,51 +106,6 @@ fn inject_coverage_statements<'tcx>(mir_body: &mut mir::Body<'tcx>, graph: &Cove
     }
 }
 
-/// For each conditions inject statements to update condition bitmap after it has been evaluated.
-/// For each decision inject statements to update test vector bitmap after it has been evaluated.
-fn inject_mcdc_statements<'tcx>(
-    mir_body: &mut mir::Body<'tcx>,
-    graph: &CoverageGraph,
-    extracted_mappings: &ExtractedMappings,
-) {
-    for (decision, conditions) in &extracted_mappings.mcdc_mappings {
-        // Inject test vector update first because `inject_statement` always insert new statement at head.
-        for &end in &decision.end_bcbs {
-            let end_bb = graph[end].leader_bb();
-            inject_statement(
-                mir_body,
-                CoverageKind::TestVectorBitmapUpdate {
-                    bitmap_idx: decision.bitmap_idx as u32,
-                    decision_depth: decision.decision_depth,
-                },
-                end_bb,
-            );
-        }
-
-        for &mappings::MCDCBranch {
-            span: _,
-            true_bcb,
-            false_bcb,
-            condition_info: _,
-            true_index,
-            false_index,
-        } in conditions
-        {
-            for (index, bcb) in [(false_index, false_bcb), (true_index, true_bcb)] {
-                let bb = graph[bcb].leader_bb();
-                inject_statement(
-                    mir_body,
-                    CoverageKind::CondBitmapUpdate {
-                        index: index as u32,
-                        decision_depth: decision.decision_depth,
-                    },
-                    bb,
-                );
-            }
-        }
-    }
-}
-
 fn inject_statement(mir_body: &mut mir::Body<'_>, counter_kind: CoverageKind, bb: BasicBlock) {
     debug!("  injecting statement {counter_kind:?} for {bb:?}");
     let data = &mut mir_body[bb];
@@ -262,122 +113,3 @@ fn inject_statement(mir_body: &mut mir::Body<'_>, counter_kind: CoverageKind, bb
     let statement = Statement::new(source_info, StatementKind::Coverage(counter_kind));
     data.statements.insert(0, statement);
 }
-
-/// Function information extracted from HIR by the coverage instrumentor.
-#[derive(Debug)]
-struct ExtractedHirInfo {
-    function_source_hash: u64,
-    is_async_fn: bool,
-    /// The span of the function's signature, if available.
-    /// Must have the same context and filename as the body span.
-    fn_sig_span: Option<Span>,
-    body_span: Span,
-    /// "Holes" are regions within the function body (or its expansions) that
-    /// should not be included in coverage spans for this function
-    /// (e.g. closures and nested items).
-    hole_spans: Vec<Span>,
-}
-
-fn extract_hir_info<'tcx>(tcx: TyCtxt<'tcx>, def_id: LocalDefId) -> ExtractedHirInfo {
-    // FIXME(#79625): Consider improving MIR to provide the information needed, to avoid going back
-    // to HIR for it.
-
-    // HACK: For synthetic MIR bodies (async closures), use the def id of the HIR body.
-    if tcx.is_synthetic_mir(def_id) {
-        return extract_hir_info(tcx, tcx.local_parent(def_id));
-    }
-
-    let hir_node = tcx.hir_node_by_def_id(def_id);
-    let fn_body_id = hir_node.body_id().expect("HIR node is a function with body");
-    let hir_body = tcx.hir_body(fn_body_id);
-
-    let maybe_fn_sig = hir_node.fn_sig();
-    let is_async_fn = maybe_fn_sig.is_some_and(|fn_sig| fn_sig.header.is_async());
-
-    let mut body_span = hir_body.value.span;
-
-    use hir::{Closure, Expr, ExprKind, Node};
-    // Unexpand a closure's body span back to the context of its declaration.
-    // This helps with closure bodies that consist of just a single bang-macro,
-    // and also with closure bodies produced by async desugaring.
-    if let Node::Expr(&Expr { kind: ExprKind::Closure(&Closure { fn_decl_span, .. }), .. }) =
-        hir_node
-    {
-        body_span = body_span.find_ancestor_in_same_ctxt(fn_decl_span).unwrap_or(body_span);
-    }
-
-    // The actual signature span is only used if it has the same context and
-    // filename as the body, and precedes the body.
-    let fn_sig_span = maybe_fn_sig.map(|fn_sig| fn_sig.span).filter(|&fn_sig_span| {
-        let source_map = tcx.sess.source_map();
-        let file_idx = |span: Span| source_map.lookup_source_file_idx(span.lo());
-
-        fn_sig_span.eq_ctxt(body_span)
-            && fn_sig_span.hi() <= body_span.lo()
-            && file_idx(fn_sig_span) == file_idx(body_span)
-    });
-
-    let function_source_hash = hash_mir_source(tcx, hir_body);
-
-    let hole_spans = extract_hole_spans_from_hir(tcx, hir_body);
-
-    ExtractedHirInfo { function_source_hash, is_async_fn, fn_sig_span, body_span, hole_spans }
-}
-
-fn hash_mir_source<'tcx>(tcx: TyCtxt<'tcx>, hir_body: &'tcx hir::Body<'tcx>) -> u64 {
-    // FIXME(cjgillot) Stop hashing HIR manually here.
-    let owner = hir_body.id().hir_id.owner;
-    tcx.hir_owner_nodes(owner).opt_hash_including_bodies.unwrap().to_smaller_hash().as_u64()
-}
-
-fn extract_hole_spans_from_hir<'tcx>(tcx: TyCtxt<'tcx>, hir_body: &hir::Body<'tcx>) -> Vec<Span> {
-    struct HolesVisitor<'tcx> {
-        tcx: TyCtxt<'tcx>,
-        hole_spans: Vec<Span>,
-    }
-
-    impl<'tcx> Visitor<'tcx> for HolesVisitor<'tcx> {
-        /// We have special handling for nested items, but we still want to
-        /// traverse into nested bodies of things that are not considered items,
-        /// such as "anon consts" (e.g. array lengths).
-        type NestedFilter = nested_filter::OnlyBodies;
-
-        fn maybe_tcx(&mut self) -> TyCtxt<'tcx> {
-            self.tcx
-        }
-
-        /// We override `visit_nested_item` instead of `visit_item` because we
-        /// only need the item's span, not the item itself.
-        fn visit_nested_item(&mut self, id: hir::ItemId) -> Self::Result {
-            let span = self.tcx.def_span(id.owner_id.def_id);
-            self.visit_hole_span(span);
-            // Having visited this item, we don't care about its children,
-            // so don't call `walk_item`.
-        }
-
-        // We override `visit_expr` instead of the more specific expression
-        // visitors, so that we have direct access to the expression span.
-        fn visit_expr(&mut self, expr: &'tcx hir::Expr<'tcx>) {
-            match expr.kind {
-                hir::ExprKind::Closure(_) | hir::ExprKind::ConstBlock(_) => {
-                    self.visit_hole_span(expr.span);
-                    // Having visited this expression, we don't care about its
-                    // children, so don't call `walk_expr`.
-                }
-
-                // For other expressions, recursively visit as normal.
-                _ => walk_expr(self, expr),
-            }
-        }
-    }
-    impl HolesVisitor<'_> {
-        fn visit_hole_span(&mut self, hole_span: Span) {
-            self.hole_spans.push(hole_span);
-        }
-    }
-
-    let mut visitor = HolesVisitor { tcx, hole_spans: vec![] };
-
-    visitor.visit_body(hir_body);
-    visitor.hole_spans
-}
diff --git a/compiler/rustc_mir_transform/src/coverage/query.rs b/compiler/rustc_mir_transform/src/coverage/query.rs
index ccf76dc7108..63c550c27fe 100644
--- a/compiler/rustc_mir_transform/src/coverage/query.rs
+++ b/compiler/rustc_mir_transform/src/coverage/query.rs
@@ -1,3 +1,5 @@
+use rustc_hir::attrs::{AttributeKind, CoverageAttrKind};
+use rustc_hir::find_attr;
 use rustc_index::bit_set::DenseBitSet;
 use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
 use rustc_middle::mir::coverage::{BasicCoverageBlock, CoverageIdsInfo, CoverageKind, MappingKind};
@@ -5,7 +7,6 @@ use rustc_middle::mir::{Body, Statement, StatementKind};
 use rustc_middle::ty::{self, TyCtxt};
 use rustc_middle::util::Providers;
 use rustc_span::def_id::LocalDefId;
-use rustc_span::sym;
 use tracing::trace;
 
 use crate::coverage::counters::node_flow::make_node_counters;
@@ -32,16 +33,6 @@ fn is_eligible_for_coverage(tcx: TyCtxt<'_>, def_id: LocalDefId) -> bool {
         return false;
     }
 
-    // Don't instrument functions with `#[automatically_derived]` on their
-    // enclosing impl block, on the assumption that most users won't care about
-    // coverage for derived impls.
-    if let Some(impl_of) = tcx.impl_of_method(def_id.to_def_id())
-        && tcx.is_automatically_derived(impl_of)
-    {
-        trace!("InstrumentCoverage skipped for {def_id:?} (automatically derived)");
-        return false;
-    }
-
     if tcx.codegen_fn_attrs(def_id).flags.contains(CodegenFnAttrFlags::NAKED) {
         trace!("InstrumentCoverage skipped for {def_id:?} (`#[naked]`)");
         return false;
@@ -57,22 +48,28 @@ fn is_eligible_for_coverage(tcx: TyCtxt<'_>, def_id: LocalDefId) -> bool {
 
 /// Query implementation for `coverage_attr_on`.
 fn coverage_attr_on(tcx: TyCtxt<'_>, def_id: LocalDefId) -> bool {
-    // Check for annotations directly on this def.
-    if let Some(attr) = tcx.get_attr(def_id, sym::coverage) {
-        match attr.meta_item_list().as_deref() {
-            Some([item]) if item.has_name(sym::off) => return false,
-            Some([item]) if item.has_name(sym::on) => return true,
-            Some(_) | None => {
-                // Other possibilities should have been rejected by `rustc_parse::validate_attr`.
-                // Use `span_delayed_bug` to avoid an ICE in failing builds (#127880).
-                tcx.dcx().span_delayed_bug(attr.span(), "unexpected value of coverage attribute");
-            }
+    // Check for a `#[coverage(..)]` attribute on this def.
+    if let Some(kind) =
+        find_attr!(tcx.get_all_attrs(def_id), AttributeKind::Coverage(_sp, kind) => kind)
+    {
+        match kind {
+            CoverageAttrKind::On => return true,
+            CoverageAttrKind::Off => return false,
         }
+    };
+
+    // Treat `#[automatically_derived]` as an implied `#[coverage(off)]`, on
+    // the assumption that most users won't want coverage for derived impls.
+    //
+    // This affects not just the associated items of an impl block, but also
+    // any closures and other nested functions within those associated items.
+    if tcx.is_automatically_derived(def_id.to_def_id()) {
+        return false;
     }
 
+    // Check the parent def (and so on recursively) until we find an
+    // enclosing attribute or reach the crate root.
     match tcx.opt_local_parent(def_id) {
-        // Check the parent def (and so on recursively) until we find an
-        // enclosing attribute or reach the crate root.
         Some(parent) => tcx.coverage_attr_on(parent),
         // We reached the crate root without seeing a coverage attribute, so
         // allow coverage instrumentation by default.
@@ -114,11 +111,6 @@ fn coverage_ids_info<'tcx>(
                 bcb_needs_counter.insert(true_bcb);
                 bcb_needs_counter.insert(false_bcb);
             }
-            MappingKind::MCDCBranch { true_bcb, false_bcb, mcdc_params: _ } => {
-                bcb_needs_counter.insert(true_bcb);
-                bcb_needs_counter.insert(false_bcb);
-            }
-            MappingKind::MCDCDecision(_) => {}
         }
     }
 
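
The rewritten `coverage_attr_on` above folds the old `#[automatically_derived]` special case into the recursive attribute walk. A minimal model of that walk, with hypothetical `attr`, `derived`, and `parent` tables standing in for the `tcx` queries:

    use std::collections::{HashMap, HashSet};

    fn coverage_attr_on(
        def: u32,
        attr: &HashMap<u32, bool>,  // explicit #[coverage(on)] / #[coverage(off)]
        derived: &HashSet<u32>,     // defs marked #[automatically_derived]
        parent: &HashMap<u32, u32>, // def -> enclosing def
    ) -> bool {
        // An explicit attribute on this def wins outright.
        if let Some(&on) = attr.get(&def) {
            return on;
        }
        // `#[automatically_derived]` acts as an implied `#[coverage(off)]`,
        // covering the impl's items and anything nested inside them.
        if derived.contains(&def) {
            return false;
        }
        // Otherwise defer to the enclosing def; the crate root defaults to on.
        match parent.get(&def) {
            Some(&p) => coverage_attr_on(p, attr, derived, parent),
            None => true,
        }
    }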
diff --git a/compiler/rustc_mir_transform/src/coverage/spans.rs b/compiler/rustc_mir_transform/src/coverage/spans.rs
index ec76076020e..d1b04c8f587 100644
--- a/compiler/rustc_mir_transform/src/coverage/spans.rs
+++ b/compiler/rustc_mir_transform/src/coverage/spans.rs
@@ -1,12 +1,15 @@
 use rustc_data_structures::fx::FxHashSet;
 use rustc_middle::mir;
+use rustc_middle::mir::coverage::{Mapping, MappingKind, START_BCB};
 use rustc_middle::ty::TyCtxt;
-use rustc_span::{DesugaringKind, ExpnKind, MacroKind, Span};
+use rustc_span::source_map::SourceMap;
+use rustc_span::{BytePos, DesugaringKind, ExpnKind, MacroKind, Span};
 use tracing::instrument;
 
 use crate::coverage::graph::{BasicCoverageBlock, CoverageGraph};
+use crate::coverage::hir_info::ExtractedHirInfo;
 use crate::coverage::spans::from_mir::{Hole, RawSpanFromMir, SpanFromMir};
-use crate::coverage::{ExtractedHirInfo, mappings, unexpand};
+use crate::coverage::unexpand;
 
 mod from_mir;
 
@@ -15,8 +18,19 @@ pub(super) fn extract_refined_covspans<'tcx>(
     mir_body: &mir::Body<'tcx>,
     hir_info: &ExtractedHirInfo,
     graph: &CoverageGraph,
-    code_mappings: &mut impl Extend<mappings::CodeMapping>,
+    mappings: &mut Vec<Mapping>,
 ) {
+    if hir_info.is_async_fn {
+        // An async function desugars into a function that returns a future,
+        // with the user code wrapped in a closure. Any spans in the desugared
+        // outer function will be unhelpful, so just keep the signature span
+        // and ignore all of the spans in the MIR body.
+        if let Some(span) = hir_info.fn_sig_span {
+            mappings.push(Mapping { span, kind: MappingKind::Code { bcb: START_BCB } })
+        }
+        return;
+    }
+
     let &ExtractedHirInfo { body_span, .. } = hir_info;
 
     let raw_spans = from_mir::extract_raw_spans_from_mir(mir_body, graph);
@@ -83,13 +97,23 @@ pub(super) fn extract_refined_covspans<'tcx>(
     // Discard any span that overlaps with a hole.
     discard_spans_overlapping_holes(&mut covspans, &holes);
 
-    // Perform more refinement steps after holes have been dealt with.
+    // Discard spans that overlap in unwanted ways.
     let mut covspans = remove_unwanted_overlapping_spans(covspans);
+
+    // For all empty spans, either enlarge them to be non-empty, or discard them.
+    let source_map = tcx.sess.source_map();
+    covspans.retain_mut(|covspan| {
+        let Some(span) = ensure_non_empty_span(source_map, covspan.span) else { return false };
+        covspan.span = span;
+        true
+    });
+
+    // Merge covspans that can be merged.
     covspans.dedup_by(|b, a| a.merge_if_eligible(b));
 
-    code_mappings.extend(covspans.into_iter().map(|Covspan { span, bcb }| {
+    mappings.extend(covspans.into_iter().map(|Covspan { span, bcb }| {
         // Each span produced by the refiner represents an ordinary code region.
-        mappings::CodeMapping { span, bcb }
+        Mapping { span, kind: MappingKind::Code { bcb } }
     }));
 }
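Aside (not part of the diff): `dedup_by` with a merging closure, as used for covspans above, is a compact idiom. `Vec::dedup_by` passes the later element first and drops it when the closure returns `true`, so the closure can fold it into the retained earlier element before answering. A standalone sketch with sorted numeric ranges standing in for covspans:

```rust
fn merge_adjacent_ranges(mut ranges: Vec<(u32, u32)>) -> Vec<(u32, u32)> {
    ranges.dedup_by(|b, a| {
        // `a` is the earlier (kept) element, `b` the later one.
        if a.1 >= b.0 {
            a.1 = a.1.max(b.1); // fold `b` into `a`
            true // returning true drops `b`
        } else {
            false
        }
    });
    ranges
}

fn main() {
    assert_eq!(
        merge_adjacent_ranges(vec![(0, 2), (2, 5), (7, 9)]),
        vec![(0, 5), (7, 9)],
    );
}
```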
 
@@ -230,3 +254,26 @@ fn compare_spans(a: Span, b: Span) -> std::cmp::Ordering {
         // - Both have the same start and span A extends further right
         .then_with(|| Ord::cmp(&a.hi(), &b.hi()).reverse())
 }
+
+fn ensure_non_empty_span(source_map: &SourceMap, span: Span) -> Option<Span> {
+    if !span.is_empty() {
+        return Some(span);
+    }
+
+    // The span is empty, so try to enlarge it to cover an adjacent '{' or '}'.
+    source_map
+        .span_to_source(span, |src, start, end| try {
+            // Adjusting span endpoints by `BytePos(1)` is normally a bug,
+            // but in this case we have specifically checked that the character
+            // we're skipping over is one of two specific ASCII characters, so
+            // adjusting by exactly 1 byte is correct.
+            if src.as_bytes().get(end).copied() == Some(b'{') {
+                Some(span.with_hi(span.hi() + BytePos(1)))
+            } else if start > 0 && src.as_bytes()[start - 1] == b'}' {
+                Some(span.with_lo(span.lo() - BytePos(1)))
+            } else {
+                None
+            }
+        })
+        .ok()?
+}
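Aside (not part of the diff): the same widening rule restated on plain byte offsets into a `&str`, so it can be tested in isolation (the real code goes through `SourceMap::span_to_source` and adjusts `BytePos` endpoints); `widen_empty` is a hypothetical name:

```rust
fn widen_empty(src: &str, start: usize, end: usize) -> Option<(usize, usize)> {
    if start != end {
        return Some((start, end)); // already non-empty: keep as-is
    }
    // Empty span: grow by exactly one byte onto an adjacent '{' or '}'.
    if src.as_bytes().get(end).copied() == Some(b'{') {
        Some((start, end + 1))
    } else if start > 0 && src.as_bytes()[start - 1] == b'}' {
        Some((start - 1, end))
    } else {
        None // nothing to attach to: discard the span
    }
}

fn main() {
    let src = "fn f() {}";
    assert_eq!(widen_empty(src, 7, 7), Some((7, 8))); // grows onto '{'
    assert_eq!(widen_empty(src, 9, 9), Some((8, 9))); // grows onto '}'
    assert_eq!(widen_empty(src, 3, 3), None);         // discarded
}
```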
diff --git a/compiler/rustc_mir_transform/src/coverage/spans/from_mir.rs b/compiler/rustc_mir_transform/src/coverage/spans/from_mir.rs
index 804cd8ab3f7..7985e1c0798 100644
--- a/compiler/rustc_mir_transform/src/coverage/spans/from_mir.rs
+++ b/compiler/rustc_mir_transform/src/coverage/spans/from_mir.rs
@@ -101,11 +101,7 @@ fn filtered_statement_span(statement: &Statement<'_>) -> Option<Span> {
         StatementKind::Coverage(CoverageKind::BlockMarker { .. }) => None,
 
         // These coverage statements should not exist prior to coverage instrumentation.
-        StatementKind::Coverage(
-            CoverageKind::VirtualCounter { .. }
-            | CoverageKind::CondBitmapUpdate { .. }
-            | CoverageKind::TestVectorBitmapUpdate { .. },
-        ) => bug!(
+        StatementKind::Coverage(CoverageKind::VirtualCounter { .. }) => bug!(
             "Unexpected coverage statement found during coverage instrumentation: {statement:?}"
         ),
     }
diff --git a/compiler/rustc_mir_transform/src/cross_crate_inline.rs b/compiler/rustc_mir_transform/src/cross_crate_inline.rs
index 6d7b7e10ef6..b186c2bd775 100644
--- a/compiler/rustc_mir_transform/src/cross_crate_inline.rs
+++ b/compiler/rustc_mir_transform/src/cross_crate_inline.rs
@@ -1,4 +1,4 @@
-use rustc_attr_data_structures::InlineAttr;
+use rustc_hir::attrs::InlineAttr;
 use rustc_hir::def::DefKind;
 use rustc_hir::def_id::LocalDefId;
 use rustc_middle::mir::visit::Visitor;
diff --git a/compiler/rustc_mir_transform/src/ctfe_limit.rs b/compiler/rustc_mir_transform/src/ctfe_limit.rs
index fb17cca30f4..ac46336b834 100644
--- a/compiler/rustc_mir_transform/src/ctfe_limit.rs
+++ b/compiler/rustc_mir_transform/src/ctfe_limit.rs
@@ -18,7 +18,7 @@ impl<'tcx> crate::MirPass<'tcx> for CtfeLimit {
             .basic_blocks
             .iter_enumerated()
             .filter_map(|(node, node_data)| {
-                if matches!(node_data.terminator().kind, TerminatorKind::Call { .. })
+                if matches!(node_data.terminator().kind, TerminatorKind::Call { .. } | TerminatorKind::TailCall { .. })
                     // Back edges in a CFG indicate loops
                     || has_back_edge(doms, node, node_data)
                 {
diff --git a/compiler/rustc_mir_transform/src/elaborate_drop.rs b/compiler/rustc_mir_transform/src/elaborate_drop.rs
index de96b1f255a..4f3c53d761f 100644
--- a/compiler/rustc_mir_transform/src/elaborate_drop.rs
+++ b/compiler/rustc_mir_transform/src/elaborate_drop.rs
@@ -611,6 +611,7 @@ where
     ///
     /// For example, with 3 fields, the drop ladder is
     ///
+    /// ```text
     /// .d0:
     ///     ELAB(drop location.0 [target=.d1, unwind=.c1])
     /// .d1:
@@ -621,8 +622,10 @@ where
     ///     ELAB(drop location.1 [target=.c2])
     /// .c2:
     ///     ELAB(drop location.2 [target=`self.unwind`])
+    /// ```
     ///
     /// For possible-async drops in coroutines we also need dropline ladder
+    /// ```text
     /// .d0 (mainline):
     ///     ELAB(drop location.0 [target=.d1, unwind=.c1, drop=.e1])
     /// .d1 (mainline):
@@ -637,6 +640,7 @@ where
     ///     ELAB(drop location.1 [target=.e2, unwind=.c2])
     /// .e2 (dropline):
     ///     ELAB(drop location.2 [target=`self.drop`, unwind=`self.unwind`])
+    /// ```
     ///
     /// NOTE: this does not clear the master drop flag, so you need
     /// to point succ/unwind at a `drop_ladder_bottom`.
@@ -761,24 +765,37 @@ where
 
         let skip_contents = adt.is_union() || adt.is_manually_drop();
         let contents_drop = if skip_contents {
+            if adt.has_dtor(self.tcx()) && self.elaborator.get_drop_flag(self.path).is_some() {
+                // The top-level drop flag is usually cleared by
+                // `open_drop_for_adt_contents`; types with destructors
+                // would still need an empty drop ladder to clear it.
+
+                // However, these types are only open-dropped in
+                // `DropShimElaborator`, which does not have drop flags.
+                // A future box-like "DerefMove" trait would allow this
+                // case to happen.
+                span_bug!(self.source_info.span, "open dropping partially moved union");
+            }
+
             (self.succ, self.unwind, self.dropline)
         } else {
             self.open_drop_for_adt_contents(adt, args)
         };
 
-        if adt.is_box() {
-            // we need to drop the inside of the box before running the destructor
-            let succ = self.destructor_call_block_sync((contents_drop.0, contents_drop.1));
-            let unwind = contents_drop
-                .1
-                .map(|unwind| self.destructor_call_block_sync((unwind, Unwind::InCleanup)));
-            let dropline = contents_drop
-                .2
-                .map(|dropline| self.destructor_call_block_sync((dropline, contents_drop.1)));
-
-            self.open_drop_for_box_contents(adt, args, succ, unwind, dropline)
-        } else if adt.has_dtor(self.tcx()) {
-            self.destructor_call_block(contents_drop)
+        if adt.has_dtor(self.tcx()) {
+            let destructor_block = if adt.is_box() {
+                // we need to drop the inside of the box before running the destructor
+                let succ = self.destructor_call_block_sync((contents_drop.0, contents_drop.1));
+                let unwind = contents_drop
+                    .1
+                    .map(|unwind| self.destructor_call_block_sync((unwind, Unwind::InCleanup)));
+                let dropline = contents_drop
+                    .2
+                    .map(|dropline| self.destructor_call_block_sync((dropline, contents_drop.1)));
+                self.open_drop_for_box_contents(adt, args, succ, unwind, dropline)
+            } else {
+                self.destructor_call_block(contents_drop)
+            };
+
+            self.drop_flag_test_block(destructor_block, contents_drop.0, contents_drop.1)
         } else {
             contents_drop.0
         }
@@ -982,12 +999,7 @@ where
             unwind.is_cleanup(),
         );
 
-        let destructor_block = self.elaborator.patch().new_block(result);
-
-        let block_start = Location { block: destructor_block, statement_index: 0 };
-        self.elaborator.clear_drop_flag(block_start, self.path, DropFlagMode::Shallow);
-
-        self.drop_flag_test_block(destructor_block, succ, unwind)
+        self.elaborator.patch().new_block(result)
     }
 
     fn destructor_call_block(
@@ -1002,13 +1014,7 @@ where
             && !unwind.is_cleanup()
             && ty.is_async_drop(self.tcx(), self.elaborator.typing_env())
         {
-            let destructor_block =
-                self.build_async_drop(self.place, ty, None, succ, unwind, dropline, true);
-
-            let block_start = Location { block: destructor_block, statement_index: 0 };
-            self.elaborator.clear_drop_flag(block_start, self.path, DropFlagMode::Shallow);
-
-            self.drop_flag_test_block(destructor_block, succ, unwind)
+            self.build_async_drop(self.place, ty, None, succ, unwind, dropline, true)
         } else {
             self.destructor_call_block_sync((succ, unwind))
         }
diff --git a/compiler/rustc_mir_transform/src/elaborate_drops.rs b/compiler/rustc_mir_transform/src/elaborate_drops.rs
index b4fa2be1d00..58dff4514a0 100644
--- a/compiler/rustc_mir_transform/src/elaborate_drops.rs
+++ b/compiler/rustc_mir_transform/src/elaborate_drops.rs
@@ -253,8 +253,8 @@ struct ElaborateDropsCtxt<'a, 'tcx> {
 }
 
 impl fmt::Debug for ElaborateDropsCtxt<'_, '_> {
-    fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        Ok(())
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("ElaborateDropsCtxt").finish_non_exhaustive()
     }
 }
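For reference (not in the diff): what `finish_non_exhaustive` renders, demonstrated on a toy type. The change above makes `{:?}` print the struct name plus `..` instead of an empty string:

```rust
use std::fmt;

struct Ctxt;

impl fmt::Debug for Ctxt {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Same pattern as the new `ElaborateDropsCtxt` impl above.
        f.debug_struct("Ctxt").finish_non_exhaustive()
    }
}

fn main() {
    assert_eq!(format!("{:?}", Ctxt), "Ctxt { .. }");
}
```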
 
diff --git a/compiler/rustc_mir_transform/src/errors.rs b/compiler/rustc_mir_transform/src/errors.rs
index cffa0183fa7..ad9635aae33 100644
--- a/compiler/rustc_mir_transform/src/errors.rs
+++ b/compiler/rustc_mir_transform/src/errors.rs
@@ -117,14 +117,6 @@ pub(crate) struct FnItemRef {
     pub ident: Ident,
 }
 
-#[derive(Diagnostic)]
-#[diag(mir_transform_exceeds_mcdc_test_vector_limit)]
-pub(crate) struct MCDCExceedsTestVectorLimit {
-    #[primary_span]
-    pub(crate) span: Span,
-    pub(crate) max_num_test_vectors: usize,
-}
-
 pub(crate) struct MustNotSupend<'a, 'tcx> {
     pub tcx: TyCtxt<'tcx>,
     pub yield_sp: Span,
diff --git a/compiler/rustc_mir_transform/src/gvn.rs b/compiler/rustc_mir_transform/src/gvn.rs
index 6b11706d2b5..5a13394543b 100644
--- a/compiler/rustc_mir_transform/src/gvn.rs
+++ b/compiler/rustc_mir_transform/src/gvn.rs
@@ -105,7 +105,6 @@ use rustc_middle::mir::*;
 use rustc_middle::ty::layout::HasTypingEnv;
 use rustc_middle::ty::{self, Ty, TyCtxt};
 use rustc_span::DUMMY_SP;
-use rustc_span::def_id::DefId;
 use smallvec::SmallVec;
 use tracing::{debug, instrument, trace};
 
@@ -130,7 +129,7 @@ impl<'tcx> crate::MirPass<'tcx> for GVN {
         let mut state = VnState::new(tcx, body, typing_env, &ssa, dominators, &body.local_decls);
 
         for local in body.args_iter().filter(|&local| ssa.is_ssa(local)) {
-            let opaque = state.new_opaque();
+            let opaque = state.new_opaque(body.local_decls[local].ty);
             state.assign(local, opaque);
         }
 
@@ -155,22 +154,6 @@ newtype_index! {
     struct VnIndex {}
 }
 
-/// Computing the aggregate's type can be quite slow, so we only keep the minimal amount of
-/// information to reconstruct it when needed.
-#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
-enum AggregateTy<'tcx> {
-    /// Invariant: this must not be used for an empty array.
-    Array,
-    Tuple,
-    Def(DefId, ty::GenericArgsRef<'tcx>),
-    RawPtr {
-        /// Needed for cast propagation.
-        data_pointer_ty: Ty<'tcx>,
-        /// The data pointer can be anything thin, so doesn't determine the output.
-        output_pointer_ty: Ty<'tcx>,
-    },
-}
-
 #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
 enum AddressKind {
     Ref(BorrowKind),
@@ -193,7 +176,14 @@ enum Value<'tcx> {
     },
     /// An aggregate value, either tuple/closure/struct/enum.
     /// This does not contain unions, as we cannot reason with the value.
-    Aggregate(AggregateTy<'tcx>, VariantIdx, Vec<VnIndex>),
+    Aggregate(VariantIdx, Vec<VnIndex>),
+    /// A raw pointer aggregate built from a thin pointer and metadata.
+    RawPtr {
+        /// Thin pointer component. This is field 0 in MIR.
+        pointer: VnIndex,
+        /// Metadata component. This is field 1 in MIR.
+        metadata: VnIndex,
+    },
     /// This corresponds to a `[value; count]` expression.
     Repeat(VnIndex, ty::Const<'tcx>),
     /// The address of a place.
@@ -206,7 +196,7 @@ enum Value<'tcx> {
 
     // Extractions.
     /// This is the *value* obtained by projecting another value.
-    Projection(VnIndex, ProjectionElem<VnIndex, Ty<'tcx>>),
+    Projection(VnIndex, ProjectionElem<VnIndex, ()>),
     /// Discriminant of the given value.
     Discriminant(VnIndex),
     /// Length of an array or slice.
@@ -219,8 +209,6 @@ enum Value<'tcx> {
     Cast {
         kind: CastKind,
         value: VnIndex,
-        from: Ty<'tcx>,
-        to: Ty<'tcx>,
     },
 }
 
@@ -228,12 +216,13 @@ struct VnState<'body, 'tcx> {
     tcx: TyCtxt<'tcx>,
     ecx: InterpCx<'tcx, DummyMachine>,
     local_decls: &'body LocalDecls<'tcx>,
+    is_coroutine: bool,
     /// Value stored in each local.
     locals: IndexVec<Local, Option<VnIndex>>,
     /// Locals that are assigned that value.
     // This vector does not hold all the values of `VnIndex` that we create.
     rev_locals: IndexVec<VnIndex, SmallVec<[Local; 1]>>,
-    values: FxIndexSet<Value<'tcx>>,
+    values: FxIndexSet<(Value<'tcx>, Ty<'tcx>)>,
     /// Values evaluated as constants if possible.
     evaluated: IndexVec<VnIndex, Option<OpTy<'tcx>>>,
     /// Counter to generate different values.
@@ -265,6 +254,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
             tcx,
             ecx: InterpCx::new(tcx, DUMMY_SP, typing_env, DummyMachine),
             local_decls,
+            is_coroutine: body.coroutine.is_some(),
             locals: IndexVec::from_elem(None, local_decls),
             rev_locals: IndexVec::with_capacity(num_values),
             values: FxIndexSet::with_capacity_and_hasher(num_values, Default::default()),
@@ -282,8 +272,8 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
     }
 
     #[instrument(level = "trace", skip(self), ret)]
-    fn insert(&mut self, value: Value<'tcx>) -> VnIndex {
-        let (index, new) = self.values.insert_full(value);
+    fn insert(&mut self, ty: Ty<'tcx>, value: Value<'tcx>) -> VnIndex {
+        let (index, new) = self.values.insert_full((value, ty));
         let index = VnIndex::from_usize(index);
         if new {
             // Grow `evaluated` and `rev_locals` here to amortize the allocations.
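Aside (not part of the diff): the interning idiom behind `insert`, in miniature. An index set hands out a stable index per distinct entry, and keying by `(value, type)`, as this commit starts doing, makes otherwise-identical expressions of different types distinct value numbers. This sketch substitutes the `indexmap` crate for rustc's `FxIndexSet` alias and strings for the real `Value`/`Ty`:

```rust
use indexmap::IndexSet;

// Stand-ins for `Value<'tcx>` and `Ty<'tcx>`.
type Value = &'static str;
type Ty = &'static str;

fn intern(values: &mut IndexSet<(Value, Ty)>, value: Value, ty: Ty) -> usize {
    // `insert_full` returns the entry's index, new or pre-existing,
    // which doubles as the value number (`VnIndex` in the pass).
    let (index, _is_new) = values.insert_full((value, ty));
    index
}

fn main() {
    let mut values = IndexSet::new();
    let a = intern(&mut values, "x + y", "i32");
    let b = intern(&mut values, "x + y", "i32");
    let c = intern(&mut values, "x + y", "i64");
    assert_eq!(a, b); // same value, same type: one value number
    assert_ne!(a, c); // same value, different type: distinct numbers
}
```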
@@ -305,20 +295,33 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
     /// Create a new `Value` for which we have no information at all, except that it is distinct
     /// from all the others.
     #[instrument(level = "trace", skip(self), ret)]
-    fn new_opaque(&mut self) -> VnIndex {
+    fn new_opaque(&mut self, ty: Ty<'tcx>) -> VnIndex {
         let value = Value::Opaque(self.next_opaque());
-        self.insert(value)
+        self.insert(ty, value)
     }
 
     /// Create a new `Value::Address` distinct from all the others.
     #[instrument(level = "trace", skip(self), ret)]
     fn new_pointer(&mut self, place: Place<'tcx>, kind: AddressKind) -> VnIndex {
+        let pty = place.ty(self.local_decls, self.tcx).ty;
+        let ty = match kind {
+            AddressKind::Ref(bk) => {
+                Ty::new_ref(self.tcx, self.tcx.lifetimes.re_erased, pty, bk.to_mutbl_lossy())
+            }
+            AddressKind::Address(mutbl) => Ty::new_ptr(self.tcx, pty, mutbl.to_mutbl_lossy()),
+        };
         let value = Value::Address { place, kind, provenance: self.next_opaque() };
-        self.insert(value)
+        self.insert(ty, value)
     }
 
+    #[inline]
     fn get(&self, index: VnIndex) -> &Value<'tcx> {
-        self.values.get_index(index.as_usize()).unwrap()
+        &self.values.get_index(index.as_usize()).unwrap().0
+    }
+
+    #[inline]
+    fn ty(&self, index: VnIndex) -> Ty<'tcx> {
+        self.values.get_index(index.as_usize()).unwrap().1
     }
 
     /// Record that `local` is assigned `value`. `local` must be SSA.
@@ -341,29 +344,29 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
             debug_assert_ne!(disambiguator, 0);
             disambiguator
         };
-        self.insert(Value::Constant { value, disambiguator })
+        self.insert(value.ty(), Value::Constant { value, disambiguator })
     }
 
     fn insert_bool(&mut self, flag: bool) -> VnIndex {
         // Booleans are deterministic.
         let value = Const::from_bool(self.tcx, flag);
         debug_assert!(value.is_deterministic());
-        self.insert(Value::Constant { value, disambiguator: 0 })
+        self.insert(self.tcx.types.bool, Value::Constant { value, disambiguator: 0 })
     }
 
-    fn insert_scalar(&mut self, scalar: Scalar, ty: Ty<'tcx>) -> VnIndex {
+    fn insert_scalar(&mut self, ty: Ty<'tcx>, scalar: Scalar) -> VnIndex {
         // Scalars are deterministic.
         let value = Const::from_scalar(self.tcx, scalar, ty);
         debug_assert!(value.is_deterministic());
-        self.insert(Value::Constant { value, disambiguator: 0 })
+        self.insert(ty, Value::Constant { value, disambiguator: 0 })
     }
 
-    fn insert_tuple(&mut self, values: Vec<VnIndex>) -> VnIndex {
-        self.insert(Value::Aggregate(AggregateTy::Tuple, VariantIdx::ZERO, values))
+    fn insert_tuple(&mut self, ty: Ty<'tcx>, values: Vec<VnIndex>) -> VnIndex {
+        self.insert(ty, Value::Aggregate(VariantIdx::ZERO, values))
     }
 
-    fn insert_deref(&mut self, value: VnIndex) -> VnIndex {
-        let value = self.insert(Value::Projection(value, ProjectionElem::Deref));
+    fn insert_deref(&mut self, ty: Ty<'tcx>, value: VnIndex) -> VnIndex {
+        let value = self.insert(ty, Value::Projection(value, ProjectionElem::Deref));
         self.derefs.push(value);
         value
     }
@@ -371,14 +374,23 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
     fn invalidate_derefs(&mut self) {
         for deref in std::mem::take(&mut self.derefs) {
             let opaque = self.next_opaque();
-            *self.values.get_index_mut2(deref.index()).unwrap() = Value::Opaque(opaque);
+            self.values.get_index_mut2(deref.index()).unwrap().0 = Value::Opaque(opaque);
         }
     }
 
     #[instrument(level = "trace", skip(self), ret)]
     fn eval_to_const(&mut self, value: VnIndex) -> Option<OpTy<'tcx>> {
         use Value::*;
+        let ty = self.ty(value);
+        // Avoid computing layouts inside a coroutine, as that can cause cycles.
+        let ty = if !self.is_coroutine || ty.is_scalar() {
+            self.ecx.layout_of(ty).ok()?
+        } else {
+            return None;
+        };
         let op = match *self.get(value) {
+            _ if ty.is_zst() => ImmTy::uninit(ty).into(),
+
             Opaque(_) => return None,
             // Do not bother evaluating repeat expressions. This would uselessly consume memory.
             Repeat(..) => return None,
@@ -386,42 +398,14 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
             Constant { ref value, disambiguator: _ } => {
                 self.ecx.eval_mir_constant(value, DUMMY_SP, None).discard_err()?
             }
-            Aggregate(kind, variant, ref fields) => {
+            Aggregate(variant, ref fields) => {
                 let fields = fields
                     .iter()
                     .map(|&f| self.evaluated[f].as_ref())
                     .collect::<Option<Vec<_>>>()?;
-                let ty = match kind {
-                    AggregateTy::Array => {
-                        assert!(fields.len() > 0);
-                        Ty::new_array(self.tcx, fields[0].layout.ty, fields.len() as u64)
-                    }
-                    AggregateTy::Tuple => {
-                        Ty::new_tup_from_iter(self.tcx, fields.iter().map(|f| f.layout.ty))
-                    }
-                    AggregateTy::Def(def_id, args) => {
-                        self.tcx.type_of(def_id).instantiate(self.tcx, args)
-                    }
-                    AggregateTy::RawPtr { output_pointer_ty, .. } => output_pointer_ty,
-                };
-                let variant = if ty.is_enum() { Some(variant) } else { None };
-                let ty = self.ecx.layout_of(ty).ok()?;
-                if ty.is_zst() {
-                    ImmTy::uninit(ty).into()
-                } else if matches!(kind, AggregateTy::RawPtr { .. }) {
-                    // Pointers don't have fields, so don't `project_field` them.
-                    let data = self.ecx.read_pointer(fields[0]).discard_err()?;
-                    let meta = if fields[1].layout.is_zst() {
-                        MemPlaceMeta::None
-                    } else {
-                        MemPlaceMeta::Meta(self.ecx.read_scalar(fields[1]).discard_err()?)
-                    };
-                    let ptr_imm = Immediate::new_pointer_with_meta(data, meta, &self.ecx);
-                    ImmTy::from_immediate(ptr_imm, ty).into()
-                } else if matches!(
-                    ty.backend_repr,
-                    BackendRepr::Scalar(..) | BackendRepr::ScalarPair(..)
-                ) {
+                let variant = if ty.ty.is_enum() { Some(variant) } else { None };
+                if matches!(ty.backend_repr, BackendRepr::Scalar(..) | BackendRepr::ScalarPair(..))
+                {
                     let dest = self.ecx.allocate(ty, MemoryKind::Stack).discard_err()?;
                     let variant_dest = if let Some(variant) = variant {
                         self.ecx.project_downcast(&dest, variant).discard_err()?
@@ -446,60 +430,43 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
                     return None;
                 }
             }
+            RawPtr { pointer, metadata } => {
+                let pointer = self.evaluated[pointer].as_ref()?;
+                let metadata = self.evaluated[metadata].as_ref()?;
+
+                // Pointers don't have fields, so don't `project_field` them.
+                let data = self.ecx.read_pointer(pointer).discard_err()?;
+                let meta = if metadata.layout.is_zst() {
+                    MemPlaceMeta::None
+                } else {
+                    MemPlaceMeta::Meta(self.ecx.read_scalar(metadata).discard_err()?)
+                };
+                let ptr_imm = Immediate::new_pointer_with_meta(data, meta, &self.ecx);
+                ImmTy::from_immediate(ptr_imm, ty).into()
+            }
 
             Projection(base, elem) => {
-                let value = self.evaluated[base].as_ref()?;
-                let elem = match elem {
-                    ProjectionElem::Deref => ProjectionElem::Deref,
-                    ProjectionElem::Downcast(name, read_variant) => {
-                        ProjectionElem::Downcast(name, read_variant)
-                    }
-                    ProjectionElem::Field(f, ty) => ProjectionElem::Field(f, ty),
-                    ProjectionElem::ConstantIndex { offset, min_length, from_end } => {
-                        ProjectionElem::ConstantIndex { offset, min_length, from_end }
-                    }
-                    ProjectionElem::Subslice { from, to, from_end } => {
-                        ProjectionElem::Subslice { from, to, from_end }
-                    }
-                    ProjectionElem::OpaqueCast(ty) => ProjectionElem::OpaqueCast(ty),
-                    ProjectionElem::Subtype(ty) => ProjectionElem::Subtype(ty),
-                    ProjectionElem::UnwrapUnsafeBinder(ty) => {
-                        ProjectionElem::UnwrapUnsafeBinder(ty)
-                    }
-                    // This should have been replaced by a `ConstantIndex` earlier.
-                    ProjectionElem::Index(_) => return None,
-                };
-                self.ecx.project(value, elem).discard_err()?
+                let base = self.evaluated[base].as_ref()?;
+                // `Index` by constants should have been replaced by `ConstantIndex` by
+                // `simplify_place_projection`.
+                let elem = elem.try_map(|_| None, |()| ty.ty)?;
+                self.ecx.project(base, elem).discard_err()?
             }
-            Address { place, kind, provenance: _ } => {
+            Address { place, kind: _, provenance: _ } => {
                 if !place.is_indirect_first_projection() {
                     return None;
                 }
                 let local = self.locals[place.local]?;
                 let pointer = self.evaluated[local].as_ref()?;
                 let mut mplace = self.ecx.deref_pointer(pointer).discard_err()?;
-                for proj in place.projection.iter().skip(1) {
-                    // We have no call stack to associate a local with a value, so we cannot
-                    // interpret indexing.
-                    if matches!(proj, ProjectionElem::Index(_)) {
-                        return None;
-                    }
-                    mplace = self.ecx.project(&mplace, proj).discard_err()?;
+                for elem in place.projection.iter().skip(1) {
+                    // `Index` by constants should have been replaced by `ConstantIndex` by
+                    // `simplify_place_projection`.
+                    let elem = elem.try_map(|_| None, |ty| ty)?;
+                    mplace = self.ecx.project(&mplace, elem).discard_err()?;
                 }
                 let pointer = mplace.to_ref(&self.ecx);
-                let ty = match kind {
-                    AddressKind::Ref(bk) => Ty::new_ref(
-                        self.tcx,
-                        self.tcx.lifetimes.re_erased,
-                        mplace.layout.ty,
-                        bk.to_mutbl_lossy(),
-                    ),
-                    AddressKind::Address(mutbl) => {
-                        Ty::new_ptr(self.tcx, mplace.layout.ty, mutbl.to_mutbl_lossy())
-                    }
-                };
-                let layout = self.ecx.layout_of(ty).ok()?;
-                ImmTy::from_immediate(pointer, layout).into()
+                ImmTy::from_immediate(pointer, ty).into()
             }
 
             Discriminant(base) => {
@@ -511,32 +478,28 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
             }
             Len(slice) => {
                 let slice = self.evaluated[slice].as_ref()?;
-                let usize_layout = self.ecx.layout_of(self.tcx.types.usize).unwrap();
                 let len = slice.len(&self.ecx).discard_err()?;
-                let imm = ImmTy::from_uint(len, usize_layout);
-                imm.into()
+                ImmTy::from_uint(len, ty).into()
             }
-            NullaryOp(null_op, ty) => {
-                let layout = self.ecx.layout_of(ty).ok()?;
+            NullaryOp(null_op, arg_ty) => {
+                let arg_layout = self.ecx.layout_of(arg_ty).ok()?;
                 if let NullOp::SizeOf | NullOp::AlignOf = null_op
-                    && layout.is_unsized()
+                    && arg_layout.is_unsized()
                 {
                     return None;
                 }
                 let val = match null_op {
-                    NullOp::SizeOf => layout.size.bytes(),
-                    NullOp::AlignOf => layout.align.abi.bytes(),
+                    NullOp::SizeOf => arg_layout.size.bytes(),
+                    NullOp::AlignOf => arg_layout.align.abi.bytes(),
                     NullOp::OffsetOf(fields) => self
                         .ecx
                         .tcx
-                        .offset_of_subfield(self.typing_env(), layout, fields.iter())
+                        .offset_of_subfield(self.typing_env(), arg_layout, fields.iter())
                         .bytes(),
                     NullOp::UbChecks => return None,
                     NullOp::ContractChecks => return None,
                 };
-                let usize_layout = self.ecx.layout_of(self.tcx.types.usize).unwrap();
-                let imm = ImmTy::from_uint(val, usize_layout);
-                imm.into()
+                ImmTy::from_uint(val, ty).into()
             }
             UnaryOp(un_op, operand) => {
                 let operand = self.evaluated[operand].as_ref()?;
@@ -552,30 +515,27 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
                 let val = self.ecx.binary_op(bin_op, &lhs, &rhs).discard_err()?;
                 val.into()
             }
-            Cast { kind, value, from: _, to } => match kind {
+            Cast { kind, value } => match kind {
                 CastKind::IntToInt | CastKind::IntToFloat => {
                     let value = self.evaluated[value].as_ref()?;
                     let value = self.ecx.read_immediate(value).discard_err()?;
-                    let to = self.ecx.layout_of(to).ok()?;
-                    let res = self.ecx.int_to_int_or_float(&value, to).discard_err()?;
+                    let res = self.ecx.int_to_int_or_float(&value, ty).discard_err()?;
                     res.into()
                 }
                 CastKind::FloatToFloat | CastKind::FloatToInt => {
                     let value = self.evaluated[value].as_ref()?;
                     let value = self.ecx.read_immediate(value).discard_err()?;
-                    let to = self.ecx.layout_of(to).ok()?;
-                    let res = self.ecx.float_to_float_or_int(&value, to).discard_err()?;
+                    let res = self.ecx.float_to_float_or_int(&value, ty).discard_err()?;
                     res.into()
                 }
                 CastKind::Transmute => {
                     let value = self.evaluated[value].as_ref()?;
-                    let to = self.ecx.layout_of(to).ok()?;
                     // `offset` for immediates generally only supports projections that match the
                     // type of the immediate. However, as a HACK, we exploit that it can also do
                     // limited transmutes: it only works between types with the same layout, and
                     // cannot transmute pointers to integers.
                     if value.as_mplace_or_imm().is_right() {
-                        let can_transmute = match (value.layout.backend_repr, to.backend_repr) {
+                        let can_transmute = match (value.layout.backend_repr, ty.backend_repr) {
                             (BackendRepr::Scalar(s1), BackendRepr::Scalar(s2)) => {
                                 s1.size(&self.ecx) == s2.size(&self.ecx)
                                     && !matches!(s1.primitive(), Primitive::Pointer(..))
@@ -595,13 +555,12 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
                             return None;
                         }
                     }
-                    value.offset(Size::ZERO, to, &self.ecx).discard_err()?
+                    value.offset(Size::ZERO, ty, &self.ecx).discard_err()?
                 }
                 CastKind::PointerCoercion(ty::adjustment::PointerCoercion::Unsize, _) => {
                     let src = self.evaluated[value].as_ref()?;
-                    let to = self.ecx.layout_of(to).ok()?;
-                    let dest = self.ecx.allocate(to, MemoryKind::Stack).discard_err()?;
-                    self.ecx.unsize_into(src, to, &dest.clone().into()).discard_err()?;
+                    let dest = self.ecx.allocate(ty, MemoryKind::Stack).discard_err()?;
+                    self.ecx.unsize_into(src, ty, &dest).discard_err()?;
                     self.ecx
                         .alloc_mark_immutable(dest.ptr().provenance.unwrap().alloc_id())
                         .discard_err()?;
@@ -610,15 +569,13 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
                 CastKind::FnPtrToPtr | CastKind::PtrToPtr => {
                     let src = self.evaluated[value].as_ref()?;
                     let src = self.ecx.read_immediate(src).discard_err()?;
-                    let to = self.ecx.layout_of(to).ok()?;
-                    let ret = self.ecx.ptr_to_ptr(&src, to).discard_err()?;
+                    let ret = self.ecx.ptr_to_ptr(&src, ty).discard_err()?;
                     ret.into()
                 }
                 CastKind::PointerCoercion(ty::adjustment::PointerCoercion::UnsafeFnPointer, _) => {
                     let src = self.evaluated[value].as_ref()?;
                     let src = self.ecx.read_immediate(src).discard_err()?;
-                    let to = self.ecx.layout_of(to).ok()?;
-                    ImmTy::from_immediate(*src, to).into()
+                    ImmTy::from_immediate(*src, ty).into()
                 }
                 _ => return None,
             },
@@ -628,31 +585,30 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
 
     fn project(
         &mut self,
-        place: PlaceRef<'tcx>,
+        place_ty: PlaceTy<'tcx>,
         value: VnIndex,
         proj: PlaceElem<'tcx>,
         from_non_ssa_index: &mut bool,
-    ) -> Option<VnIndex> {
+    ) -> Option<(PlaceTy<'tcx>, VnIndex)> {
+        let projection_ty = place_ty.projection_ty(self.tcx, proj);
         let proj = match proj {
             ProjectionElem::Deref => {
-                let ty = place.ty(self.local_decls, self.tcx).ty;
-                if let Some(Mutability::Not) = ty.ref_mutability()
-                    && let Some(pointee_ty) = ty.builtin_deref(true)
-                    && pointee_ty.is_freeze(self.tcx, self.typing_env())
+                if let Some(Mutability::Not) = place_ty.ty.ref_mutability()
+                    && projection_ty.ty.is_freeze(self.tcx, self.typing_env())
                 {
                     // An immutable borrow `_x` always points to the same value for the
                     // lifetime of the borrow, so we can merge all instances of `*_x`.
-                    return Some(self.insert_deref(value));
+                    return Some((projection_ty, self.insert_deref(projection_ty.ty, value)));
                 } else {
                     return None;
                 }
             }
             ProjectionElem::Downcast(name, index) => ProjectionElem::Downcast(name, index),
-            ProjectionElem::Field(f, ty) => {
-                if let Value::Aggregate(_, _, fields) = self.get(value) {
-                    return Some(fields[f.as_usize()]);
+            ProjectionElem::Field(f, _) => {
+                if let Value::Aggregate(_, fields) = self.get(value) {
+                    return Some((projection_ty, fields[f.as_usize()]));
                 } else if let Value::Projection(outer_value, ProjectionElem::Downcast(_, read_variant)) = self.get(value)
-                    && let Value::Aggregate(_, written_variant, fields) = self.get(*outer_value)
+                    && let Value::Aggregate(written_variant, fields) = self.get(*outer_value)
                     // This pass is not aware of control-flow, so we do not know whether the
                     // replacement we are doing is actually reachable. We could be in any arm of
                     // ```
@@ -670,14 +626,14 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
                     // a downcast to an inactive variant.
                     && written_variant == read_variant
                 {
-                    return Some(fields[f.as_usize()]);
+                    return Some((projection_ty, fields[f.as_usize()]));
                 }
-                ProjectionElem::Field(f, ty)
+                ProjectionElem::Field(f, ())
             }
             ProjectionElem::Index(idx) => {
                 if let Value::Repeat(inner, _) = self.get(value) {
                     *from_non_ssa_index |= self.locals[idx].is_none();
-                    return Some(*inner);
+                    return Some((projection_ty, *inner));
                 }
                 let idx = self.locals[idx]?;
                 ProjectionElem::Index(idx)
@@ -685,15 +641,16 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
             ProjectionElem::ConstantIndex { offset, min_length, from_end } => {
                 match self.get(value) {
                     Value::Repeat(inner, _) => {
-                        return Some(*inner);
+                        return Some((projection_ty, *inner));
                     }
-                    Value::Aggregate(AggregateTy::Array, _, operands) => {
+                    Value::Aggregate(_, operands) => {
                         let offset = if from_end {
                             operands.len() - offset as usize
                         } else {
                             offset as usize
                         };
-                        return operands.get(offset).copied();
+                        let value = operands.get(offset).copied()?;
+                        return Some((projection_ty, value));
                     }
                     _ => {}
                 };
@@ -702,12 +659,13 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
             ProjectionElem::Subslice { from, to, from_end } => {
                 ProjectionElem::Subslice { from, to, from_end }
             }
-            ProjectionElem::OpaqueCast(ty) => ProjectionElem::OpaqueCast(ty),
-            ProjectionElem::Subtype(ty) => ProjectionElem::Subtype(ty),
-            ProjectionElem::UnwrapUnsafeBinder(ty) => ProjectionElem::UnwrapUnsafeBinder(ty),
+            ProjectionElem::OpaqueCast(_) => ProjectionElem::OpaqueCast(()),
+            ProjectionElem::Subtype(_) => ProjectionElem::Subtype(()),
+            ProjectionElem::UnwrapUnsafeBinder(_) => ProjectionElem::UnwrapUnsafeBinder(()),
         };
 
-        Some(self.insert(Value::Projection(value, proj)))
+        let value = self.insert(projection_ty.ty, Value::Projection(value, proj));
+        Some((projection_ty, value))
     }
 
     /// Simplify the projection chain if we know better.
@@ -769,6 +727,8 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
 
         // Invariant: `value` holds the value up-to the `index`th projection excluded.
         let mut value = self.locals[place.local]?;
+        // Invariant: `value` has type `place_ty`, with optional downcast variant if needed.
+        let mut place_ty = PlaceTy::from_ty(self.local_decls[place.local].ty);
         let mut from_non_ssa_index = false;
         for (index, proj) in place.projection.iter().enumerate() {
             if let Value::Projection(pointer, ProjectionElem::Deref) = *self.get(value)
@@ -777,7 +737,13 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
                 && let Some(v) = self.simplify_place_value(&mut pointee, location)
             {
                 value = v;
-                place_ref = pointee.project_deeper(&place.projection[index..], self.tcx).as_ref();
+                // `pointee` holds a `Place`, so `ProjectionElem::Index` holds a `Local`.
+                // That local is SSA, but we otherwise have no guarantee on that local's value at
+                // the current location compared to its value where `pointee` was borrowed.
+                if pointee.projection.iter().all(|elem| !matches!(elem, ProjectionElem::Index(_))) {
+                    place_ref =
+                        pointee.project_deeper(&place.projection[index..], self.tcx).as_ref();
+                }
             }
             if let Some(local) = self.try_as_local(value, location) {
                 // Both `local` and `Place { local: place.local, projection: projection[..index] }`
@@ -786,8 +752,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
                 place_ref = PlaceRef { local, projection: &place.projection[index..] };
             }
 
-            let base = PlaceRef { local: place.local, projection: &place.projection[..index] };
-            value = self.project(base, value, proj, &mut from_non_ssa_index)?;
+            (place_ty, value) = self.project(place_ty, value, proj, &mut from_non_ssa_index)?;
         }
 
         if let Value::Projection(pointer, ProjectionElem::Deref) = *self.get(value)
@@ -796,7 +761,12 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
             && let Some(v) = self.simplify_place_value(&mut pointee, location)
         {
             value = v;
-            place_ref = pointee.project_deeper(&[], self.tcx).as_ref();
+            // `pointee` holds a `Place`, so `ProjectionElem::Index` holds a `Local`.
+            // That local is SSA, but we otherwise have no guarantee on that local's value at
+            // the current location compared to its value where `pointee` was borrowed.
+            if pointee.projection.iter().all(|elem| !matches!(elem, ProjectionElem::Index(_))) {
+                place_ref = pointee.project_deeper(&[], self.tcx).as_ref();
+            }
         }
         if let Some(new_local) = self.try_as_local(value, location) {
             place_ref = PlaceRef { local: new_local, projection: &[] };
@@ -864,14 +834,9 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
                 self.simplify_place_projection(place, location);
                 return Some(self.new_pointer(*place, AddressKind::Address(mutbl)));
             }
-            Rvalue::WrapUnsafeBinder(ref mut op, ty) => {
+            Rvalue::WrapUnsafeBinder(ref mut op, _) => {
                 let value = self.simplify_operand(op, location)?;
-                Value::Cast {
-                    kind: CastKind::Transmute,
-                    value,
-                    from: op.ty(self.local_decls, self.tcx),
-                    to: ty,
-                }
+                Value::Cast { kind: CastKind::Transmute, value }
             }
 
             // Operations.
@@ -896,18 +861,17 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
             // Unsupported values.
             Rvalue::ThreadLocalRef(..) | Rvalue::ShallowInitBox(..) => return None,
         };
-        debug!(?value);
-        Some(self.insert(value))
+        let ty = rvalue.ty(self.local_decls, self.tcx);
+        Some(self.insert(ty, value))
     }
 
     fn simplify_discriminant(&mut self, place: VnIndex) -> Option<VnIndex> {
-        if let Value::Aggregate(enum_ty, variant, _) = *self.get(place)
-            && let AggregateTy::Def(enum_did, enum_args) = enum_ty
-            && let DefKind::Enum = self.tcx.def_kind(enum_did)
+        let enum_ty = self.ty(place);
+        if enum_ty.is_enum()
+            && let Value::Aggregate(variant, _) = *self.get(place)
         {
-            let enum_ty = self.tcx.type_of(enum_did).instantiate(self.tcx, enum_args);
             let discr = self.ecx.discriminant_for_variant(enum_ty, variant).discard_err()?;
-            return Some(self.insert_scalar(discr.to_scalar(), discr.layout.ty));
+            return Some(self.insert_scalar(discr.layout.ty, discr.to_scalar()));
         }
 
         None
@@ -915,30 +879,18 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
 
     fn try_as_place_elem(
         &mut self,
-        proj: ProjectionElem<VnIndex, Ty<'tcx>>,
+        ty: Ty<'tcx>,
+        proj: ProjectionElem<VnIndex, ()>,
         loc: Location,
     ) -> Option<PlaceElem<'tcx>> {
-        Some(match proj {
-            ProjectionElem::Deref => ProjectionElem::Deref,
-            ProjectionElem::Field(idx, ty) => ProjectionElem::Field(idx, ty),
-            ProjectionElem::Index(idx) => {
-                let Some(local) = self.try_as_local(idx, loc) else {
-                    return None;
-                };
+        proj.try_map(
+            |value| {
+                let local = self.try_as_local(value, loc)?;
                 self.reused_locals.insert(local);
-                ProjectionElem::Index(local)
-            }
-            ProjectionElem::ConstantIndex { offset, min_length, from_end } => {
-                ProjectionElem::ConstantIndex { offset, min_length, from_end }
-            }
-            ProjectionElem::Subslice { from, to, from_end } => {
-                ProjectionElem::Subslice { from, to, from_end }
-            }
-            ProjectionElem::Downcast(symbol, idx) => ProjectionElem::Downcast(symbol, idx),
-            ProjectionElem::OpaqueCast(idx) => ProjectionElem::OpaqueCast(idx),
-            ProjectionElem::Subtype(idx) => ProjectionElem::Subtype(idx),
-            ProjectionElem::UnwrapUnsafeBinder(ty) => ProjectionElem::UnwrapUnsafeBinder(ty),
-        })
+                Some(local)
+            },
+            |()| ty,
+        )
     }
 
     fn simplify_aggregate_to_copy(
@@ -983,8 +935,8 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
 
         // Allow introducing places with non-constant offsets, as those are still better than
         // reconstructing an aggregate.
-        if let Some(place) = self.try_as_place(copy_from_local_value, location, true)
-            && rvalue.ty(self.local_decls, self.tcx) == place.ty(self.local_decls, self.tcx).ty
+        if self.ty(copy_from_local_value) == rvalue.ty(self.local_decls, self.tcx)
+            && let Some(place) = self.try_as_place(copy_from_local_value, location, true)
         {
             // Avoid creating `*a = copy (*b)`, as they might be aliases resulting in overlapping assignments.
             // FIXME: This also avoids any kind of projection, not just derefs. We can add allowed projections.
@@ -1004,9 +956,11 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
         rvalue: &mut Rvalue<'tcx>,
         location: Location,
     ) -> Option<VnIndex> {
+        let tcx = self.tcx;
+        let ty = rvalue.ty(self.local_decls, tcx);
+
         let Rvalue::Aggregate(box ref kind, ref mut field_ops) = *rvalue else { bug!() };
 
-        let tcx = self.tcx;
         if field_ops.is_empty() {
             let is_zst = match *kind {
                 AggregateKind::Array(..)
@@ -1021,87 +975,72 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
             };
 
             if is_zst {
-                let ty = rvalue.ty(self.local_decls, tcx);
                 return Some(self.insert_constant(Const::zero_sized(ty)));
             }
         }
 
-        let (mut ty, variant_index) = match *kind {
-            AggregateKind::Array(..) => {
-                assert!(!field_ops.is_empty());
-                (AggregateTy::Array, FIRST_VARIANT)
-            }
-            AggregateKind::Tuple => {
+        let fields: Vec<_> = field_ops
+            .iter_mut()
+            .map(|op| {
+                self.simplify_operand(op, location)
+                    .unwrap_or_else(|| self.new_opaque(op.ty(self.local_decls, self.tcx)))
+            })
+            .collect();
+
+        let variant_index = match *kind {
+            AggregateKind::Array(..) | AggregateKind::Tuple => {
                 assert!(!field_ops.is_empty());
-                (AggregateTy::Tuple, FIRST_VARIANT)
-            }
-            AggregateKind::Closure(did, args)
-            | AggregateKind::CoroutineClosure(did, args)
-            | AggregateKind::Coroutine(did, args) => (AggregateTy::Def(did, args), FIRST_VARIANT),
-            AggregateKind::Adt(did, variant_index, args, _, None) => {
-                (AggregateTy::Def(did, args), variant_index)
+                FIRST_VARIANT
             }
+            AggregateKind::Closure(..)
+            | AggregateKind::CoroutineClosure(..)
+            | AggregateKind::Coroutine(..) => FIRST_VARIANT,
+            AggregateKind::Adt(_, variant_index, _, _, None) => variant_index,
             // Do not track unions.
             AggregateKind::Adt(_, _, _, _, Some(_)) => return None,
-            AggregateKind::RawPtr(pointee_ty, mtbl) => {
+            AggregateKind::RawPtr(..) => {
                 assert_eq!(field_ops.len(), 2);
-                let data_pointer_ty = field_ops[FieldIdx::ZERO].ty(self.local_decls, self.tcx);
-                let output_pointer_ty = Ty::new_ptr(self.tcx, pointee_ty, mtbl);
-                (AggregateTy::RawPtr { data_pointer_ty, output_pointer_ty }, FIRST_VARIANT)
-            }
-        };
-
-        let mut fields: Vec<_> = field_ops
-            .iter_mut()
-            .map(|op| self.simplify_operand(op, location).unwrap_or_else(|| self.new_opaque()))
-            .collect();
-
-        if let AggregateTy::RawPtr { data_pointer_ty, output_pointer_ty } = &mut ty {
-            let mut was_updated = false;
+                let [mut pointer, metadata] = fields.try_into().unwrap();
+
+                // Any thin pointer of matching mutability is fine as the data pointer.
+                let mut was_updated = false;
+                while let Value::Cast { kind: CastKind::PtrToPtr, value: cast_value } =
+                    self.get(pointer)
+                    && let ty::RawPtr(from_pointee_ty, from_mtbl) = self.ty(*cast_value).kind()
+                    && let ty::RawPtr(_, output_mtbl) = ty.kind()
+                    && from_mtbl == output_mtbl
+                    && from_pointee_ty.is_sized(self.tcx, self.typing_env())
+                {
+                    pointer = *cast_value;
+                    was_updated = true;
+                }
 
-            // Any thin pointer of matching mutability is fine as the data pointer.
-            while let Value::Cast {
-                kind: CastKind::PtrToPtr,
-                value: cast_value,
-                from: cast_from,
-                to: _,
-            } = self.get(fields[0])
-                && let ty::RawPtr(from_pointee_ty, from_mtbl) = cast_from.kind()
-                && let ty::RawPtr(_, output_mtbl) = output_pointer_ty.kind()
-                && from_mtbl == output_mtbl
-                && from_pointee_ty.is_sized(self.tcx, self.typing_env())
-            {
-                fields[0] = *cast_value;
-                *data_pointer_ty = *cast_from;
-                was_updated = true;
-            }
+                if was_updated && let Some(op) = self.try_as_operand(pointer, location) {
+                    field_ops[FieldIdx::ZERO] = op;
+                }
 
-            if was_updated && let Some(op) = self.try_as_operand(fields[0], location) {
-                field_ops[FieldIdx::ZERO] = op;
+                return Some(self.insert(ty, Value::RawPtr { pointer, metadata }));
             }
-        }
+        };
 
-        if let AggregateTy::Array = ty
-            && fields.len() > 4
-        {
+        if ty.is_array() && fields.len() > 4 {
             let first = fields[0];
             if fields.iter().all(|&v| v == first) {
                 let len = ty::Const::from_target_usize(self.tcx, fields.len().try_into().unwrap());
                 if let Some(op) = self.try_as_operand(first, location) {
                     *rvalue = Rvalue::Repeat(op, len);
                 }
-                return Some(self.insert(Value::Repeat(first, len)));
+                return Some(self.insert(ty, Value::Repeat(first, len)));
             }
         }
 
-        if let AggregateTy::Def(_, _) = ty
-            && let Some(value) =
-                self.simplify_aggregate_to_copy(lhs, rvalue, location, &fields, variant_index)
+        if let Some(value) =
+            self.simplify_aggregate_to_copy(lhs, rvalue, location, &fields, variant_index)
         {
             return Some(value);
         }
 
-        Some(self.insert(Value::Aggregate(ty, variant_index, fields)))
+        Some(self.insert(ty, Value::Aggregate(variant_index, fields)))
     }
 
     #[instrument(level = "trace", skip(self), ret)]
@@ -1112,6 +1051,8 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
         location: Location,
     ) -> Option<VnIndex> {
         let mut arg_index = self.simplify_operand(arg_op, location)?;
+        let arg_ty = self.ty(arg_index);
+        let ret_ty = op.ty(self.tcx, arg_ty);
 
         // PtrMetadata doesn't care about *const vs *mut vs & vs &mut,
         // so start by removing those distinctions so we can update the `Operand`
@@ -1127,8 +1068,8 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
                     // we can't always know exactly what the metadata are.
                     // To allow things like `*mut (?A, ?T)` <-> `*mut (?B, ?T)`,
                     // it's fine to get a projection as the type.
-                    Value::Cast { kind: CastKind::PtrToPtr, value: inner, from, to }
-                        if self.pointers_have_same_metadata(*from, *to) =>
+                    Value::Cast { kind: CastKind::PtrToPtr, value: inner }
+                        if self.pointers_have_same_metadata(self.ty(*inner), arg_ty) =>
                     {
                         arg_index = *inner;
                         was_updated = true;
@@ -1165,26 +1106,22 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
             (UnOp::Not, Value::BinaryOp(BinOp::Ne, lhs, rhs)) => {
                 Value::BinaryOp(BinOp::Eq, *lhs, *rhs)
             }
-            (UnOp::PtrMetadata, Value::Aggregate(AggregateTy::RawPtr { .. }, _, fields)) => {
-                return Some(fields[1]);
-            }
+            (UnOp::PtrMetadata, Value::RawPtr { metadata, .. }) => return Some(*metadata),
             // We have an unsizing cast, which assigns the length to wide pointer metadata.
             (
                 UnOp::PtrMetadata,
                 Value::Cast {
                     kind: CastKind::PointerCoercion(ty::adjustment::PointerCoercion::Unsize, _),
-                    from,
-                    to,
-                    ..
+                    value: inner,
                 },
-            ) if let ty::Slice(..) = to.builtin_deref(true).unwrap().kind()
-                && let ty::Array(_, len) = from.builtin_deref(true).unwrap().kind() =>
+            ) if let ty::Slice(..) = arg_ty.builtin_deref(true).unwrap().kind()
+                && let ty::Array(_, len) = self.ty(*inner).builtin_deref(true).unwrap().kind() =>
             {
                 return Some(self.insert_constant(Const::Ty(self.tcx.types.usize, *len)));
             }
             _ => Value::UnaryOp(op, arg_index),
         };
-        Some(self.insert(value))
+        Some(self.insert(ret_ty, value))
     }
 
     #[instrument(level = "trace", skip(self), ret)]
@@ -1197,25 +1134,23 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
     ) -> Option<VnIndex> {
         let lhs = self.simplify_operand(lhs_operand, location);
         let rhs = self.simplify_operand(rhs_operand, location);
+
         // Only short-circuit options after we called `simplify_operand`
         // on both operands for side effect.
         let mut lhs = lhs?;
         let mut rhs = rhs?;
 
-        let lhs_ty = lhs_operand.ty(self.local_decls, self.tcx);
+        let lhs_ty = self.ty(lhs);
 
         // If we're comparing pointers, remove `PtrToPtr` casts if the from
         // types of both casts and the metadata all match.
         if let BinOp::Eq | BinOp::Ne | BinOp::Lt | BinOp::Le | BinOp::Gt | BinOp::Ge = op
             && lhs_ty.is_any_ptr()
-            && let Value::Cast {
-                kind: CastKind::PtrToPtr, value: lhs_value, from: lhs_from, ..
-            } = self.get(lhs)
-            && let Value::Cast {
-                kind: CastKind::PtrToPtr, value: rhs_value, from: rhs_from, ..
-            } = self.get(rhs)
-            && lhs_from == rhs_from
-            && self.pointers_have_same_metadata(*lhs_from, lhs_ty)
+            && let Value::Cast { kind: CastKind::PtrToPtr, value: lhs_value } = self.get(lhs)
+            && let Value::Cast { kind: CastKind::PtrToPtr, value: rhs_value } = self.get(rhs)
+            && let lhs_from = self.ty(*lhs_value)
+            && lhs_from == self.ty(*rhs_value)
+            && self.pointers_have_same_metadata(lhs_from, lhs_ty)
         {
             lhs = *lhs_value;
             rhs = *rhs_value;
@@ -1230,8 +1165,9 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
         if let Some(value) = self.simplify_binary_inner(op, lhs_ty, lhs, rhs) {
             return Some(value);
         }
+        let ty = op.ty(self.tcx, lhs_ty, self.ty(rhs));
         let value = Value::BinaryOp(op, lhs, rhs);
-        Some(self.insert(value))
+        Some(self.insert(ty, value))
     }
 
     fn simplify_binary_inner(
@@ -1323,19 +1259,19 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
                 | BinOp::Shr,
                 Left(0),
                 _,
-            ) => self.insert_scalar(Scalar::from_uint(0u128, layout.size), lhs_ty),
+            ) => self.insert_scalar(lhs_ty, Scalar::from_uint(0u128, layout.size)),
             // Attempt to simplify `x | ALL_ONES` to `ALL_ONES`.
             (BinOp::BitOr, _, Left(ones)) | (BinOp::BitOr, Left(ones), _)
                 if ones == layout.size.truncate(u128::MAX)
                     || (layout.ty.is_bool() && ones == 1) =>
             {
-                self.insert_scalar(Scalar::from_uint(ones, layout.size), lhs_ty)
+                self.insert_scalar(lhs_ty, Scalar::from_uint(ones, layout.size))
             }
             // Sub/Xor with itself.
             (BinOp::Sub | BinOp::SubWithOverflow | BinOp::SubUnchecked | BinOp::BitXor, a, b)
                 if a == b =>
             {
-                self.insert_scalar(Scalar::from_uint(0u128, layout.size), lhs_ty)
+                self.insert_scalar(lhs_ty, Scalar::from_uint(0u128, layout.size))
             }
             // Comparison:
             // - if both operands can be computed as bits, just compare the bits;
@@ -1349,8 +1285,9 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
         };
 
         if op.is_overflowing() {
+            let ty = Ty::new_tup(self.tcx, &[self.ty(result), self.tcx.types.bool]);
             let false_val = self.insert_bool(false);
-            Some(self.insert_tuple(vec![result, false_val]))
+            Some(self.insert_tuple(ty, vec![result, false_val]))
         } else {
             Some(result)
         }
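
The arms above encode standard bit-level identities; a quick sanity check of the algebra the pass relies on, in plain Rust independent of the compiler internals:

```rust
fn main() {
    let x: u8 = 0b1010_1010;
    assert_eq!(0 & x, 0);             // zero masked or shifted stays zero
    assert_eq!(0u8 << 3, 0);
    assert_eq!(x | u8::MAX, u8::MAX); // or with all-ones is all-ones
    assert_eq!(x - x, 0);             // sub with itself
    assert_eq!(x ^ x, 0);             // xor with itself
}
```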
@@ -1366,9 +1303,9 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
         use CastKind::*;
         use rustc_middle::ty::adjustment::PointerCoercion::*;
 
-        let mut from = initial_operand.ty(self.local_decls, self.tcx);
         let mut kind = *initial_kind;
         let mut value = self.simplify_operand(initial_operand, location)?;
+        let mut from = self.ty(value);
         if from == to {
             return Some(value);
         }
@@ -1376,7 +1313,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
         if let CastKind::PointerCoercion(ReifyFnPointer | ClosureFnPointer(_), _) = kind {
             // Each reification of a generic fn may get a different pointer.
             // Do not try to merge them.
-            return Some(self.new_opaque());
+            return Some(self.new_opaque(to));
         }
 
         let mut was_ever_updated = false;
@@ -1399,23 +1336,22 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
             // If a cast just casts away the metadata again, then we can get it by
             // casting the original thin pointer passed to `from_raw_parts`
             if let PtrToPtr = kind
-                && let Value::Aggregate(AggregateTy::RawPtr { data_pointer_ty, .. }, _, fields) =
-                    self.get(value)
+                && let Value::RawPtr { pointer, .. } = self.get(value)
                 && let ty::RawPtr(to_pointee, _) = to.kind()
                 && to_pointee.is_sized(self.tcx, self.typing_env())
             {
-                from = *data_pointer_ty;
-                value = fields[0];
+                from = self.ty(*pointer);
+                value = *pointer;
                 was_updated_this_iteration = true;
-                if *data_pointer_ty == to {
-                    return Some(fields[0]);
+                if from == to {
+                    return Some(*pointer);
                 }
             }
 
             // Aggregate-then-Transmute can just transmute the original field value,
             // so long as the bytes of the value come only from a single field.
             if let Transmute = kind
-                && let Value::Aggregate(_aggregate_ty, variant_idx, field_values) = self.get(value)
+                && let Value::Aggregate(variant_idx, field_values) = self.get(value)
                 && let Some((field_idx, field_ty)) =
                     self.value_is_all_in_one_field(from, *variant_idx)
             {
@@ -1428,13 +1364,8 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
             }
 
             // Various cast-then-cast cases can be simplified.
-            if let Value::Cast {
-                kind: inner_kind,
-                value: inner_value,
-                from: inner_from,
-                to: inner_to,
-            } = *self.get(value)
-            {
+            if let Value::Cast { kind: inner_kind, value: inner_value } = *self.get(value) {
+                let inner_from = self.ty(inner_value);
                 let new_kind = match (inner_kind, kind) {
                     // Even if there's a narrowing cast in here that's fine, because
                     // things like `*mut [i32] -> *mut i32 -> *const i32` and
@@ -1443,9 +1374,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
                     // PtrToPtr-then-Transmute is fine so long as the pointer cast is identity:
                     // `*const T -> *mut T -> NonNull<T>` is fine, but we need to check for narrowing
                     // to skip things like `*const [i32] -> *const i32 -> NonNull<T>`.
-                    (PtrToPtr, Transmute)
-                        if self.pointers_have_same_metadata(inner_from, inner_to) =>
-                    {
+                    (PtrToPtr, Transmute) if self.pointers_have_same_metadata(inner_from, from) => {
                         Some(Transmute)
                     }
                     // Similarly, for Transmute-then-PtrToPtr. Note that we need to check different
@@ -1456,7 +1385,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
                     // It would be legal to always do this, but we don't want to hide information
                     // from the backend that it'd otherwise be able to use for optimizations.
                     (Transmute, Transmute)
-                        if !self.type_may_have_niche_of_interest_to_backend(inner_to) =>
+                        if !self.type_may_have_niche_of_interest_to_backend(from) =>
                     {
                         Some(Transmute)
                     }
@@ -1485,7 +1414,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
             *initial_kind = kind;
         }
 
-        Some(self.insert(Value::Cast { kind, value, from, to }))
+        Some(self.insert(to, Value::Cast { kind, value }))
     }
 
     fn simplify_len(&mut self, place: &mut Place<'tcx>, location: Location) -> Option<VnIndex> {
@@ -1507,18 +1436,18 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
         }
 
         // We have an unsizing cast, which assigns the length to wide pointer metadata.
-        if let Value::Cast { kind, from, to, .. } = self.get(inner)
+        if let Value::Cast { kind, value: from } = self.get(inner)
             && let CastKind::PointerCoercion(ty::adjustment::PointerCoercion::Unsize, _) = kind
-            && let Some(from) = from.builtin_deref(true)
+            && let Some(from) = self.ty(*from).builtin_deref(true)
             && let ty::Array(_, len) = from.kind()
-            && let Some(to) = to.builtin_deref(true)
+            && let Some(to) = self.ty(inner).builtin_deref(true)
             && let ty::Slice(..) = to.kind()
         {
             return Some(self.insert_constant(Const::Ty(self.tcx.types.usize, *len)));
         }
 
         // Fallback: a symbolic `Len`.
-        Some(self.insert(Value::Len(inner)))
+        Some(self.insert(self.tcx.types.usize, Value::Len(inner)))
     }
 
     fn pointers_have_same_metadata(&self, left_ptr_ty: Ty<'tcx>, right_ptr_ty: Ty<'tcx>) -> bool {
@@ -1592,7 +1521,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
 fn op_to_prop_const<'tcx>(
     ecx: &mut InterpCx<'tcx, DummyMachine>,
     op: &OpTy<'tcx>,
-) -> Option<ConstValue<'tcx>> {
+) -> Option<ConstValue> {
     // Do not attempt to propagate unsized locals.
     if op.layout.is_unsized() {
         return None;
@@ -1727,7 +1656,7 @@ impl<'tcx> VnState<'_, 'tcx> {
                 return Some(place);
             } else if let Value::Projection(pointer, proj) = *self.get(index)
                 && (allow_complex_projection || proj.is_stable_offset())
-                && let Some(proj) = self.try_as_place_elem(proj, loc)
+                && let Some(proj) = self.try_as_place_elem(self.ty(index), proj, loc)
             {
                 projection.push(proj);
                 index = pointer;
@@ -1755,7 +1684,7 @@ impl<'tcx> MutVisitor<'tcx> for VnState<'_, 'tcx> {
 
     fn visit_place(&mut self, place: &mut Place<'tcx>, context: PlaceContext, location: Location) {
         self.simplify_place_projection(place, location);
-        if context.is_mutating_use() && !place.projection.is_empty() {
+        if context.is_mutating_use() && place.is_indirect() {
             // Non-local mutations may invalidate derefs.
             self.invalidate_derefs();
         }
@@ -1767,36 +1696,42 @@ impl<'tcx> MutVisitor<'tcx> for VnState<'_, 'tcx> {
         self.super_operand(operand, location);
     }
 
-    fn visit_statement(&mut self, stmt: &mut Statement<'tcx>, location: Location) {
-        if let StatementKind::Assign(box (ref mut lhs, ref mut rvalue)) = stmt.kind {
-            self.simplify_place_projection(lhs, location);
-
-            let value = self.simplify_rvalue(lhs, rvalue, location);
-            let value = if let Some(local) = lhs.as_local()
-                && self.ssa.is_ssa(local)
-                // FIXME(#112651) `rvalue` may have a subtype to `local`. We can only mark
-                // `local` as reusable if we have an exact type match.
-                && self.local_decls[local].ty == rvalue.ty(self.local_decls, self.tcx)
+    fn visit_assign(
+        &mut self,
+        lhs: &mut Place<'tcx>,
+        rvalue: &mut Rvalue<'tcx>,
+        location: Location,
+    ) {
+        self.simplify_place_projection(lhs, location);
+
+        let value = self.simplify_rvalue(lhs, rvalue, location);
+        if let Some(value) = value {
+            if let Some(const_) = self.try_as_constant(value) {
+                *rvalue = Rvalue::Use(Operand::Constant(Box::new(const_)));
+            } else if let Some(place) = self.try_as_place(value, location, false)
+                && *rvalue != Rvalue::Use(Operand::Move(place))
+                && *rvalue != Rvalue::Use(Operand::Copy(place))
             {
-                let value = value.unwrap_or_else(|| self.new_opaque());
-                self.assign(local, value);
-                Some(value)
-            } else {
-                value
-            };
-            if let Some(value) = value {
-                if let Some(const_) = self.try_as_constant(value) {
-                    *rvalue = Rvalue::Use(Operand::Constant(Box::new(const_)));
-                } else if let Some(place) = self.try_as_place(value, location, false)
-                    && *rvalue != Rvalue::Use(Operand::Move(place))
-                    && *rvalue != Rvalue::Use(Operand::Copy(place))
-                {
-                    *rvalue = Rvalue::Use(Operand::Copy(place));
-                    self.reused_locals.insert(place.local);
-                }
+                *rvalue = Rvalue::Use(Operand::Copy(place));
+                self.reused_locals.insert(place.local);
             }
         }
-        self.super_statement(stmt, location);
+
+        if lhs.is_indirect() {
+            // Non-local mutations may invalidate derefs.
+            self.invalidate_derefs();
+        }
+
+        if let Some(local) = lhs.as_local()
+            && self.ssa.is_ssa(local)
+            && let rvalue_ty = rvalue.ty(self.local_decls, self.tcx)
+            // FIXME(#112651) `rvalue`'s type may be a subtype of `local`'s type. We can only mark
+            // `local` as reusable if we have an exact type match.
+            && self.local_decls[local].ty == rvalue_ty
+        {
+            let value = value.unwrap_or_else(|| self.new_opaque(rvalue_ty));
+            self.assign(local, value);
+        }
     }
 
     fn visit_terminator(&mut self, terminator: &mut Terminator<'tcx>, location: Location) {
@@ -1804,7 +1739,8 @@ impl<'tcx> MutVisitor<'tcx> for VnState<'_, 'tcx> {
             if let Some(local) = destination.as_local()
                 && self.ssa.is_ssa(local)
             {
-                let opaque = self.new_opaque();
+                let ty = self.local_decls[local].ty;
+                let opaque = self.new_opaque(ty);
                 self.assign(local, opaque);
             }
         }
diff --git a/compiler/rustc_mir_transform/src/impossible_predicates.rs b/compiler/rustc_mir_transform/src/impossible_predicates.rs
index 86e2bf6cb3c..b03518de00a 100644
--- a/compiler/rustc_mir_transform/src/impossible_predicates.rs
+++ b/compiler/rustc_mir_transform/src/impossible_predicates.rs
@@ -27,7 +27,7 @@
 //! it's usually never invoked in this way.
 
 use rustc_middle::mir::{Body, START_BLOCK, TerminatorKind};
-use rustc_middle::ty::{TyCtxt, TypeVisitableExt};
+use rustc_middle::ty::{TyCtxt, TypeFlags, TypeVisitableExt};
 use rustc_trait_selection::traits;
 use tracing::trace;
 
@@ -36,14 +36,23 @@ use crate::pass_manager::MirPass;
 pub(crate) struct ImpossiblePredicates;
 
 impl<'tcx> MirPass<'tcx> for ImpossiblePredicates {
+    #[tracing::instrument(level = "trace", skip(self, tcx, body))]
     fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
-        let predicates = tcx
-            .predicates_of(body.source.def_id())
-            .predicates
-            .iter()
-            .filter_map(|(p, _)| if p.is_global() { Some(*p) } else { None });
-        if traits::impossible_predicates(tcx, traits::elaborate(tcx, predicates).collect()) {
-            trace!("found unsatisfiable predicates for {:?}", body.source);
+        tracing::trace!(def_id = ?body.source.def_id());
+        let predicates = tcx.predicates_of(body.source.def_id()).instantiate_identity(tcx);
+        tracing::trace!(?predicates);
+        let predicates = predicates.predicates.into_iter().filter(|p| {
+            !p.has_type_flags(
+                // Only consider global clauses to simplify.
+                TypeFlags::HAS_FREE_LOCAL_NAMES
+                // Clauses that refer to unevaluated constants as they cause cycles.
+                | TypeFlags::HAS_CT_PROJECTION,
+            )
+        });
+        let predicates: Vec<_> = traits::elaborate(tcx, predicates).collect();
+        tracing::trace!(?predicates);
+        if predicates.references_error() || traits::impossible_predicates(tcx, predicates) {
+            trace!("found unsatisfiable predicates");
             // Clear the body to only contain a single `unreachable` statement.
             let bbs = body.basic_blocks.as_mut();
             bbs.raw.truncate(1);
diff --git a/compiler/rustc_mir_transform/src/inline.rs b/compiler/rustc_mir_transform/src/inline.rs
index 1c0fc774867..3d49eb4e8ef 100644
--- a/compiler/rustc_mir_transform/src/inline.rs
+++ b/compiler/rustc_mir_transform/src/inline.rs
@@ -5,7 +5,7 @@ use std::iter;
 use std::ops::{Range, RangeFrom};
 
 use rustc_abi::{ExternAbi, FieldIdx};
-use rustc_attr_data_structures::{InlineAttr, OptimizeAttr};
+use rustc_hir::attrs::{InlineAttr, OptimizeAttr};
 use rustc_hir::def::DefKind;
 use rustc_hir::def_id::DefId;
 use rustc_index::Idx;
diff --git a/compiler/rustc_mir_transform/src/inline/cycle.rs b/compiler/rustc_mir_transform/src/inline/cycle.rs
index 93a81f0dca5..7f9234d1dc8 100644
--- a/compiler/rustc_mir_transform/src/inline/cycle.rs
+++ b/compiler/rustc_mir_transform/src/inline/cycle.rs
@@ -64,15 +64,15 @@ fn process<'tcx>(
     typing_env: ty::TypingEnv<'tcx>,
     caller: ty::Instance<'tcx>,
     target: LocalDefId,
-    seen: &mut FxHashSet<ty::Instance<'tcx>>,
+    seen: &mut FxHashMap<ty::Instance<'tcx>, bool>,
     involved: &mut FxHashSet<LocalDefId>,
     recursion_limiter: &mut FxHashMap<DefId, usize>,
     recursion_limit: Limit,
 ) -> bool {
     trace!(%caller);
-    let mut cycle_found = false;
+    let mut reaches_root = false;
 
-    for &(callee, args) in tcx.mir_inliner_callees(caller.def) {
+    for &(callee_def_id, args) in tcx.mir_inliner_callees(caller.def) {
         let Ok(args) = caller.try_instantiate_mir_and_normalize_erasing_regions(
             tcx,
             typing_env,
@@ -81,14 +81,17 @@ fn process<'tcx>(
             trace!(?caller, ?typing_env, ?args, "cannot normalize, skipping");
             continue;
         };
-        let Ok(Some(callee)) = ty::Instance::try_resolve(tcx, typing_env, callee, args) else {
-            trace!(?callee, "cannot resolve, skipping");
+        let Ok(Some(callee)) = ty::Instance::try_resolve(tcx, typing_env, callee_def_id, args)
+        else {
+            trace!(?callee_def_id, "cannot resolve, skipping");
             continue;
         };
 
         // Found a path.
         if callee.def_id() == target.to_def_id() {
-            cycle_found = true;
+            reaches_root = true;
+            seen.insert(callee, true);
+            continue;
         }
 
         if tcx.is_constructor(callee.def_id()) {
@@ -101,10 +104,17 @@ fn process<'tcx>(
             continue;
         }
 
-        if seen.insert(callee) {
+        let callee_reaches_root = if let Some(&c) = seen.get(&callee) {
+            // Even if we have seen this callee before, and thus don't need
+            // to recurse into it, we still need to propagate whether it reaches
+            // the root so that we can mark all the involved callers, in case we
+            // end up reaching that same recursive callee through some *other* cycle.
+            c
+        } else {
+            seen.insert(callee, false);
             let recursion = recursion_limiter.entry(callee.def_id()).or_default();
             trace!(?callee, recursion = *recursion);
-            let found_recursion = if recursion_limit.value_within_limit(*recursion) {
+            let callee_reaches_root = if recursion_limit.value_within_limit(*recursion) {
                 *recursion += 1;
                 ensure_sufficient_stack(|| {
                     process(
@@ -122,17 +132,19 @@ fn process<'tcx>(
                 // Pessimistically assume that there could be recursion.
                 true
             };
-            if found_recursion {
-                if let Some(callee) = callee.def_id().as_local() {
-                    // Calling `optimized_mir` of a non-local definition cannot cycle.
-                    involved.insert(callee);
-                }
-                cycle_found = true;
+            seen.insert(callee, callee_reaches_root);
+            callee_reaches_root
+        };
+        if callee_reaches_root {
+            if let Some(callee_def_id) = callee.def_id().as_local() {
+                // Calling `optimized_mir` of a non-local definition cannot cycle.
+                involved.insert(callee_def_id);
             }
+            reaches_root = true;
         }
     }
 
-    cycle_found
+    reaches_root
 }
 
 #[instrument(level = "debug", skip(tcx), ret)]
@@ -166,7 +178,7 @@ pub(crate) fn mir_callgraph_cyclic<'tcx>(
         typing_env,
         root_instance,
         root,
-        &mut FxHashSet::default(),
+        &mut FxHashMap::default(),
         &mut involved,
         &mut FxHashMap::default(),
         recursion_limit,
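
The `seen` map now memoizes, per resolved instance, whether it can reach the root, so a callee shared by several cycles is explored only once while still marking every involved caller. A standalone sketch of that memoized reachability pattern on a toy callgraph (hypothetical names, not the compiler's API):

```rust
use std::collections::HashMap;

// Does `node` reach `root`? Cache per-node answers so shared callees are
// explored once; in-progress nodes are provisionally cached as `false`.
fn reaches_root(
    node: usize,
    root: usize,
    edges: &[Vec<usize>],
    seen: &mut HashMap<usize, bool>,
) -> bool {
    let mut reaches = false;
    for &callee in &edges[node] {
        if callee == root {
            seen.insert(callee, true);
            reaches = true;
            continue;
        }
        let callee_reaches = if let Some(&cached) = seen.get(&callee) {
            cached
        } else {
            seen.insert(callee, false); // cut cycles during the walk
            let r = reaches_root(callee, root, edges, seen);
            seen.insert(callee, r);
            r
        };
        reaches |= callee_reaches;
    }
    reaches
}

fn main() {
    // 0 -> 1 -> {2, 0}: node 0 reaches itself through 1.
    let edges = vec![vec![1], vec![2, 0], vec![]];
    assert!(reaches_root(0, 0, &edges, &mut HashMap::new()));
}
```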
diff --git a/compiler/rustc_mir_transform/src/instsimplify.rs b/compiler/rustc_mir_transform/src/instsimplify.rs
index dbcaed20953..c83bd25c663 100644
--- a/compiler/rustc_mir_transform/src/instsimplify.rs
+++ b/compiler/rustc_mir_transform/src/instsimplify.rs
@@ -55,6 +55,7 @@ impl<'tcx> crate::MirPass<'tcx> for InstSimplify {
 
             let terminator = block.terminator.as_mut().unwrap();
             ctx.simplify_primitive_clone(terminator, &mut block.statements);
+            ctx.simplify_align_of_slice_val(terminator, &mut block.statements);
             ctx.simplify_intrinsic_assert(terminator);
             ctx.simplify_nounwind_call(terminator);
             simplify_duplicate_switch_targets(terminator);
@@ -252,6 +253,36 @@ impl<'tcx> InstSimplifyContext<'_, 'tcx> {
         terminator.kind = TerminatorKind::Goto { target: *destination_block };
     }
 
+    // Convert `align_of_val::<[T]>(ptr)` to `align_of::<T>()`, since the
+    // alignment of a slice doesn't actually depend on metadata at all
+    // and the element type is always `Sized`.
+    //
+    // This is here so it can run after inlining, where it's more useful.
+    // (LowerIntrinsics is done in cleanup, before the optimization passes.)
+    fn simplify_align_of_slice_val(
+        &self,
+        terminator: &mut Terminator<'tcx>,
+        statements: &mut Vec<Statement<'tcx>>,
+    ) {
+        if let TerminatorKind::Call {
+            func, args, destination, target: Some(destination_block), ..
+        } = &terminator.kind
+            && args.len() == 1
+            && let Some((fn_def_id, generics)) = func.const_fn_def()
+            && self.tcx.is_intrinsic(fn_def_id, sym::align_of_val)
+            && let ty::Slice(elem_ty) = *generics.type_at(0).kind()
+        {
+            statements.push(Statement::new(
+                terminator.source_info,
+                StatementKind::Assign(Box::new((
+                    *destination,
+                    Rvalue::NullaryOp(NullOp::AlignOf, elem_ty),
+                ))),
+            ));
+            terminator.kind = TerminatorKind::Goto { target: *destination_block };
+        }
+    }
+
     fn simplify_nounwind_call(&self, terminator: &mut Terminator<'tcx>) {
         let TerminatorKind::Call { ref func, ref mut unwind, .. } = terminator.kind else {
             return;
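
The new `simplify_align_of_slice_val` rewrite relies on the fact that the alignment of `[T]` never depends on the length metadata; it is exactly the element type's alignment. A stable-Rust check of that property (illustrative only):

```rust
use std::mem::{align_of, align_of_val};

fn main() {
    let xs: &[u32] = &[1, 2, 3];
    // `align_of_val` on a slice ignores the length metadata entirely,
    // which is what lets the pass rewrite it to `align_of::<u32>()`.
    assert_eq!(align_of_val(xs), align_of::<u32>());
}
```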
diff --git a/compiler/rustc_mir_transform/src/pass_manager.rs b/compiler/rustc_mir_transform/src/pass_manager.rs
index 7a8d3ba1ff1..5b3ddcc777b 100644
--- a/compiler/rustc_mir_transform/src/pass_manager.rs
+++ b/compiler/rustc_mir_transform/src/pass_manager.rs
@@ -41,19 +41,40 @@ fn to_profiler_name(type_name: &'static str) -> &'static str {
     })
 }
 
-// const wrapper for `if let Some((_, tail)) = name.rsplit_once(':') { tail } else { name }`
-const fn c_name(name: &'static str) -> &'static str {
+// A function that simplifies a pass's type_name. E.g. `Baz`, `Baz<'_>`,
+// `foo::bar::Baz`, and `foo::bar::Baz<'a, 'b>` all become `Baz`.
+//
+// It's `const` for perf reasons: it's called a lot, and doing the string
+// operations at runtime causes a non-trivial slowdown. If
+// `split_once`/`rsplit_once` become `const` its body could be simplified to
+// this:
+// ```ignore (fragment)
+// let name = if let Some((_, tail)) = name.rsplit_once(':') { tail } else { name };
+// let name = if let Some((head, _)) = name.split_once('<') { head } else { name };
+// name
+// ```
+const fn simplify_pass_type_name(name: &'static str) -> &'static str {
     // FIXME(const-hack) Simplify the implementation once more `str` methods get const-stable.
-    // and inline into call site
+
+    // Work backwards from the end. If a ':' is hit, strip it and everything before it.
     let bytes = name.as_bytes();
     let mut i = bytes.len();
     while i > 0 && bytes[i - 1] != b':' {
-        i = i - 1;
+        i -= 1;
     }
     let (_, bytes) = bytes.split_at(i);
+
+    // Work forwards from the start of what's left. If a '<' is hit, strip it and everything after
+    // it.
+    let mut i = 0;
+    while i < bytes.len() && bytes[i] != b'<' {
+        i += 1;
+    }
+    let (bytes, _) = bytes.split_at(i);
+
     match std::str::from_utf8(bytes) {
         Ok(name) => name,
-        Err(_) => name,
+        Err(_) => panic!(),
     }
 }
 
@@ -62,12 +83,7 @@ const fn c_name(name: &'static str) -> &'static str {
 /// loop that goes over each available MIR and applies `run_pass`.
 pub(super) trait MirPass<'tcx> {
     fn name(&self) -> &'static str {
-        // FIXME(const-hack) Simplify the implementation once more `str` methods get const-stable.
-        // See copypaste in `MirLint`
-        const {
-            let name = std::any::type_name::<Self>();
-            c_name(name)
-        }
+        const { simplify_pass_type_name(std::any::type_name::<Self>()) }
     }
 
     fn profiler_name(&self) -> &'static str {
@@ -101,12 +117,7 @@ pub(super) trait MirPass<'tcx> {
 /// disabled (via the `Lint` adapter).
 pub(super) trait MirLint<'tcx> {
     fn name(&self) -> &'static str {
-        // FIXME(const-hack) Simplify the implementation once more `str` methods get const-stable.
-        // See copypaste in `MirPass`
-        const {
-            let name = std::any::type_name::<Self>();
-            c_name(name)
-        }
+        const { simplify_pass_type_name(std::any::type_name::<Self>()) }
     }
 
     fn is_enabled(&self, _sess: &Session) -> bool {
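
The simplification the `const fn` performs can be replayed with the non-`const` `str` methods its comment mentions; a sketch of the expected behavior (note that the exact output of `std::any::type_name` is not guaranteed by the standard library):

```rust
fn main() {
    let name = std::any::type_name::<Option<&str>>();
    // Same two steps as the const implementation: strip everything up to the
    // last ':' and everything from the first '<' onwards.
    let name = name.rsplit_once(':').map_or(name, |(_, tail)| tail);
    let name = name.split_once('<').map_or(name, |(head, _)| head);
    assert_eq!(name, "Option");
}
```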
diff --git a/compiler/rustc_mir_transform/src/promote_consts.rs b/compiler/rustc_mir_transform/src/promote_consts.rs
index 4e8f30e077b..462ddfa3dd3 100644
--- a/compiler/rustc_mir_transform/src/promote_consts.rs
+++ b/compiler/rustc_mir_transform/src/promote_consts.rs
@@ -997,12 +997,11 @@ fn promote_candidates<'tcx>(
     for candidate in candidates.into_iter().rev() {
         let Location { block, statement_index } = candidate.location;
         if let StatementKind::Assign(box (place, _)) = &body[block].statements[statement_index].kind
+            && let Some(local) = place.as_local()
         {
-            if let Some(local) = place.as_local() {
-                if temps[local] == TempState::PromotedOut {
-                    // Already promoted.
-                    continue;
-                }
+            if temps[local] == TempState::PromotedOut {
+                // Already promoted.
+                continue;
             }
         }
 
@@ -1066,11 +1065,11 @@ fn promote_candidates<'tcx>(
             _ => true,
         });
         let terminator = block.terminator_mut();
-        if let TerminatorKind::Drop { place, target, .. } = &terminator.kind {
-            if let Some(index) = place.as_local() {
-                if promoted(index) {
-                    terminator.kind = TerminatorKind::Goto { target: *target };
-                }
+        if let TerminatorKind::Drop { place, target, .. } = &terminator.kind
+            && let Some(index) = place.as_local()
+        {
+            if promoted(index) {
+                terminator.kind = TerminatorKind::Goto { target: *target };
             }
         }
     }
diff --git a/compiler/rustc_mir_transform/src/ref_prop.rs b/compiler/rustc_mir_transform/src/ref_prop.rs
index d1c2d6b508f..6f61215cee2 100644
--- a/compiler/rustc_mir_transform/src/ref_prop.rs
+++ b/compiler/rustc_mir_transform/src/ref_prop.rs
@@ -79,6 +79,7 @@ impl<'tcx> crate::MirPass<'tcx> for ReferencePropagation {
     #[instrument(level = "trace", skip(self, tcx, body))]
     fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
         debug!(def_id = ?body.source.def_id());
+        move_to_copy_pointers(tcx, body);
         while propagate_ssa(tcx, body) {}
     }
 
@@ -87,11 +88,43 @@ impl<'tcx> crate::MirPass<'tcx> for ReferencePropagation {
     }
 }
 
+/// The SSA analysis done by [`SsaLocals`] treats [`Operand::Move`] as a read, even though in
+/// general [`Operand::Move`] represents pass-by-pointer where the callee can overwrite the
+/// pointee (Miri always considers the place deinitialized). CopyProp has a similar trick to
+/// turn [`Operand::Move`] into [`Operand::Copy`] when required for an optimization, but in this
+/// pass we just turn all moves of pointers into copies because pointers should be by-value anyway.
+fn move_to_copy_pointers<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+    let mut visitor = MoveToCopyVisitor { tcx, local_decls: &body.local_decls };
+    for (bb, data) in body.basic_blocks.as_mut_preserves_cfg().iter_enumerated_mut() {
+        visitor.visit_basic_block_data(bb, data);
+    }
+
+    struct MoveToCopyVisitor<'a, 'tcx> {
+        tcx: TyCtxt<'tcx>,
+        local_decls: &'a IndexVec<Local, LocalDecl<'tcx>>,
+    }
+
+    impl<'a, 'tcx> MutVisitor<'tcx> for MoveToCopyVisitor<'a, 'tcx> {
+        fn tcx(&self) -> TyCtxt<'tcx> {
+            self.tcx
+        }
+
+        fn visit_operand(&mut self, operand: &mut Operand<'tcx>, loc: Location) {
+            if let Operand::Move(place) = *operand {
+                if place.ty(self.local_decls, self.tcx).ty.is_any_ptr() {
+                    *operand = Operand::Copy(place);
+                }
+            }
+            self.super_operand(operand, loc);
+        }
+    }
+}
+
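
The blanket move-to-copy rewrite is sound because pointers are plain by-value data: raw pointers are `Copy`, so nothing is lost by keeping the source operand alive. A small illustration (not compiler code):

```rust
fn main() {
    let x = 42u32;
    let p: *const u32 = &x;
    // Raw pointers are `Copy`: "moving" `p` leaves it usable, so treating
    // the move as a copy changes nothing observable.
    let q = p;
    assert_eq!(p, q);
    assert_eq!(unsafe { *q }, 42);
}
```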
 fn propagate_ssa<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) -> bool {
     let typing_env = body.typing_env(tcx);
     let ssa = SsaLocals::new(tcx, body, typing_env);
 
-    let mut replacer = compute_replacement(tcx, body, &ssa);
+    let mut replacer = compute_replacement(tcx, body, ssa);
     debug!(?replacer.targets);
     debug!(?replacer.allowed_replacements);
     debug!(?replacer.storage_to_remove);
@@ -119,7 +152,7 @@ enum Value<'tcx> {
 fn compute_replacement<'tcx>(
     tcx: TyCtxt<'tcx>,
     body: &Body<'tcx>,
-    ssa: &SsaLocals,
+    ssa: SsaLocals,
 ) -> Replacer<'tcx> {
     let always_live_locals = always_storage_live_locals(body);
 
@@ -138,7 +171,7 @@ fn compute_replacement<'tcx>(
     // reborrowed references.
     let mut storage_to_remove = DenseBitSet::new_empty(body.local_decls.len());
 
-    let fully_replaceable_locals = fully_replaceable_locals(ssa);
+    let fully_replaceable_locals = fully_replaceable_locals(&ssa);
 
     // Returns true iff we can use `place` as a pointee.
     //
diff --git a/compiler/rustc_mir_transform/src/remove_noop_landing_pads.rs b/compiler/rustc_mir_transform/src/remove_noop_landing_pads.rs
index 797056ad52d..5b6d7ffb511 100644
--- a/compiler/rustc_mir_transform/src/remove_noop_landing_pads.rs
+++ b/compiler/rustc_mir_transform/src/remove_noop_landing_pads.rs
@@ -48,14 +48,13 @@ impl<'tcx> crate::MirPass<'tcx> for RemoveNoopLandingPads {
         let postorder: Vec<_> = traversal::postorder(body).map(|(bb, _)| bb).collect();
         for bb in postorder {
             debug!("  processing {:?}", bb);
-            if let Some(unwind) = body[bb].terminator_mut().unwind_mut() {
-                if let UnwindAction::Cleanup(unwind_bb) = *unwind {
-                    if nop_landing_pads.contains(unwind_bb) {
-                        debug!("    removing noop landing pad");
-                        landing_pads_removed += 1;
-                        *unwind = UnwindAction::Continue;
-                    }
-                }
+            if let Some(unwind) = body[bb].terminator_mut().unwind_mut()
+                && let UnwindAction::Cleanup(unwind_bb) = *unwind
+                && nop_landing_pads.contains(unwind_bb)
+            {
+                debug!("    removing noop landing pad");
+                landing_pads_removed += 1;
+                *unwind = UnwindAction::Continue;
             }
 
             body[bb].terminator_mut().successors_mut(|target| {
diff --git a/compiler/rustc_mir_transform/src/remove_unneeded_drops.rs b/compiler/rustc_mir_transform/src/remove_unneeded_drops.rs
index 43f80508e4a..6c2dfc59da2 100644
--- a/compiler/rustc_mir_transform/src/remove_unneeded_drops.rs
+++ b/compiler/rustc_mir_transform/src/remove_unneeded_drops.rs
@@ -4,7 +4,13 @@
 //! useful because (unlike MIR building) it runs after type checking, so it can make use of
 //! `TypingMode::PostAnalysis` to provide more precise type information, especially about opaque
 //! types.
+//!
+//! When we're optimizing, we also remove calls to `drop_in_place<T>` when `T` doesn't need
+//! dropping, as those calls are essentially equivalent to `Drop` terminators. While the compiler
+//! doesn't insert them automatically, preferring the built-in terminator instead, they're common
+//! in generic code (such as `Vec::truncate`), so removing them from things like an inlined
+//! `Vec<u8>` is helpful.
 
+use rustc_hir::LangItem;
 use rustc_middle::mir::*;
 use rustc_middle::ty::TyCtxt;
 use tracing::{debug, trace};
@@ -21,15 +27,26 @@ impl<'tcx> crate::MirPass<'tcx> for RemoveUnneededDrops {
         let mut should_simplify = false;
         for block in body.basic_blocks.as_mut() {
             let terminator = block.terminator_mut();
-            if let TerminatorKind::Drop { place, target, .. } = terminator.kind {
-                let ty = place.ty(&body.local_decls, tcx);
-                if ty.ty.needs_drop(tcx, typing_env) {
-                    continue;
+            let (ty, target) = match terminator.kind {
+                TerminatorKind::Drop { place, target, .. } => {
+                    (place.ty(&body.local_decls, tcx).ty, target)
+                }
+                TerminatorKind::Call { ref func, target: Some(target), .. }
+                    if tcx.sess.mir_opt_level() > 0
+                        && let Some((def_id, generics)) = func.const_fn_def()
+                        && tcx.is_lang_item(def_id, LangItem::DropInPlace) =>
+                {
+                    (generics.type_at(0), target)
                 }
-                debug!("SUCCESS: replacing `drop` with goto({:?})", target);
-                terminator.kind = TerminatorKind::Goto { target };
-                should_simplify = true;
+                _ => continue,
+            };
+
+            if ty.needs_drop(tcx, typing_env) {
+                continue;
             }
+            debug!("SUCCESS: replacing `drop` with goto({:?})", target);
+            terminator.kind = TerminatorKind::Goto { target };
+            should_simplify = true;
         }
 
         // if we applied optimizations, we potentially have some cfg to cleanup to
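
Whether a `drop_in_place` call can be removed comes down to the same `needs_drop` query used for `Drop` terminators. A stable-Rust probe of that predicate (illustrative, not the pass itself):

```rust
use std::mem::needs_drop;

fn main() {
    // `drop_in_place::<u8>` is a no-op, so a call to it can become a goto;
    // `drop_in_place::<String>` must run the destructor and has to stay.
    assert!(!needs_drop::<u8>());
    assert!(needs_drop::<String>());
}
```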
diff --git a/compiler/rustc_mir_transform/src/shim.rs b/compiler/rustc_mir_transform/src/shim.rs
index cdbc74cdfa8..c6760b3583f 100644
--- a/compiler/rustc_mir_transform/src/shim.rs
+++ b/compiler/rustc_mir_transform/src/shim.rs
@@ -75,7 +75,7 @@ fn make_shim<'tcx>(tcx: TyCtxt<'tcx>, instance: ty::InstanceKind<'tcx>) -> Body<
             build_call_shim(tcx, instance, Some(adjustment), CallKind::Direct(def_id))
         }
         ty::InstanceKind::FnPtrShim(def_id, ty) => {
-            let trait_ = tcx.trait_of_item(def_id).unwrap();
+            let trait_ = tcx.parent(def_id);
             // Supports `Fn` or `async Fn` traits.
             let adjustment = match tcx
                 .fn_trait_kind_from_def_id(trait_)
@@ -434,8 +434,8 @@ pub(super) struct DropShimElaborator<'a, 'tcx> {
 }
 
 impl fmt::Debug for DropShimElaborator<'_, '_> {
-    fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
-        Ok(())
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
+        f.debug_struct("DropShimElaborator").finish_non_exhaustive()
     }
 }
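
For reference, `finish_non_exhaustive` yields the `Type { .. }` rendering instead of the old empty output; a minimal standalone check:

```rust
use std::fmt;

struct DropShimElaborator;

impl fmt::Debug for DropShimElaborator {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("DropShimElaborator").finish_non_exhaustive()
    }
}

fn main() {
    assert_eq!(format!("{:?}", DropShimElaborator), "DropShimElaborator { .. }");
}
```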
 
diff --git a/compiler/rustc_mir_transform/src/simplify.rs b/compiler/rustc_mir_transform/src/simplify.rs
index db933da6413..468ef742dfb 100644
--- a/compiler/rustc_mir_transform/src/simplify.rs
+++ b/compiler/rustc_mir_transform/src/simplify.rs
@@ -225,6 +225,7 @@ impl<'a, 'tcx> CfgSimplifier<'a, 'tcx> {
             current = target;
         }
         let last = current;
+        *changed |= *start != last;
         *start = last;
         while let Some((current, mut terminator)) = terminators.pop() {
             let Terminator { kind: TerminatorKind::Goto { ref mut target }, .. } = terminator
diff --git a/compiler/rustc_mir_transform/src/sroa.rs b/compiler/rustc_mir_transform/src/sroa.rs
index 7c6ccc89c4f..38769885f36 100644
--- a/compiler/rustc_mir_transform/src/sroa.rs
+++ b/compiler/rustc_mir_transform/src/sroa.rs
@@ -1,4 +1,4 @@
-use rustc_abi::{FIRST_VARIANT, FieldIdx};
+use rustc_abi::FieldIdx;
 use rustc_data_structures::flat_map_in_place::FlatMapInPlace;
 use rustc_hir::LangItem;
 use rustc_index::IndexVec;
@@ -32,7 +32,7 @@ impl<'tcx> crate::MirPass<'tcx> for ScalarReplacementOfAggregates {
         let typing_env = body.typing_env(tcx);
         loop {
             debug!(?excluded);
-            let escaping = escaping_locals(tcx, typing_env, &excluded, body);
+            let escaping = escaping_locals(tcx, &excluded, body);
             debug!(?escaping);
             let replacements = compute_flattening(tcx, typing_env, body, escaping);
             debug!(?replacements);
@@ -64,7 +64,6 @@ impl<'tcx> crate::MirPass<'tcx> for ScalarReplacementOfAggregates {
 ///   client code.
 fn escaping_locals<'tcx>(
     tcx: TyCtxt<'tcx>,
-    typing_env: ty::TypingEnv<'tcx>,
     excluded: &DenseBitSet<Local>,
     body: &Body<'tcx>,
 ) -> DenseBitSet<Local> {
@@ -72,31 +71,16 @@ fn escaping_locals<'tcx>(
         if ty.is_union() || ty.is_enum() {
             return true;
         }
-        if let ty::Adt(def, _args) = ty.kind() {
-            if def.repr().simd() {
-                // Exclude #[repr(simd)] types so that they are not de-optimized into an array
-                return true;
-            }
-            if tcx.is_lang_item(def.did(), LangItem::DynMetadata) {
-                // codegen wants to see the `DynMetadata<T>`,
-                // not the inner reference-to-opaque-type.
-                return true;
-            }
-            // We already excluded unions and enums, so this ADT must have one variant
-            let variant = def.variant(FIRST_VARIANT);
-            if variant.fields.len() > 1 {
-                // If this has more than one field, it cannot be a wrapper that only provides a
-                // niche, so we do not want to automatically exclude it.
-                return false;
-            }
-            let Ok(layout) = tcx.layout_of(typing_env.as_query_input(ty)) else {
-                // We can't get the layout
-                return true;
-            };
-            if layout.layout.largest_niche().is_some() {
-                // This type has a niche
-                return true;
-            }
+        if let ty::Adt(def, _args) = ty.kind()
+            && (def.repr().simd() || tcx.is_lang_item(def.did(), LangItem::DynMetadata))
+        {
+            // Exclude #[repr(simd)] types so that they are not de-optimized into an array
+            // (MCP#838 banned projections into SIMD types, but if the value is unused
+            // this pass sees "all the uses are of the fields" and expands it.)
+
+            // codegen wants to see the `DynMetadata<T>`,
+            // not the inner reference-to-opaque-type.
+            return true;
         }
         // Default for non-ADTs
         false
diff --git a/compiler/rustc_mir_transform/src/ssa.rs b/compiler/rustc_mir_transform/src/ssa.rs
index d3b4b99e932..cd9a7f4a39d 100644
--- a/compiler/rustc_mir_transform/src/ssa.rs
+++ b/compiler/rustc_mir_transform/src/ssa.rs
@@ -293,10 +293,6 @@ impl<'tcx> Visitor<'tcx> for SsaVisitor<'_, 'tcx> {
 fn compute_copy_classes(ssa: &mut SsaLocals, body: &Body<'_>) {
     let mut direct_uses = std::mem::take(&mut ssa.direct_uses);
     let mut copies = IndexVec::from_fn_n(|l| l, body.local_decls.len());
-    // We must not unify two locals that are borrowed. But this is fine if one is borrowed and
-    // the other is not. This bitset is keyed by *class head* and contains whether any member of
-    // the class is borrowed.
-    let mut borrowed_classes = ssa.borrowed_locals().clone();
 
     for (local, rvalue, _) in ssa.assignments(body) {
         let (Rvalue::Use(Operand::Copy(place) | Operand::Move(place))
@@ -322,8 +318,12 @@ fn compute_copy_classes(ssa: &mut SsaLocals, body: &Body<'_>) {
         // visited before `local`, and we just have to copy the representing local.
         let head = copies[rhs];
 
-        // Do not unify borrowed locals.
-        if borrowed_classes.contains(local) || borrowed_classes.contains(head) {
+        // When propagating from `head` to `local` we need to ensure that changes to the address
+        // are not observable, so at most one of the locals involved can be borrowed. Additionally, we
+        // need to ensure that the definition of `head` dominates all uses of `local`. When `local`
+        // is borrowed, there might exist an indirect use of `local` that isn't dominated by the
+        // definition, so we have to reject copy propagation.
+        if ssa.borrowed_locals().contains(local) {
             continue;
         }
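
A source-level sketch of the observability concern behind rejecting borrowed locals: unifying two locals collapses their addresses, and a program holding a borrow can see that. This is an illustration of the principle, not MIR:

```rust
fn main() {
    let a = 1u8;
    let b = a; // copy-propagation candidate: `b := a`
    let pa: *const u8 = &a;
    let pb: *const u8 = &b;
    // If `b` were unified with `a` while borrowed, these two addresses
    // would collapse into one, which this comparison can observe.
    assert_ne!(pa, pb);
}
```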
 
@@ -339,21 +339,14 @@ fn compute_copy_classes(ssa: &mut SsaLocals, body: &Body<'_>) {
                     *h = RETURN_PLACE;
                 }
             }
-            if borrowed_classes.contains(head) {
-                borrowed_classes.insert(RETURN_PLACE);
-            }
         } else {
             copies[local] = head;
-            if borrowed_classes.contains(local) {
-                borrowed_classes.insert(head);
-            }
         }
         direct_uses[rhs] -= 1;
     }
 
     debug!(?copies);
     debug!(?direct_uses);
-    debug!(?borrowed_classes);
 
     // Invariant: `copies` must point to the head of an equivalence class.
     #[cfg(debug_assertions)]
@@ -362,13 +355,6 @@ fn compute_copy_classes(ssa: &mut SsaLocals, body: &Body<'_>) {
     }
     debug_assert_eq!(copies[RETURN_PLACE], RETURN_PLACE);
 
-    // Invariant: `borrowed_classes` must be true if any member of the class is borrowed.
-    #[cfg(debug_assertions)]
-    for &head in copies.iter() {
-        let any_borrowed = ssa.borrowed_locals.iter().any(|l| copies[l] == head);
-        assert_eq!(borrowed_classes.contains(head), any_borrowed);
-    }
-
     ssa.direct_uses = direct_uses;
     ssa.copy_classes = copies;
 }
diff --git a/compiler/rustc_mir_transform/src/unreachable_prop.rs b/compiler/rustc_mir_transform/src/unreachable_prop.rs
index 13fb5b3e56f..c417a9272f2 100644
--- a/compiler/rustc_mir_transform/src/unreachable_prop.rs
+++ b/compiler/rustc_mir_transform/src/unreachable_prop.rs
@@ -75,7 +75,7 @@ fn remove_successors_from_switch<'tcx>(
     let is_unreachable = |bb| unreachable_blocks.contains(&bb);
 
     // If there are multiple targets, we want to keep information about reachability for codegen.
-    // For example (see tests/codegen/match-optimizes-away.rs)
+    // For example (see tests/codegen-llvm/match-optimizes-away.rs)
     //
     // pub enum Two { A, B }
     // pub fn identity(x: Two) -> Two {
diff --git a/compiler/rustc_mir_transform/src/validate.rs b/compiler/rustc_mir_transform/src/validate.rs
index cbb9bbfd12f..99e4782e470 100644
--- a/compiler/rustc_mir_transform/src/validate.rs
+++ b/compiler/rustc_mir_transform/src/validate.rs
@@ -1,9 +1,9 @@
 //! Validates the MIR to ensure that invariants are upheld.
 
 use rustc_abi::{ExternAbi, FIRST_VARIANT, Size};
-use rustc_attr_data_structures::InlineAttr;
 use rustc_data_structures::fx::{FxHashMap, FxHashSet};
 use rustc_hir::LangItem;
+use rustc_hir::attrs::InlineAttr;
 use rustc_index::IndexVec;
 use rustc_index::bit_set::DenseBitSet;
 use rustc_infer::infer::TyCtxtInferExt;
@@ -80,15 +80,14 @@ impl<'tcx> crate::MirPass<'tcx> for Validator {
             cfg_checker.fail(location, msg);
         }
 
-        if let MirPhase::Runtime(_) = body.phase {
-            if let ty::InstanceKind::Item(_) = body.source.instance {
-                if body.has_free_regions() {
-                    cfg_checker.fail(
-                        Location::START,
-                        format!("Free regions in optimized {} MIR", body.phase.name()),
-                    );
-                }
-            }
+        if let MirPhase::Runtime(_) = body.phase
+            && let ty::InstanceKind::Item(_) = body.source.instance
+            && body.has_free_regions()
+        {
+            cfg_checker.fail(
+                Location::START,
+                format!("Free regions in optimized {} MIR", body.phase.name()),
+            );
         }
     }
 
@@ -119,14 +118,16 @@ impl<'a, 'tcx> CfgChecker<'a, 'tcx> {
     #[track_caller]
     fn fail(&self, location: Location, msg: impl AsRef<str>) {
         // We might see broken MIR when other errors have already occurred.
-        assert!(
-            self.tcx.dcx().has_errors().is_some(),
-            "broken MIR in {:?} ({}) at {:?}:\n{}",
-            self.body.source.instance,
-            self.when,
-            location,
-            msg.as_ref(),
-        );
+        if self.tcx.dcx().has_errors().is_none() {
+            span_bug!(
+                self.body.source_info(location).span,
+                "broken MIR in {:?} ({}) at {:?}:\n{}",
+                self.body.source.instance,
+                self.when,
+                location,
+                msg.as_ref(),
+            );
+        }
     }
 
     fn check_edge(&mut self, location: Location, bb: BasicBlock, edge_kind: EdgeKind) {
@@ -719,6 +720,15 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
                             );
                         }
 
+                        if adt_def.repr().simd() {
+                            self.fail(
+                                location,
+                                format!(
+                                    "Projecting into SIMD type {adt_def:?} is banned by MCP#838"
+                                ),
+                            );
+                        }
+
                         let var = parent_ty.variant_index.unwrap_or(FIRST_VARIANT);
                         let Some(field) = adt_def.variant(var).fields.get(f) else {
                             fail_out_of_bounds(self, location);