Diffstat (limited to 'compiler/rustc_mir_transform/src')
-rw-r--r--  compiler/rustc_mir_transform/src/coroutine.rs                       2
-rw-r--r--  compiler/rustc_mir_transform/src/coverage/mappings.rs               6
-rw-r--r--  compiler/rustc_mir_transform/src/coverage/mod.rs                    6
-rw-r--r--  compiler/rustc_mir_transform/src/dataflow_const_prop.rs            48
-rw-r--r--  compiler/rustc_mir_transform/src/errors.rs                         22
-rw-r--r--  compiler/rustc_mir_transform/src/gvn.rs                            23
-rw-r--r--  compiler/rustc_mir_transform/src/inline.rs                          2
-rw-r--r--  compiler/rustc_mir_transform/src/instsimplify.rs                   11
-rw-r--r--  compiler/rustc_mir_transform/src/jump_threading.rs                  2
-rw-r--r--  compiler/rustc_mir_transform/src/known_panics_lint.rs              77
-rw-r--r--  compiler/rustc_mir_transform/src/lib.rs                            16
-rw-r--r--  compiler/rustc_mir_transform/src/lower_intrinsics.rs               17
-rw-r--r--  compiler/rustc_mir_transform/src/promote_consts.rs                  4
-rw-r--r--  compiler/rustc_mir_transform/src/shim.rs                            4
-rw-r--r--  compiler/rustc_mir_transform/src/shim/async_destructor_ctor.rs     37
-rw-r--r--  compiler/rustc_mir_transform/src/sroa.rs                            5
-rw-r--r--  compiler/rustc_mir_transform/src/validate.rs                     1525
17 files changed, 1699 insertions(+), 108 deletions(-)
diff --git a/compiler/rustc_mir_transform/src/coroutine.rs b/compiler/rustc_mir_transform/src/coroutine.rs
index a3e6e5a5a91..ade2ac0080e 100644
--- a/compiler/rustc_mir_transform/src/coroutine.rs
+++ b/compiler/rustc_mir_transform/src/coroutine.rs
@@ -69,7 +69,7 @@ use rustc_middle::mir::visit::{MutVisitor, PlaceContext, Visitor};
 use rustc_middle::mir::*;
 use rustc_middle::ty::CoroutineArgs;
 use rustc_middle::ty::InstanceDef;
-use rustc_middle::ty::{self, Ty, TyCtxt};
+use rustc_middle::ty::{self, CoroutineArgsExt, Ty, TyCtxt};
 use rustc_middle::{bug, span_bug};
 use rustc_mir_dataflow::impls::{
     MaybeBorrowedLocals, MaybeLiveLocals, MaybeRequiresStorage, MaybeStorageLive,
diff --git a/compiler/rustc_mir_transform/src/coverage/mappings.rs b/compiler/rustc_mir_transform/src/coverage/mappings.rs
index 61aabea1d8b..0e209757100 100644
--- a/compiler/rustc_mir_transform/src/coverage/mappings.rs
+++ b/compiler/rustc_mir_transform/src/coverage/mappings.rs
@@ -48,7 +48,7 @@ pub(super) struct MCDCDecision {
     pub(super) span: Span,
     pub(super) end_bcbs: BTreeSet<BasicCoverageBlock>,
     pub(super) bitmap_idx: u32,
-    pub(super) conditions_num: u16,
+    pub(super) num_conditions: u16,
     pub(super) decision_depth: u16,
 }
 
@@ -268,13 +268,13 @@ pub(super) fn extract_mcdc_mappings(
             // the bitmap, rounded up to a whole number of bytes.
             // The decision's "bitmap index" points to its first byte in the bitmap.
             let bitmap_idx = *mcdc_bitmap_bytes;
-            *mcdc_bitmap_bytes += (1_u32 << decision.conditions_num).div_ceil(8);
+            *mcdc_bitmap_bytes += (1_u32 << decision.num_conditions).div_ceil(8);
 
             Some(MCDCDecision {
                 span,
                 end_bcbs,
                 bitmap_idx,
-                conditions_num: decision.conditions_num as u16,
+                num_conditions: decision.num_conditions as u16,
                 decision_depth: decision.decision_depth,
             })
         },
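
For reference: the renamed `num_conditions` field drives the test-vector bitmap sizing in the hunk above. A minimal sketch of that byte accounting, with a hypothetical list of decisions (illustrative only, not rustc internals):

    fn main() {
        // Each decision reserves 2^num_conditions bits, rounded up to whole
        // bytes; bitmap_idx is the byte offset of its first byte.
        let decisions = [1u16, 3, 4]; // num_conditions per decision (made up)
        let mut mcdc_bitmap_bytes = 0u32;
        for num_conditions in decisions {
            let bitmap_idx = mcdc_bitmap_bytes;
            mcdc_bitmap_bytes += (1_u32 << num_conditions).div_ceil(8);
            println!("{num_conditions} conditions -> byte offset {bitmap_idx}");
        }
        assert_eq!(mcdc_bitmap_bytes, 1 + 1 + 2); // 2, 8, and 16 bits
    }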
diff --git a/compiler/rustc_mir_transform/src/coverage/mod.rs b/compiler/rustc_mir_transform/src/coverage/mod.rs
index 28e0c633d5a..419e39bc386 100644
--- a/compiler/rustc_mir_transform/src/coverage/mod.rs
+++ b/compiler/rustc_mir_transform/src/coverage/mod.rs
@@ -195,9 +195,9 @@ fn create_mappings<'tcx>(
     ));
 
     mappings.extend(mcdc_decisions.iter().filter_map(
-        |&mappings::MCDCDecision { span, bitmap_idx, conditions_num, .. }| {
+        |&mappings::MCDCDecision { span, bitmap_idx, num_conditions, .. }| {
             let code_region = region_for_span(span)?;
-            let kind = MappingKind::MCDCDecision(DecisionInfo { bitmap_idx, conditions_num });
+            let kind = MappingKind::MCDCDecision(DecisionInfo { bitmap_idx, num_conditions });
             Some(Mapping { kind, code_region })
         },
     ));
@@ -269,7 +269,7 @@ fn inject_mcdc_statements<'tcx>(
         span: _,
         ref end_bcbs,
         bitmap_idx,
-        conditions_num: _,
+        num_conditions: _,
         decision_depth,
     } in &extracted_mappings.mcdc_decisions
     {
diff --git a/compiler/rustc_mir_transform/src/dataflow_const_prop.rs b/compiler/rustc_mir_transform/src/dataflow_const_prop.rs
index 3d24a56cdd7..e88b727a21e 100644
--- a/compiler/rustc_mir_transform/src/dataflow_const_prop.rs
+++ b/compiler/rustc_mir_transform/src/dataflow_const_prop.rs
@@ -69,7 +69,7 @@ struct ConstAnalysis<'a, 'tcx> {
     map: Map,
     tcx: TyCtxt<'tcx>,
     local_decls: &'a LocalDecls<'tcx>,
-    ecx: InterpCx<'tcx, 'tcx, DummyMachine>,
+    ecx: InterpCx<'tcx, DummyMachine>,
     param_env: ty::ParamEnv<'tcx>,
 }
 
@@ -142,11 +142,10 @@ impl<'tcx> ValueAnalysis<'tcx> for ConstAnalysis<'_, 'tcx> {
                     _ => return,
                 };
                 if let Some(variant_target_idx) = variant_target {
-                    for (field_index, operand) in operands.iter().enumerate() {
-                        if let Some(field) = self.map().apply(
-                            variant_target_idx,
-                            TrackElem::Field(FieldIdx::from_usize(field_index)),
-                        ) {
+                    for (field_index, operand) in operands.iter_enumerated() {
+                        if let Some(field) =
+                            self.map().apply(variant_target_idx, TrackElem::Field(field_index))
+                        {
                             self.assign_operand(state, field, operand);
                         }
                     }
@@ -165,9 +164,7 @@ impl<'tcx> ValueAnalysis<'tcx> for ConstAnalysis<'_, 'tcx> {
                     }
                 }
             }
-            Rvalue::BinaryOp(overflowing_op, box (left, right))
-                if let Some(op) = overflowing_op.overflowing_to_wrapping() =>
-            {
+            Rvalue::BinaryOp(op, box (left, right)) if op.is_overflowing() => {
                 // Flood everything now, so we can use `insert_value_idx` directly later.
                 state.flood(target.as_ref(), self.map());
 
@@ -177,7 +174,7 @@ impl<'tcx> ValueAnalysis<'tcx> for ConstAnalysis<'_, 'tcx> {
                 let overflow_target = self.map().apply(target, TrackElem::Field(1_u32.into()));
 
                 if value_target.is_some() || overflow_target.is_some() {
-                    let (val, overflow) = self.binary_op(state, op, left, right);
+                    let (val, overflow) = self.binary_op(state, *op, left, right);
 
                     if let Some(value_target) = value_target {
                         // We have flooded `target` earlier.
@@ -186,7 +183,7 @@ impl<'tcx> ValueAnalysis<'tcx> for ConstAnalysis<'_, 'tcx> {
                     if let Some(overflow_target) = overflow_target {
                         let overflow = match overflow {
                             FlatSet::Top => FlatSet::Top,
-                            FlatSet::Elem(overflow) => FlatSet::Elem(Scalar::from_bool(overflow)),
+                            FlatSet::Elem(overflow) => FlatSet::Elem(overflow),
                             FlatSet::Bottom => FlatSet::Bottom,
                         };
                         // We have flooded `target` earlier.
@@ -266,15 +263,16 @@ impl<'tcx> ValueAnalysis<'tcx> for ConstAnalysis<'_, 'tcx> {
                     FlatSet::Top => FlatSet::Top,
                 }
             }
-            Rvalue::BinaryOp(op, box (left, right)) => {
+            Rvalue::BinaryOp(op, box (left, right)) if !op.is_overflowing() => {
                 // Overflows must be ignored here.
+                // The overflowing operators are handled in `handle_assign`.
                 let (val, _overflow) = self.binary_op(state, *op, left, right);
                 val
             }
             Rvalue::UnaryOp(op, operand) => match self.eval_operand(operand, state) {
                 FlatSet::Elem(value) => self
                     .ecx
-                    .wrapping_unary_op(*op, &value)
+                    .unary_op(*op, &value)
                     .map_or(FlatSet::Top, |val| self.wrap_immediate(*val)),
                 FlatSet::Bottom => FlatSet::Bottom,
                 FlatSet::Top => FlatSet::Top,
@@ -439,7 +437,7 @@ impl<'a, 'tcx> ConstAnalysis<'a, 'tcx> {
         op: BinOp,
         left: &Operand<'tcx>,
         right: &Operand<'tcx>,
-    ) -> (FlatSet<Scalar>, FlatSet<bool>) {
+    ) -> (FlatSet<Scalar>, FlatSet<Scalar>) {
         let left = self.eval_operand(left, state);
         let right = self.eval_operand(right, state);
 
@@ -447,9 +445,17 @@ impl<'a, 'tcx> ConstAnalysis<'a, 'tcx> {
             (FlatSet::Bottom, _) | (_, FlatSet::Bottom) => (FlatSet::Bottom, FlatSet::Bottom),
             // Both sides are known, do the actual computation.
             (FlatSet::Elem(left), FlatSet::Elem(right)) => {
-                match self.ecx.overflowing_binary_op(op, &left, &right) {
-                    Ok((val, overflow)) => {
-                        (FlatSet::Elem(val.to_scalar()), FlatSet::Elem(overflow))
+                match self.ecx.binary_op(op, &left, &right) {
+                    // Ideally this would return an Immediate, since it's sometimes
+                    // a pair and sometimes not. But as a hack we always return a pair
+                    // and just make the 2nd component `Bottom` when it does not exist.
+                    Ok(val) => {
+                        if matches!(val.layout.abi, Abi::ScalarPair(..)) {
+                            let (val, overflow) = val.to_scalar_pair();
+                            (FlatSet::Elem(val), FlatSet::Elem(overflow))
+                        } else {
+                            (FlatSet::Elem(val.to_scalar()), FlatSet::Bottom)
+                        }
                     }
                     _ => (FlatSet::Top, FlatSet::Top),
                 }
@@ -475,7 +481,7 @@ impl<'a, 'tcx> ConstAnalysis<'a, 'tcx> {
                         (FlatSet::Elem(arg_scalar), FlatSet::Bottom)
                     }
                     BinOp::Mul if layout.ty.is_integral() && arg_value == 0 => {
-                        (FlatSet::Elem(arg_scalar), FlatSet::Elem(false))
+                        (FlatSet::Elem(arg_scalar), FlatSet::Elem(Scalar::from_bool(false)))
                     }
                     _ => (FlatSet::Top, FlatSet::Top),
                 }
@@ -558,7 +564,7 @@ impl<'tcx, 'locals> Collector<'tcx, 'locals> {
 
     fn try_make_constant(
         &self,
-        ecx: &mut InterpCx<'tcx, 'tcx, DummyMachine>,
+        ecx: &mut InterpCx<'tcx, DummyMachine>,
         place: Place<'tcx>,
         state: &State<FlatSet<Scalar>>,
         map: &Map,
@@ -611,7 +617,7 @@ fn propagatable_scalar(
 
 #[instrument(level = "trace", skip(ecx, state, map))]
 fn try_write_constant<'tcx>(
-    ecx: &mut InterpCx<'_, 'tcx, DummyMachine>,
+    ecx: &mut InterpCx<'tcx, DummyMachine>,
     dest: &PlaceTy<'tcx>,
     place: PlaceIndex,
     ty: Ty<'tcx>,
@@ -829,7 +835,7 @@ impl<'tcx> MutVisitor<'tcx> for Patch<'tcx> {
 struct OperandCollector<'tcx, 'map, 'locals, 'a> {
     state: &'a State<FlatSet<Scalar>>,
     visitor: &'a mut Collector<'tcx, 'locals>,
-    ecx: &'map mut InterpCx<'tcx, 'tcx, DummyMachine>,
+    ecx: &'map mut InterpCx<'tcx, DummyMachine>,
     map: &'map Map,
 }
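
The interpreter's `binary_op` now returns a scalar pair for the `*WithOverflow` operators and a single scalar otherwise, which is why the analysis above inspects `val.layout.abi` to decide which case it is in. A plain-Rust analogue of that convention (a sketch, not the rustc API):

    // Overflowing forms yield (wrapped value, overflow flag); plain forms
    // yield just the value, with overflow ignored.
    enum BinResult {
        Pair(i32, bool),
        Single(i32),
    }

    fn eval_add(overflowing: bool, a: i32, b: i32) -> BinResult {
        if overflowing {
            let (v, o) = a.overflowing_add(b); // like MIR's AddWithOverflow
            BinResult::Pair(v, o)
        } else {
            BinResult::Single(a.wrapping_add(b)) // like MIR's Add
        }
    }

    fn main() {
        assert!(matches!(eval_add(true, i32::MAX, 1), BinResult::Pair(i32::MIN, true)));
        assert!(matches!(eval_add(false, i32::MAX, 1), BinResult::Single(i32::MIN)));
    }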
 
diff --git a/compiler/rustc_mir_transform/src/errors.rs b/compiler/rustc_mir_transform/src/errors.rs
index 0634e321ea3..b28dcb38cb6 100644
--- a/compiler/rustc_mir_transform/src/errors.rs
+++ b/compiler/rustc_mir_transform/src/errors.rs
@@ -1,4 +1,4 @@
-use rustc_errors::{codes::*, Diag, DiagMessage, LintDiagnostic};
+use rustc_errors::{codes::*, Diag, LintDiagnostic};
 use rustc_macros::{Diagnostic, LintDiagnostic, Subdiagnostic};
 use rustc_middle::mir::AssertKind;
 use rustc_middle::ty::TyCtxt;
@@ -50,18 +50,15 @@ pub(crate) enum AssertLintKind {
 
 impl<'a, P: std::fmt::Debug> LintDiagnostic<'a, ()> for AssertLint<P> {
     fn decorate_lint<'b>(self, diag: &'b mut Diag<'a, ()>) {
-        let message = self.assert_kind.diagnostic_message();
+        diag.primary_message(match self.lint_kind {
+            AssertLintKind::ArithmeticOverflow => fluent::mir_transform_arithmetic_overflow,
+            AssertLintKind::UnconditionalPanic => fluent::mir_transform_operation_will_panic,
+        });
+        let label = self.assert_kind.diagnostic_message();
         self.assert_kind.add_args(&mut |name, value| {
             diag.arg(name, value);
         });
-        diag.span_label(self.span, message);
-    }
-
-    fn msg(&self) -> DiagMessage {
-        match self.lint_kind {
-            AssertLintKind::ArithmeticOverflow => fluent::mir_transform_arithmetic_overflow,
-            AssertLintKind::UnconditionalPanic => fluent::mir_transform_operation_will_panic,
-        }
+        diag.span_label(self.span, label);
     }
 }
 
@@ -104,6 +101,7 @@ pub(crate) struct MustNotSupend<'tcx, 'a> {
 // Needed for def_path_str
 impl<'a> LintDiagnostic<'a, ()> for MustNotSupend<'_, '_> {
     fn decorate_lint<'b>(self, diag: &'b mut rustc_errors::Diag<'a, ()>) {
+        diag.primary_message(fluent::mir_transform_must_not_suspend);
         diag.span_label(self.yield_sp, fluent::_subdiag::label);
         if let Some(reason) = self.reason {
             diag.subdiagnostic(diag.dcx, reason);
@@ -113,10 +111,6 @@ impl<'a> LintDiagnostic<'a, ()> for MustNotSupend<'_, '_> {
         diag.arg("def_path", self.tcx.def_path_str(self.def_id));
         diag.arg("post", self.post);
     }
-
-    fn msg(&self) -> rustc_errors::DiagMessage {
-        fluent::mir_transform_must_not_suspend
-    }
 }
 
 #[derive(Subdiagnostic)]
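
Both `errors.rs` hunks follow the same migration: the standalone `msg()` method is removed, and the primary message is now set inside `decorate_lint` via `diag.primary_message(...)`. A toy mirror of the resulting shape, using hypothetical stand-ins rather than the real `rustc_errors` types:

    // Hypothetical stand-ins for Diag / LintDiagnostic, to show the shape only.
    struct Diag { primary: Option<String>, labels: Vec<String> }

    impl Diag {
        fn primary_message(&mut self, msg: &str) { self.primary = Some(msg.to_string()); }
        fn span_label(&mut self, label: &str) { self.labels.push(label.to_string()); }
    }

    trait LintDiagnostic {
        // After the change, one method both sets the message and decorates.
        fn decorate_lint(self, diag: &mut Diag);
    }

    struct ArithmeticOverflow;

    impl LintDiagnostic for ArithmeticOverflow {
        fn decorate_lint(self, diag: &mut Diag) {
            diag.primary_message("this arithmetic operation will overflow"); // was `msg()`
            diag.span_label("operation overflows here");
        }
    }

    fn main() {
        let mut diag = Diag { primary: None, labels: vec![] };
        ArithmeticOverflow.decorate_lint(&mut diag);
        assert!(diag.primary.is_some());
    }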
diff --git a/compiler/rustc_mir_transform/src/gvn.rs b/compiler/rustc_mir_transform/src/gvn.rs
index 1f3e407180b..fadb5edefdf 100644
--- a/compiler/rustc_mir_transform/src/gvn.rs
+++ b/compiler/rustc_mir_transform/src/gvn.rs
@@ -223,7 +223,7 @@ enum Value<'tcx> {
     NullaryOp(NullOp<'tcx>, Ty<'tcx>),
     UnaryOp(UnOp, VnIndex),
     BinaryOp(BinOp, VnIndex, VnIndex),
-    CheckedBinaryOp(BinOp, VnIndex, VnIndex),
+    CheckedBinaryOp(BinOp, VnIndex, VnIndex), // FIXME get rid of this, work like MIR instead
     Cast {
         kind: CastKind,
         value: VnIndex,
@@ -234,7 +234,7 @@ enum Value<'tcx> {
 
 struct VnState<'body, 'tcx> {
     tcx: TyCtxt<'tcx>,
-    ecx: InterpCx<'tcx, 'tcx, DummyMachine>,
+    ecx: InterpCx<'tcx, DummyMachine>,
     param_env: ty::ParamEnv<'tcx>,
     local_decls: &'body LocalDecls<'tcx>,
     /// Value stored in each local.
@@ -497,7 +497,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
             UnaryOp(un_op, operand) => {
                 let operand = self.evaluated[operand].as_ref()?;
                 let operand = self.ecx.read_immediate(operand).ok()?;
-                let (val, _) = self.ecx.overflowing_unary_op(un_op, &operand).ok()?;
+                let val = self.ecx.unary_op(un_op, &operand).ok()?;
                 val.into()
             }
             BinaryOp(bin_op, lhs, rhs) => {
@@ -505,7 +505,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
                 let lhs = self.ecx.read_immediate(lhs).ok()?;
                 let rhs = self.evaluated[rhs].as_ref()?;
                 let rhs = self.ecx.read_immediate(rhs).ok()?;
-                let (val, _) = self.ecx.overflowing_binary_op(bin_op, &lhs, &rhs).ok()?;
+                let val = self.ecx.binary_op(bin_op, &lhs, &rhs).ok()?;
                 val.into()
             }
             CheckedBinaryOp(bin_op, lhs, rhs) => {
@@ -513,14 +513,11 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
                 let lhs = self.ecx.read_immediate(lhs).ok()?;
                 let rhs = self.evaluated[rhs].as_ref()?;
                 let rhs = self.ecx.read_immediate(rhs).ok()?;
-                let (val, overflowed) = self.ecx.overflowing_binary_op(bin_op, &lhs, &rhs).ok()?;
-                let tuple = Ty::new_tup_from_iter(
-                    self.tcx,
-                    [val.layout.ty, self.tcx.types.bool].into_iter(),
-                );
-                let tuple = self.ecx.layout_of(tuple).ok()?;
-                ImmTy::from_scalar_pair(val.to_scalar(), Scalar::from_bool(overflowed), tuple)
-                    .into()
+                let val = self
+                    .ecx
+                    .binary_op(bin_op.wrapping_to_overflowing().unwrap(), &lhs, &rhs)
+                    .ok()?;
+                val.into()
             }
             Cast { kind, value, from: _, to } => match kind {
                 CastKind::IntToInt | CastKind::IntToFloat => {
@@ -1142,7 +1139,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
 }
 
 fn op_to_prop_const<'tcx>(
-    ecx: &mut InterpCx<'_, 'tcx, DummyMachine>,
+    ecx: &mut InterpCx<'tcx, DummyMachine>,
     op: &OpTy<'tcx>,
 ) -> Option<ConstValue<'tcx>> {
     // Do not attempt to propagate unsized locals.
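
In the `CheckedBinaryOp` arm above, the hand-rolled tuple construction is replaced by evaluating the overflowing form of the operator directly. In plain Rust terms, the pair a checked `Add` must produce is exactly what `overflowing_add` returns:

    fn main() {
        // What CheckedBinaryOp(Add, MAX, 1) evaluates to: the wrapped result
        // plus an overflow flag, as a single pair value.
        let (val, overflowed) = i32::MAX.overflowing_add(1);
        assert_eq!((val, overflowed), (i32::MIN, true));
    }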
diff --git a/compiler/rustc_mir_transform/src/inline.rs b/compiler/rustc_mir_transform/src/inline.rs
index 401056cd496..fe2237dd2e9 100644
--- a/compiler/rustc_mir_transform/src/inline.rs
+++ b/compiler/rustc_mir_transform/src/inline.rs
@@ -1,7 +1,6 @@
 //! Inlining pass for MIR functions
 use crate::deref_separator::deref_finder;
 use rustc_attr::InlineAttr;
-use rustc_const_eval::transform::validate::validate_types;
 use rustc_hir::def::DefKind;
 use rustc_hir::def_id::DefId;
 use rustc_index::bit_set::BitSet;
@@ -21,6 +20,7 @@ use rustc_target::spec::abi::Abi;
 use crate::cost_checker::CostChecker;
 use crate::simplify::simplify_cfg;
 use crate::util;
+use crate::validate::validate_types;
 use std::iter;
 use std::ops::{Range, RangeFrom};
 
diff --git a/compiler/rustc_mir_transform/src/instsimplify.rs b/compiler/rustc_mir_transform/src/instsimplify.rs
index f1adeab3f88..40db3e38fd3 100644
--- a/compiler/rustc_mir_transform/src/instsimplify.rs
+++ b/compiler/rustc_mir_transform/src/instsimplify.rs
@@ -9,7 +9,6 @@ use rustc_middle::ty::layout::ValidityRequirement;
 use rustc_middle::ty::{self, GenericArgsRef, ParamEnv, Ty, TyCtxt};
 use rustc_span::sym;
 use rustc_span::symbol::Symbol;
-use rustc_target::abi::FieldIdx;
 use rustc_target::spec::abi::Abi;
 
 pub struct InstSimplify;
@@ -124,7 +123,7 @@ impl<'tcx> InstSimplifyContext<'tcx, '_> {
 
     /// Transform `&(*a)` ==> `a`.
     fn simplify_ref_deref(&self, source_info: &SourceInfo, rvalue: &mut Rvalue<'tcx>) {
-        if let Rvalue::Ref(_, _, place) = rvalue {
+        if let Rvalue::Ref(_, _, place) | Rvalue::AddressOf(_, place) = rvalue {
             if let Some((base, ProjectionElem::Deref)) = place.as_ref().last_projection() {
                 if rvalue.ty(self.local_decls, self.tcx) != base.ty(self.local_decls, self.tcx).ty {
                     return;
@@ -217,13 +216,11 @@ impl<'tcx> InstSimplifyContext<'tcx, '_> {
                     && let Some(place) = operand.place()
                 {
                     let variant = adt_def.non_enum_variant();
-                    for (i, field) in variant.fields.iter().enumerate() {
+                    for (i, field) in variant.fields.iter_enumerated() {
                         let field_ty = field.ty(self.tcx, args);
                         if field_ty == *cast_ty {
-                            let place = place.project_deeper(
-                                &[ProjectionElem::Field(FieldIdx::from_usize(i), *cast_ty)],
-                                self.tcx,
-                            );
+                            let place = place
+                                .project_deeper(&[ProjectionElem::Field(i, *cast_ty)], self.tcx);
                             let operand = if operand.is_move() {
                                 Operand::Move(place)
                             } else {
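
The `simplify_ref_deref` change extends the `&(*a)` ==> `a` rewrite to raw-pointer reborrows (`Rvalue::AddressOf`). In surface Rust, both of these now hit the same simplification (a sketch, assuming the types line up as the guard requires):

    use std::ptr::addr_of;

    fn main() {
        let x = 5i32;
        let a: &i32 = &x;
        let b = &*a;                     // Rvalue::Ref of a Deref projection: folds to `a`
        let p: *const i32 = a;
        let q = unsafe { addr_of!(*p) }; // Rvalue::AddressOf of a Deref: now folds too
        assert_eq!(*b, unsafe { *q });
    }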
diff --git a/compiler/rustc_mir_transform/src/jump_threading.rs b/compiler/rustc_mir_transform/src/jump_threading.rs
index ae807655b68..23cc0c46e73 100644
--- a/compiler/rustc_mir_transform/src/jump_threading.rs
+++ b/compiler/rustc_mir_transform/src/jump_threading.rs
@@ -155,7 +155,7 @@ struct ThreadingOpportunity {
 struct TOFinder<'tcx, 'a> {
     tcx: TyCtxt<'tcx>,
     param_env: ty::ParamEnv<'tcx>,
-    ecx: InterpCx<'tcx, 'tcx, DummyMachine>,
+    ecx: InterpCx<'tcx, DummyMachine>,
     body: &'a Body<'tcx>,
     map: &'a Map,
     loop_headers: &'a BitSet<BasicBlock>,
diff --git a/compiler/rustc_mir_transform/src/known_panics_lint.rs b/compiler/rustc_mir_transform/src/known_panics_lint.rs
index 38fc37a3a31..8b46658b322 100644
--- a/compiler/rustc_mir_transform/src/known_panics_lint.rs
+++ b/compiler/rustc_mir_transform/src/known_panics_lint.rs
@@ -64,7 +64,7 @@ impl<'tcx> MirLint<'tcx> for KnownPanicsLint {
 /// Visits MIR nodes, performs const propagation
 /// and runs lint checks as it goes
 struct ConstPropagator<'mir, 'tcx> {
-    ecx: InterpCx<'mir, 'tcx, DummyMachine>,
+    ecx: InterpCx<'tcx, DummyMachine>,
     tcx: TyCtxt<'tcx>,
     param_env: ParamEnv<'tcx>,
     worklist: Vec<BasicBlock>,
@@ -102,8 +102,12 @@ impl<'tcx> Value<'tcx> {
                 }
                 (PlaceElem::Index(idx), Value::Aggregate { fields, .. }) => {
                     let idx = prop.get_const(idx.into())?.immediate()?;
-                    let idx = prop.ecx.read_target_usize(idx).ok()?;
-                    fields.get(FieldIdx::from_u32(idx.try_into().ok()?)).unwrap_or(&Value::Uninit)
+                    let idx = prop.ecx.read_target_usize(idx).ok()?.try_into().ok()?;
+                    if idx <= FieldIdx::MAX_AS_U32 {
+                        fields.get(FieldIdx::from_u32(idx)).unwrap_or(&Value::Uninit)
+                    } else {
+                        return None;
+                    }
                 }
                 (
                     PlaceElem::ConstantIndex { offset, min_length: _, from_end: false },
@@ -304,20 +308,25 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
 
     fn check_unary_op(&mut self, op: UnOp, arg: &Operand<'tcx>, location: Location) -> Option<()> {
         let arg = self.eval_operand(arg)?;
-        if let (val, true) = self.use_ecx(|this| {
-            let val = this.ecx.read_immediate(&arg)?;
-            let (_res, overflow) = this.ecx.overflowing_unary_op(op, &val)?;
-            Ok((val, overflow))
-        })? {
-            // `AssertKind` only has an `OverflowNeg` variant, so make sure that is
-            // appropriate to use.
-            assert_eq!(op, UnOp::Neg, "Neg is the only UnOp that can overflow");
-            self.report_assert_as_lint(
-                location,
-                AssertLintKind::ArithmeticOverflow,
-                AssertKind::OverflowNeg(val.to_const_int()),
-            );
-            return None;
+        // The only operator that can overflow is `Neg`.
+        if op == UnOp::Neg && arg.layout.ty.is_integral() {
+            // Compute this as `0 - arg` so we can use `SubWithOverflow` to check for overflow.
+            let (arg, overflow) = self.use_ecx(|this| {
+                let arg = this.ecx.read_immediate(&arg)?;
+                let (_res, overflow) = this
+                    .ecx
+                    .binary_op(BinOp::SubWithOverflow, &ImmTy::from_int(0, arg.layout), &arg)?
+                    .to_scalar_pair();
+                Ok((arg, overflow.to_bool()?))
+            })?;
+            if overflow {
+                self.report_assert_as_lint(
+                    location,
+                    AssertLintKind::ArithmeticOverflow,
+                    AssertKind::OverflowNeg(arg.to_const_int()),
+                );
+                return None;
+            }
         }
 
         Some(())
@@ -363,11 +372,20 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
             }
         }
 
-        if let (Some(l), Some(r)) = (l, r) {
-            // The remaining operators are handled through `overflowing_binary_op`.
+        // Div/Rem are handled via the assertions they trigger.
+        // But for Add/Sub/Mul, those assertions only exist in debug builds, and we want to
+        // lint in release builds as well, so we check the operation itself instead.
+        // Normalize to the "overflowing" form of the operator, then ensure that it
+        // really is an overflowing operator.
+        let op = op.wrapping_to_overflowing().unwrap_or(op);
+        // The remaining operators are handled through `wrapping_to_overflowing`.
+        if let (Some(l), Some(r)) = (l, r)
+            && l.layout.ty.is_integral()
+            && op.is_overflowing()
+        {
             if self.use_ecx(|this| {
-                let (_res, overflow) = this.ecx.overflowing_binary_op(op, &l, &r)?;
-                Ok(overflow)
+                let (_res, overflow) = this.ecx.binary_op(op, &l, &r)?.to_scalar_pair();
+                overflow.to_bool()
             })? {
                 self.report_assert_as_lint(
                     location,
@@ -399,8 +417,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
             }
             Rvalue::BinaryOp(op, box (left, right)) => {
                 trace!("checking BinaryOp(op = {:?}, left = {:?}, right = {:?})", op, left, right);
-                let op = op.overflowing_to_wrapping().unwrap_or(*op);
-                self.check_binary_op(op, left, right, location)?;
+                self.check_binary_op(*op, left, right, location)?;
             }
 
             // Do not try creating references (#67862)
@@ -547,17 +564,15 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
                 let right = self.eval_operand(right)?;
                 let right = self.use_ecx(|this| this.ecx.read_immediate(&right))?;
 
-                if let Some(bin_op) = bin_op.overflowing_to_wrapping() {
-                    let (val, overflowed) =
-                        self.use_ecx(|this| this.ecx.overflowing_binary_op(bin_op, &left, &right))?;
-                    let overflowed = ImmTy::from_bool(overflowed, self.tcx);
+                let val = self.use_ecx(|this| this.ecx.binary_op(bin_op, &left, &right))?;
+                if matches!(val.layout.abi, Abi::ScalarPair(..)) {
+                    // FIXME `Value` should properly support pairs in `Immediate`... but currently it does not.
+                    let (val, overflow) = val.to_pair(&self.ecx);
                     Value::Aggregate {
                         variant: VariantIdx::ZERO,
-                        fields: [Value::from(val), overflowed.into()].into_iter().collect(),
+                        fields: [val.into(), overflow.into()].into_iter().collect(),
                     }
                 } else {
-                    let val =
-                        self.use_ecx(|this| this.ecx.wrapping_binary_op(bin_op, &left, &right))?;
                     val.into()
                 }
             }
@@ -566,7 +581,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
                 let operand = self.eval_operand(operand)?;
                 let val = self.use_ecx(|this| this.ecx.read_immediate(&operand))?;
 
-                let val = self.use_ecx(|this| this.ecx.wrapping_unary_op(un_op, &val))?;
+                let val = self.use_ecx(|this| this.ecx.unary_op(un_op, &val))?;
                 val.into()
             }
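
The rewritten `check_unary_op` models `-arg` as `0 - arg` so that the generic `SubWithOverflow` machinery reports the overflow. For integers this fires only on the minimum value, as a plain-Rust check shows:

    fn main() {
        // -i32::MIN overflows; computing it as 0 - arg exposes that via
        // overflowing_sub, the plain-Rust analogue of SubWithOverflow.
        let (wrapped, overflow) = 0i32.overflowing_sub(i32::MIN);
        assert_eq!((wrapped, overflow), (i32::MIN, true));
        // Every other value negates cleanly:
        assert_eq!(0i32.overflowing_sub(5), (-5, false));
    }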
 
diff --git a/compiler/rustc_mir_transform/src/lib.rs b/compiler/rustc_mir_transform/src/lib.rs
index 9af48f0bad2..a8741254ffb 100644
--- a/compiler/rustc_mir_transform/src/lib.rs
+++ b/compiler/rustc_mir_transform/src/lib.rs
@@ -109,9 +109,9 @@ mod simplify_comparison_integral;
 mod sroa;
 mod unreachable_enum_branching;
 mod unreachable_prop;
+mod validate;
 
-use rustc_const_eval::transform::check_consts::{self, ConstCx};
-use rustc_const_eval::transform::validate;
+use rustc_const_eval::check_consts::{self, ConstCx};
 use rustc_mir_dataflow::rustc_peek;
 
 rustc_fluent_macro::fluent_messages! { "../messages.ftl" }
@@ -211,7 +211,7 @@ fn remap_mir_for_const_eval_select<'tcx>(
 }
 
 fn is_mir_available(tcx: TyCtxt<'_>, def_id: LocalDefId) -> bool {
-    tcx.mir_keys(()).contains(&def_id)
+    tcx.hir().maybe_body_owned_by(def_id).is_some()
 }
 
 /// Finds the full set of `DefId`s within the current crate that have
@@ -222,6 +222,16 @@ fn mir_keys(tcx: TyCtxt<'_>, (): ()) -> FxIndexSet<LocalDefId> {
     // All body-owners have MIR associated with them.
     set.extend(tcx.hir().body_owners());
 
+    // Inline consts' bodies are created during typeck rather than during AST
+    // lowering, which is where all other bodies so far are created.
+    for def_id in tcx.hir().body_owners() {
+        // Incremental performance optimization: only load typeck results for things that actually have inline consts
+        if tcx.hir_owner_nodes(tcx.hir().body_owned_by(def_id).id().hir_id.owner).has_inline_consts
+        {
+            set.extend(tcx.typeck(def_id).inline_consts.values())
+        }
+    }
+
     // Additionally, tuple struct/variant constructors have MIR, but
     // they don't have a BodyId, so we need to build them separately.
     struct GatherCtors<'a> {
diff --git a/compiler/rustc_mir_transform/src/lower_intrinsics.rs b/compiler/rustc_mir_transform/src/lower_intrinsics.rs
index 221301b2ceb..3ffc447217d 100644
--- a/compiler/rustc_mir_transform/src/lower_intrinsics.rs
+++ b/compiler/rustc_mir_transform/src/lower_intrinsics.rs
@@ -316,6 +316,23 @@ impl<'tcx> MirPass<'tcx> for LowerIntrinsics {
 
                         terminator.kind = TerminatorKind::Goto { target };
                     }
+                    sym::ptr_metadata => {
+                        let Ok([ptr]) = <[_; 1]>::try_from(std::mem::take(args)) else {
+                            span_bug!(
+                                terminator.source_info.span,
+                                "Wrong number of arguments for ptr_metadata intrinsic",
+                            );
+                        };
+                        let target = target.unwrap();
+                        block.statements.push(Statement {
+                            source_info: terminator.source_info,
+                            kind: StatementKind::Assign(Box::new((
+                                *destination,
+                                Rvalue::UnaryOp(UnOp::PtrMetadata, ptr.node),
+                            ))),
+                        });
+                        terminator.kind = TerminatorKind::Goto { target };
+                    }
                     _ => {}
                 }
             }
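
`ptr_metadata` is the intrinsic behind the unstable `std::ptr::metadata`; after this change it lowers to a plain `UnOp::PtrMetadata` rvalue instead of remaining a call terminator. A nightly-only sketch of what the intrinsic computes:

    #![feature(ptr_metadata)] // nightly only

    fn main() {
        let slice: &[u8] = &[1, 2, 3];
        // For a slice pointer the metadata is its length; for thin pointers it is ().
        let len = std::ptr::metadata(slice as *const [u8]);
        assert_eq!(len, 3);
    }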
diff --git a/compiler/rustc_mir_transform/src/promote_consts.rs b/compiler/rustc_mir_transform/src/promote_consts.rs
index 34aa31baab7..7ec59cc983f 100644
--- a/compiler/rustc_mir_transform/src/promote_consts.rs
+++ b/compiler/rustc_mir_transform/src/promote_consts.rs
@@ -30,7 +30,7 @@ use std::assert_matches::assert_matches;
 use std::cell::Cell;
 use std::{cmp, iter, mem};
 
-use rustc_const_eval::transform::check_consts::{qualifs, ConstCx};
+use rustc_const_eval::check_consts::{qualifs, ConstCx};
 
 /// A `MirPass` for promotion.
 ///
@@ -464,7 +464,7 @@ impl<'tcx> Validator<'_, 'tcx> {
             Rvalue::UnaryOp(op, operand) => {
                 match op {
                     // These operations can never fail.
-                    UnOp::Neg | UnOp::Not => {}
+                    UnOp::Neg | UnOp::Not | UnOp::PtrMetadata => {}
                 }
 
                 self.validate_operand(operand)?;
diff --git a/compiler/rustc_mir_transform/src/shim.rs b/compiler/rustc_mir_transform/src/shim.rs
index dcf54ad2cfc..d03c2d18c0c 100644
--- a/compiler/rustc_mir_transform/src/shim.rs
+++ b/compiler/rustc_mir_transform/src/shim.rs
@@ -4,7 +4,7 @@ use rustc_hir::lang_items::LangItem;
 use rustc_middle::mir::*;
 use rustc_middle::query::Providers;
 use rustc_middle::ty::GenericArgs;
-use rustc_middle::ty::{self, CoroutineArgs, EarlyBinder, Ty, TyCtxt};
+use rustc_middle::ty::{self, CoroutineArgs, CoroutineArgsExt, EarlyBinder, Ty, TyCtxt};
 use rustc_middle::{bug, span_bug};
 use rustc_target::abi::{FieldIdx, VariantIdx, FIRST_VARIANT};
 
@@ -634,7 +634,7 @@ impl<'tcx> CloneShimBuilder<'tcx> {
         dest: Place<'tcx>,
         src: Place<'tcx>,
         coroutine_def_id: DefId,
-        args: CoroutineArgs<'tcx>,
+        args: CoroutineArgs<TyCtxt<'tcx>>,
     ) {
         self.block(vec![], TerminatorKind::Goto { target: self.block_index_offset(3) }, false);
         let unwind = self.block(vec![], TerminatorKind::UnwindResume, true);
diff --git a/compiler/rustc_mir_transform/src/shim/async_destructor_ctor.rs b/compiler/rustc_mir_transform/src/shim/async_destructor_ctor.rs
index f4481c22fc1..aa9c87d8f80 100644
--- a/compiler/rustc_mir_transform/src/shim/async_destructor_ctor.rs
+++ b/compiler/rustc_mir_transform/src/shim/async_destructor_ctor.rs
@@ -12,7 +12,7 @@ use rustc_middle::mir::{
     Terminator, TerminatorKind, UnwindAction, UnwindTerminateReason, RETURN_PLACE,
 };
 use rustc_middle::ty::adjustment::PointerCoercion;
-use rustc_middle::ty::util::Discr;
+use rustc_middle::ty::util::{AsyncDropGlueMorphology, Discr};
 use rustc_middle::ty::{self, Ty, TyCtxt};
 use rustc_middle::{bug, span_bug};
 use rustc_span::source_map::respan;
@@ -116,15 +116,25 @@ impl<'tcx> AsyncDestructorCtorShimBuilder<'tcx> {
     }
 
     fn build(self) -> Body<'tcx> {
-        let (tcx, def_id, Some(self_ty)) = (self.tcx, self.def_id, self.self_ty) else {
+        let (tcx, Some(self_ty)) = (self.tcx, self.self_ty) else {
             return self.build_zst_output();
         };
+        match self_ty.async_drop_glue_morphology(tcx) {
+            AsyncDropGlueMorphology::Noop => span_bug!(
+                self.span,
+                "async drop glue shim generator encountered type with noop async drop glue morphology"
+            ),
+            AsyncDropGlueMorphology::DeferredDropInPlace => {
+                return self.build_deferred_drop_in_place();
+            }
+            AsyncDropGlueMorphology::Custom => (),
+        }
 
         let surface_drop_kind = || {
-            let param_env = tcx.param_env_reveal_all_normalized(def_id);
-            if self_ty.has_surface_async_drop(tcx, param_env) {
+            let adt_def = self_ty.ty_adt_def()?;
+            if adt_def.async_destructor(tcx).is_some() {
                 Some(SurfaceDropKind::Async)
-            } else if self_ty.has_surface_drop(tcx, param_env) {
+            } else if adt_def.destructor(tcx).is_some() {
                 Some(SurfaceDropKind::Sync)
             } else {
                 None
@@ -267,6 +277,13 @@ impl<'tcx> AsyncDestructorCtorShimBuilder<'tcx> {
         self.return_()
     }
 
+    fn build_deferred_drop_in_place(mut self) -> Body<'tcx> {
+        self.put_self();
+        let deferred = self.combine_deferred_drop_in_place();
+        self.combine_fuse(deferred);
+        self.return_()
+    }
+
     fn build_fused_async_surface(mut self) -> Body<'tcx> {
         self.put_self();
         let surface = self.combine_async_surface();
@@ -441,6 +458,14 @@ impl<'tcx> AsyncDestructorCtorShimBuilder<'tcx> {
         )
     }
 
+    fn combine_deferred_drop_in_place(&mut self) -> Ty<'tcx> {
+        self.apply_combinator(
+            1,
+            LangItem::AsyncDropDeferredDropInPlace,
+            &[self.self_ty.unwrap().into()],
+        )
+    }
+
     fn combine_fuse(&mut self, inner_future_ty: Ty<'tcx>) -> Ty<'tcx> {
         self.apply_combinator(1, LangItem::AsyncDropFuse, &[inner_future_ty.into()])
     }
@@ -481,7 +506,7 @@ impl<'tcx> AsyncDestructorCtorShimBuilder<'tcx> {
         if let Some(ty) = self.self_ty {
             debug_assert_eq!(
                 output.ty(&self.locals, self.tcx),
-                ty.async_destructor_ty(self.tcx, self.param_env),
+                ty.async_destructor_ty(self.tcx),
                 "output async destructor types did not match for type: {ty:?}",
             );
         }
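
The builder now branches on the type's `AsyncDropGlueMorphology` up front: `Noop` types should never reach the shim, `DeferredDropInPlace` takes the short fuse-wrapped path added above, and only `Custom` runs the full combinator chain. A simplified mirror of that dispatch (hypothetical, for orientation only):

    // Hypothetical stand-in for rustc's AsyncDropGlueMorphology dispatch.
    enum Morphology { Noop, DeferredDropInPlace, Custom }

    fn build(m: Morphology) -> &'static str {
        match m {
            // The shim should never be instantiated for types with no glue.
            Morphology::Noop => panic!("noop async drop glue morphology in shim builder"),
            // Short path: fuse(deferred_drop_in_place(self)).
            Morphology::DeferredDropInPlace => "fuse(deferred_drop_in_place(self))",
            // Full path: the surface/either/chain combinator pipeline.
            Morphology::Custom => "full combinator chain",
        }
    }

    fn main() {
        assert_eq!(build(Morphology::DeferredDropInPlace), "fuse(deferred_drop_in_place(self))");
    }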
diff --git a/compiler/rustc_mir_transform/src/sroa.rs b/compiler/rustc_mir_transform/src/sroa.rs
index cdf3305b560..f19c34cae7a 100644
--- a/compiler/rustc_mir_transform/src/sroa.rs
+++ b/compiler/rustc_mir_transform/src/sroa.rs
@@ -70,6 +70,11 @@ fn escaping_locals<'tcx>(
                 // Exclude #[repr(simd)] types so that they are not de-optimized into an array
                 return true;
             }
+            if Some(def.did()) == tcx.lang_items().dyn_metadata() {
+                // codegen wants to see the `DynMetadata<T>`,
+                // not the inner reference-to-opaque-type.
+                return true;
+            }
             // We already excluded unions and enums, so this ADT must have one variant
             let variant = def.variant(FIRST_VARIANT);
             if variant.fields.len() > 1 {
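
`DynMetadata<T>` is what `PtrMetadata` yields for a trait-object pointer (a wrapped vtable pointer), and codegen needs to see it as one value, hence the new SROA exclusion above. A nightly-only sketch of where such a value comes from:

    #![feature(ptr_metadata)] // nightly only

    use std::fmt::Debug;
    use std::ptr::DynMetadata;

    fn main() {
        let value: &dyn Debug = &42i32;
        // Metadata of a trait-object pointer is a DynMetadata (vtable pointer),
        // which SROA must now leave intact instead of splitting it apart.
        let meta: DynMetadata<dyn Debug> = std::ptr::metadata(value as *const dyn Debug);
        assert_eq!(meta.size_of(), std::mem::size_of::<i32>());
    }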
diff --git a/compiler/rustc_mir_transform/src/validate.rs b/compiler/rustc_mir_transform/src/validate.rs
new file mode 100644
index 00000000000..3b4d4c93877
--- /dev/null
+++ b/compiler/rustc_mir_transform/src/validate.rs
@@ -0,0 +1,1525 @@
+//! Validates the MIR to ensure that invariants are upheld.
+
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_index::bit_set::BitSet;
+use rustc_index::IndexVec;
+use rustc_infer::traits::Reveal;
+use rustc_middle::mir::coverage::CoverageKind;
+use rustc_middle::mir::interpret::Scalar;
+use rustc_middle::mir::visit::{NonUseContext, PlaceContext, Visitor};
+use rustc_middle::mir::*;
+use rustc_middle::ty::adjustment::PointerCoercion;
+use rustc_middle::ty::{
+    self, CoroutineArgsExt, InstanceDef, ParamEnv, Ty, TyCtxt, TypeVisitableExt, Variance,
+};
+use rustc_middle::{bug, span_bug};
+use rustc_target::abi::{Size, FIRST_VARIANT};
+use rustc_target::spec::abi::Abi;
+
+use crate::util::is_within_packed;
+
+use crate::util::relate_types;
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+enum EdgeKind {
+    Unwind,
+    Normal,
+}
+
+pub struct Validator {
+    /// Describes at which point in the pipeline this validation is happening.
+    pub when: String,
+    /// The phase for which we are upholding the dialect. If the given phase forbids a specific
+    /// element, this validator will now emit errors if that specific element is encountered.
+    /// Note that phases that change the dialect cause all *following* phases to check the
+    /// invariants of the new dialect. A phase that changes dialects never checks the new invariants
+    /// itself.
+    pub mir_phase: MirPhase,
+}
+
+impl<'tcx> MirPass<'tcx> for Validator {
+    fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+        // FIXME(JakobDegen): These bodies are never instantiated in codegen anyway, so it's not
+        // terribly important that they pass the validator. However, I think other passes might
+        // still see them, in which case they might be surprised. It would probably be better if we
+        // didn't put this through the MIR pipeline at all.
+        if matches!(body.source.instance, InstanceDef::Intrinsic(..) | InstanceDef::Virtual(..)) {
+            return;
+        }
+        let def_id = body.source.def_id();
+        let mir_phase = self.mir_phase;
+        let param_env = match mir_phase.reveal() {
+            Reveal::UserFacing => tcx.param_env(def_id),
+            Reveal::All => tcx.param_env_reveal_all_normalized(def_id),
+        };
+
+        let can_unwind = if mir_phase <= MirPhase::Runtime(RuntimePhase::Initial) {
+            // In this case `AbortUnwindingCalls` haven't yet been executed.
+            true
+        } else if !tcx.def_kind(def_id).is_fn_like() {
+            true
+        } else {
+            let body_ty = tcx.type_of(def_id).skip_binder();
+            let body_abi = match body_ty.kind() {
+                ty::FnDef(..) => body_ty.fn_sig(tcx).abi(),
+                ty::Closure(..) => Abi::RustCall,
+                ty::CoroutineClosure(..) => Abi::RustCall,
+                ty::Coroutine(..) => Abi::Rust,
+                // No need to do MIR validation on error bodies
+                ty::Error(_) => return,
+                _ => {
+                    span_bug!(body.span, "unexpected body ty: {:?} phase {:?}", body_ty, mir_phase)
+                }
+            };
+
+            ty::layout::fn_can_unwind(tcx, Some(def_id), body_abi)
+        };
+
+        let mut cfg_checker = CfgChecker {
+            when: &self.when,
+            body,
+            tcx,
+            mir_phase,
+            unwind_edge_count: 0,
+            reachable_blocks: traversal::reachable_as_bitset(body),
+            value_cache: FxHashSet::default(),
+            can_unwind,
+        };
+        cfg_checker.visit_body(body);
+        cfg_checker.check_cleanup_control_flow();
+
+        // Also run the TypeChecker.
+        for (location, msg) in validate_types(tcx, self.mir_phase, param_env, body, body) {
+            cfg_checker.fail(location, msg);
+        }
+
+        if let MirPhase::Runtime(_) = body.phase {
+            if let ty::InstanceDef::Item(_) = body.source.instance {
+                if body.has_free_regions() {
+                    cfg_checker.fail(
+                        Location::START,
+                        format!("Free regions in optimized {} MIR", body.phase.name()),
+                    );
+                }
+            }
+        }
+
+        // Enforce that coroutine-closure layouts are identical.
+        if let Some(layout) = body.coroutine_layout_raw()
+            && let Some(by_move_body) = body.coroutine_by_move_body()
+            && let Some(by_move_layout) = by_move_body.coroutine_layout_raw()
+        {
+            // FIXME(async_closures): We could do other validation here?
+            if layout.variant_fields.len() != by_move_layout.variant_fields.len() {
+                cfg_checker.fail(
+                    Location::START,
+                    format!(
+                        "Coroutine layout has different number of variant fields from \
+                        by-move coroutine layout:\n\
+                        layout: {layout:#?}\n\
+                        by_move_layout: {by_move_layout:#?}",
+                    ),
+                );
+            }
+        }
+    }
+}
+
+struct CfgChecker<'a, 'tcx> {
+    when: &'a str,
+    body: &'a Body<'tcx>,
+    tcx: TyCtxt<'tcx>,
+    mir_phase: MirPhase,
+    unwind_edge_count: usize,
+    reachable_blocks: BitSet<BasicBlock>,
+    value_cache: FxHashSet<u128>,
+    // If `false`, then the MIR must not contain `UnwindAction::Continue` or
+    // `TerminatorKind::Resume`.
+    can_unwind: bool,
+}
+
+impl<'a, 'tcx> CfgChecker<'a, 'tcx> {
+    #[track_caller]
+    fn fail(&self, location: Location, msg: impl AsRef<str>) {
+        // We might see broken MIR when other errors have already occurred.
+        assert!(
+            self.tcx.dcx().has_errors().is_some(),
+            "broken MIR in {:?} ({}) at {:?}:\n{}",
+            self.body.source.instance,
+            self.when,
+            location,
+            msg.as_ref(),
+        );
+    }
+
+    fn check_edge(&mut self, location: Location, bb: BasicBlock, edge_kind: EdgeKind) {
+        if bb == START_BLOCK {
+            self.fail(location, "start block must not have predecessors")
+        }
+        if let Some(bb) = self.body.basic_blocks.get(bb) {
+            let src = self.body.basic_blocks.get(location.block).unwrap();
+            match (src.is_cleanup, bb.is_cleanup, edge_kind) {
+                // Non-cleanup blocks can jump to non-cleanup blocks along non-unwind edges
+                (false, false, EdgeKind::Normal)
+                // Cleanup blocks can jump to cleanup blocks along non-unwind edges
+                | (true, true, EdgeKind::Normal) => {}
+                // Non-cleanup blocks can jump to cleanup blocks along unwind edges
+                (false, true, EdgeKind::Unwind) => {
+                    self.unwind_edge_count += 1;
+                }
+                // All other jumps are invalid
+                _ => {
+                    self.fail(
+                        location,
+                        format!(
+                            "{:?} edge to {:?} violates unwind invariants (cleanup {:?} -> {:?})",
+                            edge_kind,
+                            bb,
+                            src.is_cleanup,
+                            bb.is_cleanup,
+                        )
+                    )
+                }
+            }
+        } else {
+            self.fail(location, format!("encountered jump to invalid basic block {bb:?}"))
+        }
+    }
+
+    fn check_cleanup_control_flow(&self) {
+        if self.unwind_edge_count <= 1 {
+            return;
+        }
+        let doms = self.body.basic_blocks.dominators();
+        let mut post_contract_node = FxHashMap::default();
+        // Reusing the allocation across invocations of the closure
+        let mut dom_path = vec![];
+        let mut get_post_contract_node = |mut bb| {
+            let root = loop {
+                if let Some(root) = post_contract_node.get(&bb) {
+                    break *root;
+                }
+                let parent = doms.immediate_dominator(bb).unwrap();
+                dom_path.push(bb);
+                if !self.body.basic_blocks[parent].is_cleanup {
+                    break bb;
+                }
+                bb = parent;
+            };
+            for bb in dom_path.drain(..) {
+                post_contract_node.insert(bb, root);
+            }
+            root
+        };
+
+        let mut parent = IndexVec::from_elem(None, &self.body.basic_blocks);
+        for (bb, bb_data) in self.body.basic_blocks.iter_enumerated() {
+            if !bb_data.is_cleanup || !self.reachable_blocks.contains(bb) {
+                continue;
+            }
+            let bb = get_post_contract_node(bb);
+            for s in bb_data.terminator().successors() {
+                let s = get_post_contract_node(s);
+                if s == bb {
+                    continue;
+                }
+                let parent = &mut parent[bb];
+                match parent {
+                    None => {
+                        *parent = Some(s);
+                    }
+                    Some(e) if *e == s => (),
+                    Some(e) => self.fail(
+                        Location { block: bb, statement_index: 0 },
+                        format!(
+                            "Cleanup control flow violation: The blocks dominated by {:?} have edges to both {:?} and {:?}",
+                            bb,
+                            s,
+                            *e
+                        )
+                    ),
+                }
+            }
+        }
+
+        // Check for cycles
+        let mut stack = FxHashSet::default();
+        for i in 0..parent.len() {
+            let mut bb = BasicBlock::from_usize(i);
+            stack.clear();
+            stack.insert(bb);
+            loop {
+                let Some(parent) = parent[bb].take() else { break };
+                let no_cycle = stack.insert(parent);
+                if !no_cycle {
+                    self.fail(
+                        Location { block: bb, statement_index: 0 },
+                        format!(
+                            "Cleanup control flow violation: Cycle involving edge {bb:?} -> {parent:?}",
+                        ),
+                    );
+                    break;
+                }
+                bb = parent;
+            }
+        }
+    }
+
+    fn check_unwind_edge(&mut self, location: Location, unwind: UnwindAction) {
+        let is_cleanup = self.body.basic_blocks[location.block].is_cleanup;
+        match unwind {
+            UnwindAction::Cleanup(unwind) => {
+                if is_cleanup {
+                    self.fail(location, "`UnwindAction::Cleanup` in cleanup block");
+                }
+                self.check_edge(location, unwind, EdgeKind::Unwind);
+            }
+            UnwindAction::Continue => {
+                if is_cleanup {
+                    self.fail(location, "`UnwindAction::Continue` in cleanup block");
+                }
+
+                if !self.can_unwind {
+                    self.fail(location, "`UnwindAction::Continue` in no-unwind function");
+                }
+            }
+            UnwindAction::Terminate(UnwindTerminateReason::InCleanup) => {
+                if !is_cleanup {
+                    self.fail(
+                        location,
+                        "`UnwindAction::Terminate(InCleanup)` in a non-cleanup block",
+                    );
+                }
+            }
+            // These are allowed everywhere.
+            UnwindAction::Unreachable | UnwindAction::Terminate(UnwindTerminateReason::Abi) => (),
+        }
+    }
+
+    fn is_critical_call_edge(&self, target: Option<BasicBlock>, unwind: UnwindAction) -> bool {
+        let Some(target) = target else { return false };
+        matches!(unwind, UnwindAction::Cleanup(_) | UnwindAction::Terminate(_))
+            && self.body.basic_blocks.predecessors()[target].len() > 1
+    }
+}
+
+impl<'a, 'tcx> Visitor<'tcx> for CfgChecker<'a, 'tcx> {
+    fn visit_local(&mut self, local: Local, _context: PlaceContext, location: Location) {
+        if self.body.local_decls.get(local).is_none() {
+            self.fail(
+                location,
+                format!("local {local:?} has no corresponding declaration in `body.local_decls`"),
+            );
+        }
+    }
+
+    fn visit_statement(&mut self, statement: &Statement<'tcx>, location: Location) {
+        match &statement.kind {
+            StatementKind::AscribeUserType(..) => {
+                if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
+                    self.fail(
+                        location,
+                        "`AscribeUserType` should have been removed after drop lowering phase",
+                    );
+                }
+            }
+            StatementKind::FakeRead(..) => {
+                if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
+                    self.fail(
+                        location,
+                        "`FakeRead` should have been removed after drop lowering phase",
+                    );
+                }
+            }
+            StatementKind::SetDiscriminant { .. } => {
+                if self.mir_phase < MirPhase::Runtime(RuntimePhase::Initial) {
+                    self.fail(location, "`SetDiscriminant`is not allowed until deaggregation");
+                }
+            }
+            StatementKind::Deinit(..) => {
+                if self.mir_phase < MirPhase::Runtime(RuntimePhase::Initial) {
+                    self.fail(location, "`Deinit`is not allowed until deaggregation");
+                }
+            }
+            StatementKind::Retag(kind, _) => {
+                // FIXME(JakobDegen) The validator should check that `self.mir_phase <
+                // DropsLowered`. However, this causes ICEs with generation of drop shims, which
+                // seem to fail to set their `MirPhase` correctly.
+                if matches!(kind, RetagKind::TwoPhase) {
+                    self.fail(location, format!("explicit `{kind:?}` is forbidden"));
+                }
+            }
+            StatementKind::Coverage(kind) => {
+                if self.mir_phase >= MirPhase::Analysis(AnalysisPhase::PostCleanup)
+                    && let CoverageKind::BlockMarker { .. } | CoverageKind::SpanMarker { .. } = kind
+                {
+                    self.fail(
+                        location,
+                        format!("{kind:?} should have been removed after analysis"),
+                    );
+                }
+            }
+            StatementKind::Assign(..)
+            | StatementKind::StorageLive(_)
+            | StatementKind::StorageDead(_)
+            | StatementKind::Intrinsic(_)
+            | StatementKind::ConstEvalCounter
+            | StatementKind::PlaceMention(..)
+            | StatementKind::Nop => {}
+        }
+
+        self.super_statement(statement, location);
+    }
+
+    fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, location: Location) {
+        match &terminator.kind {
+            TerminatorKind::Goto { target } => {
+                self.check_edge(location, *target, EdgeKind::Normal);
+            }
+            TerminatorKind::SwitchInt { targets, discr: _ } => {
+                for (_, target) in targets.iter() {
+                    self.check_edge(location, target, EdgeKind::Normal);
+                }
+                self.check_edge(location, targets.otherwise(), EdgeKind::Normal);
+
+                self.value_cache.clear();
+                self.value_cache.extend(targets.iter().map(|(value, _)| value));
+                let has_duplicates = targets.iter().len() != self.value_cache.len();
+                if has_duplicates {
+                    self.fail(
+                        location,
+                        format!(
+                            "duplicated values in `SwitchInt` terminator: {:?}",
+                            terminator.kind,
+                        ),
+                    );
+                }
+            }
+            TerminatorKind::Drop { target, unwind, .. } => {
+                self.check_edge(location, *target, EdgeKind::Normal);
+                self.check_unwind_edge(location, *unwind);
+            }
+            TerminatorKind::Call { args, destination, target, unwind, .. } => {
+                if let Some(target) = target {
+                    self.check_edge(location, *target, EdgeKind::Normal);
+                }
+                self.check_unwind_edge(location, *unwind);
+
+                // The code generation assumes that there are no critical call edges. The assumption
+                // is used to simplify inserting code that should be executed along the return edge
+                // from the call. FIXME(tmiasko): Since this is a strictly code generation concern,
+                // the code generation should be responsible for handling it.
+                if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Optimized)
+                    && self.is_critical_call_edge(*target, *unwind)
+                {
+                    self.fail(
+                        location,
+                        format!(
+                            "encountered critical edge in `Call` terminator {:?}",
+                            terminator.kind,
+                        ),
+                    );
+                }
+
+                // The call destination place and Operand::Move place used as an argument might be
+                // passed by a reference to the callee. Consequently they cannot be packed.
+                if is_within_packed(self.tcx, &self.body.local_decls, *destination).is_some() {
+                    // This is bad! The callee will expect the memory to be aligned.
+                    self.fail(
+                        location,
+                        format!(
+                            "encountered packed place in `Call` terminator destination: {:?}",
+                            terminator.kind,
+                        ),
+                    );
+                }
+                for arg in args {
+                    if let Operand::Move(place) = &arg.node {
+                        if is_within_packed(self.tcx, &self.body.local_decls, *place).is_some() {
+                            // This is bad! The callee will expect the memory to be aligned.
+                            self.fail(
+                                location,
+                                format!(
+                                    "encountered `Move` of a packed place in `Call` terminator: {:?}",
+                                    terminator.kind,
+                                ),
+                            );
+                        }
+                    }
+                }
+            }
+            TerminatorKind::Assert { target, unwind, .. } => {
+                self.check_edge(location, *target, EdgeKind::Normal);
+                self.check_unwind_edge(location, *unwind);
+            }
+            TerminatorKind::Yield { resume, drop, .. } => {
+                if self.body.coroutine.is_none() {
+                    self.fail(location, "`Yield` cannot appear outside coroutine bodies");
+                }
+                if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
+                    self.fail(location, "`Yield` should have been replaced by coroutine lowering");
+                }
+                self.check_edge(location, *resume, EdgeKind::Normal);
+                if let Some(drop) = drop {
+                    self.check_edge(location, *drop, EdgeKind::Normal);
+                }
+            }
+            TerminatorKind::FalseEdge { real_target, imaginary_target } => {
+                if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
+                    self.fail(
+                        location,
+                        "`FalseEdge` should have been removed after drop elaboration",
+                    );
+                }
+                self.check_edge(location, *real_target, EdgeKind::Normal);
+                self.check_edge(location, *imaginary_target, EdgeKind::Normal);
+            }
+            TerminatorKind::FalseUnwind { real_target, unwind } => {
+                if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
+                    self.fail(
+                        location,
+                        "`FalseUnwind` should have been removed after drop elaboration",
+                    );
+                }
+                self.check_edge(location, *real_target, EdgeKind::Normal);
+                self.check_unwind_edge(location, *unwind);
+            }
+            TerminatorKind::InlineAsm { targets, unwind, .. } => {
+                for &target in targets {
+                    self.check_edge(location, target, EdgeKind::Normal);
+                }
+                self.check_unwind_edge(location, *unwind);
+            }
+            TerminatorKind::CoroutineDrop => {
+                if self.body.coroutine.is_none() {
+                    self.fail(location, "`CoroutineDrop` cannot appear outside coroutine bodies");
+                }
+                if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
+                    self.fail(
+                        location,
+                        "`CoroutineDrop` should have been replaced by coroutine lowering",
+                    );
+                }
+            }
+            TerminatorKind::UnwindResume => {
+                let bb = location.block;
+                if !self.body.basic_blocks[bb].is_cleanup {
+                    self.fail(location, "Cannot `UnwindResume` from non-cleanup basic block")
+                }
+                if !self.can_unwind {
+                    self.fail(location, "Cannot `UnwindResume` in a function that cannot unwind")
+                }
+            }
+            TerminatorKind::UnwindTerminate(_) => {
+                let bb = location.block;
+                if !self.body.basic_blocks[bb].is_cleanup {
+                    self.fail(location, "Cannot `UnwindTerminate` from non-cleanup basic block")
+                }
+            }
+            TerminatorKind::Return => {
+                let bb = location.block;
+                if self.body.basic_blocks[bb].is_cleanup {
+                    self.fail(location, "Cannot `Return` from cleanup basic block")
+                }
+            }
+            TerminatorKind::Unreachable => {}
+        }
+
+        self.super_terminator(terminator, location);
+    }
+
+    fn visit_source_scope(&mut self, scope: SourceScope) {
+        if self.body.source_scopes.get(scope).is_none() {
+            self.tcx.dcx().span_bug(
+                self.body.span,
+                format!(
+                    "broken MIR in {:?} ({}):\ninvalid source scope {:?}",
+                    self.body.source.instance, self.when, scope,
+                ),
+            );
+        }
+    }
+}
+
+/// A faster version of the validation pass that only checks those things which may break when
+/// instantiating any generic parameters.
+///
+/// `caller_body` is used to detect cycles in MIR inlining and MIR validation before
+/// `optimized_mir` is available.
+pub fn validate_types<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    mir_phase: MirPhase,
+    param_env: ty::ParamEnv<'tcx>,
+    body: &Body<'tcx>,
+    caller_body: &Body<'tcx>,
+) -> Vec<(Location, String)> {
+    let mut type_checker =
+        TypeChecker { body, caller_body, tcx, param_env, mir_phase, failures: Vec::new() };
+    type_checker.visit_body(body);
+    type_checker.failures
+}
+
+struct TypeChecker<'a, 'tcx> {
+    body: &'a Body<'tcx>,
+    caller_body: &'a Body<'tcx>,
+    tcx: TyCtxt<'tcx>,
+    param_env: ParamEnv<'tcx>,
+    mir_phase: MirPhase,
+    failures: Vec<(Location, String)>,
+}
+
+impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
+    fn fail(&mut self, location: Location, msg: impl Into<String>) {
+        self.failures.push((location, msg.into()));
+    }
+
+    /// Checks whether `src` can be assigned into `dest`.
+    /// This is not precise; it will accept some incorrect assignments.
+    fn mir_assign_valid_types(&self, src: Ty<'tcx>, dest: Ty<'tcx>) -> bool {
+        // Fast path before we normalize.
+        if src == dest {
+            // Equal types, all is good.
+            return true;
+        }
+
+        // We sometimes have to use `defining_opaque_types` for subtyping to succeed here,
+        // and figuring out exactly how that should work is annoying. It is harmless enough
+        // to just not validate anything in that case. We still check this after analysis,
+        // as all opaque types have been revealed by that point.
+        if (src, dest).has_opaque_types() {
+            return true;
+        }
+
+        // After borrowck, subtyping should be fully explicit via `Subtype` projections.
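+        // E.g. in analysis MIR a `&'static str` may flow into an `&'a str` slot through
+        // subtyping; in runtime MIR the same coercion must be an explicit `Subtype` projection.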
+        let variance = if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
+            Variance::Invariant
+        } else {
+            Variance::Covariant
+        };
+
+        crate::util::relate_types(self.tcx, self.param_env, variance, src, dest)
+    }
+}
+
+impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
+    fn visit_operand(&mut self, operand: &Operand<'tcx>, location: Location) {
+        // This check is somewhat expensive, so only run it when -Zvalidate-mir is passed.
+        if self.tcx.sess.opts.unstable_opts.validate_mir
+            && self.mir_phase < MirPhase::Runtime(RuntimePhase::Initial)
+        {
+            // `Operand::Copy` is only supposed to be used with `Copy` types.
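+            // E.g. a `String` operand must be moved rather than copied, since duplicating
+            // it would alias ownership of its heap allocation.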
+            if let Operand::Copy(place) = operand {
+                let ty = place.ty(&self.body.local_decls, self.tcx).ty;
+
+                if !ty.is_copy_modulo_regions(self.tcx, self.param_env) {
+                    self.fail(location, format!("`Operand::Copy` with non-`Copy` type {ty}"));
+                }
+            }
+        }
+
+        self.super_operand(operand, location);
+    }
+
+    fn visit_projection_elem(
+        &mut self,
+        place_ref: PlaceRef<'tcx>,
+        elem: PlaceElem<'tcx>,
+        context: PlaceContext,
+        location: Location,
+    ) {
+        match elem {
+            ProjectionElem::OpaqueCast(ty)
+                if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) =>
+            {
+                self.fail(
+                    location,
+                    format!("explicit opaque type cast to `{ty}` after `RevealAll`"),
+                )
+            }
+            ProjectionElem::Index(index) => {
+                let index_ty = self.body.local_decls[index].ty;
+                if index_ty != self.tcx.types.usize {
+                    self.fail(location, format!("bad index ({index_ty:?} != usize)"))
+                }
+            }
+            ProjectionElem::Deref
+                if self.mir_phase >= MirPhase::Runtime(RuntimePhase::PostCleanup) =>
+            {
+                let base_ty = place_ref.ty(&self.body.local_decls, self.tcx).ty;
+
+                if base_ty.is_box() {
+                    self.fail(
+                        location,
+                        format!("{base_ty:?} dereferenced after ElaborateBoxDerefs"),
+                    )
+                }
+            }
+            ProjectionElem::Field(f, ty) => {
+                let parent_ty = place_ref.ty(&self.body.local_decls, self.tcx);
+                let fail_out_of_bounds = |this: &mut Self, location| {
+                    this.fail(location, format!("Out of bounds field {f:?} for {parent_ty:?}"));
+                };
+                let check_equal = |this: &mut Self, location, f_ty| {
+                    if !this.mir_assign_valid_types(ty, f_ty) {
+                        this.fail(
+                            location,
+                            format!(
+                                "Field projection `{place_ref:?}.{f:?}` specified type `{ty:?}`, but actual type is `{f_ty:?}`"
+                            )
+                        )
+                    }
+                };
+
+                let kind = match parent_ty.ty.kind() {
+                    &ty::Alias(ty::Opaque, ty::AliasTy { def_id, args, .. }) => {
+                        self.tcx.type_of(def_id).instantiate(self.tcx, args).kind()
+                    }
+                    kind => kind,
+                };
+
+                match kind {
+                    ty::Tuple(fields) => {
+                        let Some(f_ty) = fields.get(f.as_usize()) else {
+                            fail_out_of_bounds(self, location);
+                            return;
+                        };
+                        check_equal(self, location, *f_ty);
+                    }
+                    ty::Adt(adt_def, args) => {
+                        // see <https://github.com/rust-lang/rust/blob/7601adcc764d42c9f2984082b49948af652df986/compiler/rustc_middle/src/ty/layout.rs#L861-L864>
+                        if Some(adt_def.did()) == self.tcx.lang_items().dyn_metadata() {
+                            self.fail(
+                                location,
+                                format!(
+                                    "You can't project to field {f:?} of `DynMetadata` because \
+                                     its layout is special-cased and claims to have no fields."
+                                ),
+                            );
+                        }
+
+                        let var = parent_ty.variant_index.unwrap_or(FIRST_VARIANT);
+                        let Some(field) = adt_def.variant(var).fields.get(f) else {
+                            fail_out_of_bounds(self, location);
+                            return;
+                        };
+                        check_equal(self, location, field.ty(self.tcx, args));
+                    }
+                    ty::Closure(_, args) => {
+                        let args = args.as_closure();
+                        let Some(&f_ty) = args.upvar_tys().get(f.as_usize()) else {
+                            fail_out_of_bounds(self, location);
+                            return;
+                        };
+                        check_equal(self, location, f_ty);
+                    }
+                    ty::CoroutineClosure(_, args) => {
+                        let args = args.as_coroutine_closure();
+                        let Some(&f_ty) = args.upvar_tys().get(f.as_usize()) else {
+                            fail_out_of_bounds(self, location);
+                            return;
+                        };
+                        check_equal(self, location, f_ty);
+                    }
+                    &ty::Coroutine(def_id, args) => {
+                        let f_ty = if let Some(var) = parent_ty.variant_index {
+                            // If we're currently validating an inlined copy of this body,
+                            // then it will no longer be parameterized over the original
+                            // args of the coroutine. Otherwise, we prefer to use this body
+                            // since we may be in the process of computing this MIR in the
+                            // first place.
+                            let layout = if def_id == self.caller_body.source.def_id() {
+                                // FIXME: This is not right for async closures.
+                                self.caller_body.coroutine_layout_raw()
+                            } else {
+                                self.tcx.coroutine_layout(def_id, args.as_coroutine().kind_ty())
+                            };
+
+                            let Some(layout) = layout else {
+                                self.fail(
+                                    location,
+                                    format!("No coroutine layout for {parent_ty:?}"),
+                                );
+                                return;
+                            };
+
+                            let Some(&local) = layout.variant_fields[var].get(f) else {
+                                fail_out_of_bounds(self, location);
+                                return;
+                            };
+
+                            let Some(f_ty) = layout.field_tys.get(local) else {
+                                self.fail(
+                                    location,
+                                    format!("Out of bounds local {local:?} for {parent_ty:?}"),
+                                );
+                                return;
+                            };
+
+                            ty::EarlyBinder::bind(f_ty.ty).instantiate(self.tcx, args)
+                        } else {
+                            let Some(&f_ty) = args.as_coroutine().prefix_tys().get(f.index())
+                            else {
+                                fail_out_of_bounds(self, location);
+                                return;
+                            };
+
+                            f_ty
+                        };
+
+                        check_equal(self, location, f_ty);
+                    }
+                    _ => {
+                        self.fail(location, format!("{:?} does not have fields", parent_ty.ty));
+                    }
+                }
+            }
+            ProjectionElem::Subtype(ty) => {
+                if !relate_types(
+                    self.tcx,
+                    self.param_env,
+                    Variance::Covariant,
+                    ty,
+                    place_ref.ty(&self.body.local_decls, self.tcx).ty,
+                ) {
+                    self.fail(
+                        location,
+                        format!(
+                            "Failed subtyping {ty:#?} and {:#?}",
+                            place_ref.ty(&self.body.local_decls, self.tcx).ty
+                        ),
+                    )
+                }
+            }
+            _ => {}
+        }
+        self.super_projection_elem(place_ref, elem, context, location);
+    }
+
+    fn visit_var_debug_info(&mut self, debuginfo: &VarDebugInfo<'tcx>) {
+        if let Some(box VarDebugInfoFragment { ty, ref projection }) = debuginfo.composite {
+            if ty.is_union() || ty.is_enum() {
+                self.fail(
+                    START_BLOCK.start_location(),
+                    format!("invalid type {ty:?} in debuginfo for {:?}", debuginfo.name),
+                );
+            }
+            if projection.is_empty() {
+                self.fail(
+                    START_BLOCK.start_location(),
+                    format!("invalid empty projection in debuginfo for {:?}", debuginfo.name),
+                );
+            }
+            if projection.iter().any(|p| !matches!(p, PlaceElem::Field(..))) {
+                self.fail(
+                    START_BLOCK.start_location(),
+                    format!(
+                        "illegal projection {:?} in debuginfo for {:?}",
+                        projection, debuginfo.name
+                    ),
+                );
+            }
+        }
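+        // Composite debuginfo arises when an aggregate variable is scattered across several
+        // locals (e.g. by SROA), so each fragment must project a plain field of its type.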
+        match debuginfo.value {
+            VarDebugInfoContents::Const(_) => {}
+            VarDebugInfoContents::Place(place) => {
+                if place.projection.iter().any(|p| !p.can_use_in_debuginfo()) {
+                    self.fail(
+                        START_BLOCK.start_location(),
+                        format!("illegal place {:?} in debuginfo for {:?}", place, debuginfo.name),
+                    );
+                }
+            }
+        }
+        self.super_var_debug_info(debuginfo);
+    }
+
+    fn visit_place(&mut self, place: &Place<'tcx>, cntxt: PlaceContext, location: Location) {
+        // Set off any `bug!`s in the type computation code
+        let _ = place.ty(&self.body.local_decls, self.tcx);
+
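+        // E.g. `(*_1).f` is a valid runtime-MIR place, but `(*(_1.f)).g` is not; the deref
+        // separator pass flattens the latter by first copying `_1.f` into a fresh local.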
+        if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial)
+            && place.projection.len() > 1
+            && cntxt != PlaceContext::NonUse(NonUseContext::VarDebugInfo)
+            && place.projection[1..].contains(&ProjectionElem::Deref)
+        {
+            self.fail(
+                location,
+                format!("place {place:?} has deref as a later projection (it is only permitted as the first projection)"),
+            );
+        }
+
+        // Ensure all downcast projections are followed by field projections.
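+        // E.g. `(_1 as Some).0` is well-formed, while a bare `(_1 as Some)` with no
+        // subsequent field access is not.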
+        let mut projections_iter = place.projection.iter();
+        while let Some(proj) = projections_iter.next() {
+            if matches!(proj, ProjectionElem::Downcast(..)) {
+                if !matches!(projections_iter.next(), Some(ProjectionElem::Field(..))) {
+                    self.fail(
+                        location,
+                        format!(
+                            "place {place:?} has `Downcast` projection not followed by `Field`"
+                        ),
+                    );
+                }
+            }
+        }
+
+        self.super_place(place, cntxt, location);
+    }
+
+    fn visit_rvalue(&mut self, rvalue: &Rvalue<'tcx>, location: Location) {
+        macro_rules! check_kinds {
+            ($t:expr, $text:literal, $typat:pat) => {
+                if !matches!(($t).kind(), $typat) {
+                    self.fail(location, format!($text, $t));
+                }
+            };
+        }
+        match rvalue {
+            Rvalue::Use(_) | Rvalue::CopyForDeref(_) => {}
+            Rvalue::Aggregate(kind, fields) => match **kind {
+                AggregateKind::Tuple => {}
+                AggregateKind::Array(dest) => {
+                    for src in fields {
+                        if !self.mir_assign_valid_types(src.ty(self.body, self.tcx), dest) {
+                            self.fail(location, "array field has the wrong type");
+                        }
+                    }
+                }
+                AggregateKind::Adt(def_id, idx, args, _, Some(field)) => {
+                    let adt_def = self.tcx.adt_def(def_id);
+                    assert!(adt_def.is_union());
+                    assert_eq!(idx, FIRST_VARIANT);
+                    let dest_ty = self.tcx.normalize_erasing_regions(
+                        self.param_env,
+                        adt_def.non_enum_variant().fields[field].ty(self.tcx, args),
+                    );
+                    if fields.len() == 1 {
+                        let src_ty = fields.raw[0].ty(self.body, self.tcx);
+                        if !self.mir_assign_valid_types(src_ty, dest_ty) {
+                            self.fail(location, "union field has the wrong type");
+                        }
+                    } else {
+                        self.fail(location, "unions should have one initialized field");
+                    }
+                }
+                AggregateKind::Adt(def_id, idx, args, _, None) => {
+                    let adt_def = self.tcx.adt_def(def_id);
+                    assert!(!adt_def.is_union());
+                    let variant = &adt_def.variants()[idx];
+                    if variant.fields.len() != fields.len() {
+                        self.fail(location, "adt has the wrong number of initialized fields");
+                    }
+                    for (src, dest) in std::iter::zip(fields, &variant.fields) {
+                        let dest_ty = self
+                            .tcx
+                            .normalize_erasing_regions(self.param_env, dest.ty(self.tcx, args));
+                        if !self.mir_assign_valid_types(src.ty(self.body, self.tcx), dest_ty) {
+                            self.fail(location, "adt field has the wrong type");
+                        }
+                    }
+                }
+                AggregateKind::Closure(_, args) => {
+                    let upvars = args.as_closure().upvar_tys();
+                    if upvars.len() != fields.len() {
+                        self.fail(location, "closure has the wrong number of initialized fields");
+                    }
+                    for (src, dest) in std::iter::zip(fields, upvars) {
+                        if !self.mir_assign_valid_types(src.ty(self.body, self.tcx), dest) {
+                            self.fail(location, "closure field has the wrong type");
+                        }
+                    }
+                }
+                AggregateKind::Coroutine(_, args) => {
+                    let upvars = args.as_coroutine().upvar_tys();
+                    if upvars.len() != fields.len() {
+                        self.fail(location, "coroutine has the wrong number of initialized fields");
+                    }
+                    for (src, dest) in std::iter::zip(fields, upvars) {
+                        if !self.mir_assign_valid_types(src.ty(self.body, self.tcx), dest) {
+                            self.fail(location, "coroutine field has the wrong type");
+                        }
+                    }
+                }
+                AggregateKind::CoroutineClosure(_, args) => {
+                    let upvars = args.as_coroutine_closure().upvar_tys();
+                    if upvars.len() != fields.len() {
+                        self.fail(
+                            location,
+                            "coroutine-closure has the wrong number of initialized fields",
+                        );
+                    }
+                    for (src, dest) in std::iter::zip(fields, upvars) {
+                        if !self.mir_assign_valid_types(src.ty(self.body, self.tcx), dest) {
+                            self.fail(location, "coroutine-closure field has the wrong type");
+                        }
+                    }
+                }
+                AggregateKind::RawPtr(pointee_ty, mutability) => {
+                    if !matches!(self.mir_phase, MirPhase::Runtime(_)) {
+                        // It would probably be fine to support this in earlier phases,
+                        // but at the time of writing it's only ever introduced from intrinsic lowering,
+                        // so earlier things just `bug!` on it.
+                        self.fail(location, "RawPtr should be in runtime MIR only");
+                    }
+
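+                    // E.g. a `*const [u8]` is aggregated from a thin `*const u8` data pointer
+                    // plus a `usize` length as its metadata.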
+                    if fields.len() != 2 {
+                        self.fail(location, "raw pointer aggregate must have 2 fields");
+                    } else {
+                        let data_ptr_ty = fields.raw[0].ty(self.body, self.tcx);
+                        let metadata_ty = fields.raw[1].ty(self.body, self.tcx);
+                        if let ty::RawPtr(in_pointee, in_mut) = data_ptr_ty.kind() {
+                            if *in_mut != mutability {
+                                self.fail(location, "input and output mutability must match");
+                            }
+
+                            // FIXME: check `Thin` instead of `Sized`
+                            if !in_pointee.is_sized(self.tcx, self.param_env) {
+                                self.fail(location, "input pointer must be thin");
+                            }
+                        } else {
+                            self.fail(
+                                location,
+                                "first operand to raw pointer aggregate must be a raw pointer",
+                            );
+                        }
+
+                        // FIXME: Check metadata more generally
+                        if pointee_ty.is_slice() {
+                            if !self.mir_assign_valid_types(metadata_ty, self.tcx.types.usize) {
+                                self.fail(location, "slice metadata must be usize");
+                            }
+                        } else if pointee_ty.is_sized(self.tcx, self.param_env) {
+                            if metadata_ty != self.tcx.types.unit {
+                                self.fail(location, "metadata for pointer-to-thin must be unit");
+                            }
+                        }
+                    }
+                }
+            },
+            Rvalue::Ref(_, BorrowKind::Fake(_), _) => {
+                if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
+                    self.fail(
+                        location,
+                        "`Assign` statement with a `Fake` borrow should have been removed in runtime MIR",
+                    );
+                }
+            }
+            Rvalue::Ref(..) => {}
+            Rvalue::Len(p) => {
+                let pty = p.ty(&self.body.local_decls, self.tcx).ty;
+                check_kinds!(
+                    pty,
+                    "Cannot compute length of non-array type {:?}",
+                    ty::Array(..) | ty::Slice(..)
+                );
+            }
+            Rvalue::BinaryOp(op, vals) => {
+                use BinOp::*;
+                let a = vals.0.ty(&self.body.local_decls, self.tcx);
+                let b = vals.1.ty(&self.body.local_decls, self.tcx);
+                if crate::util::binop_right_homogeneous(*op) {
+                    if let Eq | Lt | Le | Ne | Ge | Gt = op {
+                        // The function pointer types can have lifetimes
+                        if !self.mir_assign_valid_types(a, b) {
+                            self.fail(
+                                location,
+                                format!("Cannot {op:?} compare incompatible types {a:?} and {b:?}"),
+                            );
+                        }
+                    } else if a != b {
+                        self.fail(
+                            location,
+                            format!(
+                                "Cannot perform binary op {op:?} on unequal types {a:?} and {b:?}"
+                            ),
+                        );
+                    }
+                }
+
+                match op {
+                    Offset => {
+                        check_kinds!(a, "Cannot offset non-pointer type {:?}", ty::RawPtr(..));
+                        if b != self.tcx.types.isize && b != self.tcx.types.usize {
+                            self.fail(location, format!("Cannot offset by non-isize type {b:?}"));
+                        }
+                    }
+                    Eq | Lt | Le | Ne | Ge | Gt => {
+                        for x in [a, b] {
+                            check_kinds!(
+                                x,
+                                "Cannot {op:?} compare type {:?}",
+                                ty::Bool
+                                    | ty::Char
+                                    | ty::Int(..)
+                                    | ty::Uint(..)
+                                    | ty::Float(..)
+                                    | ty::RawPtr(..)
+                                    | ty::FnPtr(..)
+                            )
+                        }
+                    }
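+                    // `Cmp` is the three-way comparison (as in `Ord::cmp`), yielding
+                    // `std::cmp::Ordering`; it is only accepted on integer-like scalars here.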
+                    Cmp => {
+                        for x in [a, b] {
+                            check_kinds!(
+                                x,
+                                "Cannot three-way compare non-integer type {:?}",
+                                ty::Char | ty::Uint(..) | ty::Int(..)
+                            )
+                        }
+                    }
+                    AddUnchecked | AddWithOverflow | SubUnchecked | SubWithOverflow
+                    | MulUnchecked | MulWithOverflow | Shl | ShlUnchecked | Shr | ShrUnchecked => {
+                        for x in [a, b] {
+                            check_kinds!(
+                                x,
+                                "Cannot {op:?} non-integer type {:?}",
+                                ty::Uint(..) | ty::Int(..)
+                            )
+                        }
+                    }
+                    BitAnd | BitOr | BitXor => {
+                        for x in [a, b] {
+                            check_kinds!(
+                                x,
+                                "Cannot perform bitwise op {op:?} on type {:?}",
+                                ty::Uint(..) | ty::Int(..) | ty::Bool
+                            )
+                        }
+                    }
+                    Add | Sub | Mul | Div | Rem => {
+                        for x in [a, b] {
+                            check_kinds!(
+                                x,
+                                "Cannot perform arithmetic {op:?} on type {:?}",
+                                ty::Uint(..) | ty::Int(..) | ty::Float(..)
+                            )
+                        }
+                    }
+                }
+            }
+            Rvalue::UnaryOp(op, operand) => {
+                let a = operand.ty(&self.body.local_decls, self.tcx);
+                match op {
+                    UnOp::Neg => {
+                        check_kinds!(a, "Cannot negate type {:?}", ty::Int(..) | ty::Float(..))
+                    }
+                    UnOp::Not => {
+                        check_kinds!(
+                            a,
+                            "Cannot binary not type {:?}",
+                            ty::Int(..) | ty::Uint(..) | ty::Bool
+                        );
+                    }
+                    UnOp::PtrMetadata => {
+                        if !matches!(self.mir_phase, MirPhase::Runtime(_)) {
+                            // It would probably be fine to support this in earlier phases,
+                            // but at the time of writing it's only ever introduced from intrinsic lowering,
+                            // so earlier things can just `bug!` on it.
+                            self.fail(location, "PtrMetadata should be in runtime MIR only");
+                        }
+
+                        check_kinds!(a, "Cannot PtrMetadata non-pointer type {:?}", ty::RawPtr(..));
+                    }
+                }
+            }
+            Rvalue::ShallowInitBox(operand, _) => {
+                let a = operand.ty(&self.body.local_decls, self.tcx);
+                check_kinds!(a, "Cannot shallow init type {:?}", ty::RawPtr(..));
+            }
+            Rvalue::Cast(kind, operand, target_type) => {
+                let op_ty = operand.ty(self.body, self.tcx);
+                match kind {
+                    CastKind::DynStar => {
+                        // FIXME(dyn-star): make sure nothing needs to be done here.
+                    }
+                    // FIXME: add checks for these
+                    CastKind::PointerWithExposedProvenance | CastKind::PointerExposeProvenance => {}
+                    CastKind::PointerCoercion(PointerCoercion::ReifyFnPointer) => {
+                        // FIXME: check signature compatibility.
+                        check_kinds!(
+                            op_ty,
+                            "CastKind::{kind:?} input must be a fn item, not {:?}",
+                            ty::FnDef(..)
+                        );
+                        check_kinds!(
+                            target_type,
+                            "CastKind::{kind:?} output must be a fn pointer, not {:?}",
+                            ty::FnPtr(..)
+                        );
+                    }
+                    CastKind::PointerCoercion(PointerCoercion::UnsafeFnPointer) => {
+                        // FIXME: check safety and signature compatibility.
+                        check_kinds!(
+                            op_ty,
+                            "CastKind::{kind:?} input must be a fn pointer, not {:?}",
+                            ty::FnPtr(..)
+                        );
+                        check_kinds!(
+                            target_type,
+                            "CastKind::{kind:?} output must be a fn pointer, not {:?}",
+                            ty::FnPtr(..)
+                        );
+                    }
+                    CastKind::PointerCoercion(PointerCoercion::ClosureFnPointer(..)) => {
+                        // FIXME: check safety, captures, and signature compatibility.
+                        check_kinds!(
+                            op_ty,
+                            "CastKind::{kind:?} input must be a closure, not {:?}",
+                            ty::Closure(..)
+                        );
+                        check_kinds!(
+                            target_type,
+                            "CastKind::{kind:?} output must be a fn pointer, not {:?}",
+                            ty::FnPtr(..)
+                        );
+                    }
+                    CastKind::PointerCoercion(PointerCoercion::MutToConstPointer) => {
+                        // FIXME: check same pointee?
+                        check_kinds!(
+                            op_ty,
+                            "CastKind::{kind:?} input must be a raw mut pointer, not {:?}",
+                            ty::RawPtr(_, Mutability::Mut)
+                        );
+                        check_kinds!(
+                            target_type,
+                            "CastKind::{kind:?} output must be a raw const pointer, not {:?}",
+                            ty::RawPtr(_, Mutability::Not)
+                        );
+                    }
+                    CastKind::PointerCoercion(PointerCoercion::ArrayToPointer) => {
+                        // FIXME: Check pointee types
+                        check_kinds!(
+                            op_ty,
+                            "CastKind::{kind:?} input must be a raw pointer, not {:?}",
+                            ty::RawPtr(..)
+                        );
+                        check_kinds!(
+                            target_type,
+                            "CastKind::{kind:?} output must be a raw pointer, not {:?}",
+                            ty::RawPtr(..)
+                        );
+                    }
+                    CastKind::PointerCoercion(PointerCoercion::Unsize) => {
+                        // This is used for all `CoerceUnsized` types,
+                        // not just pointers/references, so is hard to check.
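+                        // E.g. `&[u8; 4] -> &[u8]` and `Box<T> -> Box<dyn Trait>`, but also
+                        // user-defined `CoerceUnsized` containers wrapping such pointers.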
+                    }
+                    CastKind::IntToInt | CastKind::IntToFloat => {
+                        let input_valid = op_ty.is_integral() || op_ty.is_char() || op_ty.is_bool();
+                        let target_valid = target_type.is_numeric() || target_type.is_char();
+                        if !input_valid || !target_valid {
+                            self.fail(
+                                location,
+                                format!("Wrong cast kind {kind:?} for the type {op_ty}",),
+                            );
+                        }
+                    }
+                    CastKind::FnPtrToPtr => {
+                        check_kinds!(
+                            op_ty,
+                            "CastKind::{kind:?} input must be a fn pointer, not {:?}",
+                            ty::FnPtr(..)
+                        );
+                        check_kinds!(
+                            target_type,
+                            "CastKind::{kind:?} output must be a raw pointer, not {:?}",
+                            ty::RawPtr(..)
+                        );
+                    }
+                    CastKind::PtrToPtr => {
+                        check_kinds!(
+                            op_ty,
+                            "CastKind::{kind:?} input must be a raw pointer, not {:?}",
+                            ty::RawPtr(..)
+                        );
+                        check_kinds!(
+                            target_type,
+                            "CastKind::{kind:?} output must be a raw pointer, not {:?}",
+                            ty::RawPtr(..)
+                        );
+                    }
+                    CastKind::FloatToFloat | CastKind::FloatToInt => {
+                        if !op_ty.is_floating_point() || !target_type.is_numeric() {
+                            self.fail(
+                                location,
+                                format!(
+                                    "Trying to cast non 'Float' as {kind:?} into {target_type:?}"
+                                ),
+                            );
+                        }
+                    }
+                    CastKind::Transmute => {
+                        if let MirPhase::Runtime(..) = self.mir_phase {
+                            // Unlike `mem::transmute`, a MIR `Transmute` is well-formed
+                            // for any two `Sized` types, just potentially UB to run.
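+                            // E.g. transmuting a `u32` to a `char` validates fine; only
+                            // executing it with an invalid code point would be UB.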
+
+                            if !self
+                                .tcx
+                                .normalize_erasing_regions(self.param_env, op_ty)
+                                .is_sized(self.tcx, self.param_env)
+                            {
+                                self.fail(
+                                    location,
+                                    format!("Cannot transmute from non-`Sized` type {op_ty:?}"),
+                                );
+                            }
+                            if !self
+                                .tcx
+                                .normalize_erasing_regions(self.param_env, *target_type)
+                                .is_sized(self.tcx, self.param_env)
+                            {
+                                self.fail(
+                                    location,
+                                    format!("Cannot transmute to non-`Sized` type {target_type:?}"),
+                                );
+                            }
+                        } else {
+                            self.fail(
+                                location,
+                                format!(
+                                    "Transmute is not supported in non-runtime phase {:?}.",
+                                    self.mir_phase
+                                ),
+                            );
+                        }
+                    }
+                }
+            }
+            Rvalue::NullaryOp(NullOp::OffsetOf(indices), container) => {
+                let fail_out_of_bounds = |this: &mut Self, location, field, ty| {
+                    this.fail(location, format!("Out of bounds field {field:?} for {ty:?}"));
+                };
+
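+                // Each step is a (variant, field) pair, as produced by lowering
+                // `offset_of!`; tuples use `FIRST_VARIANT`, checked below.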
+                let mut current_ty = *container;
+
+                for (variant, field) in indices.iter() {
+                    match current_ty.kind() {
+                        ty::Tuple(fields) => {
+                            if variant != FIRST_VARIANT {
+                                self.fail(
+                                    location,
+                                    format!("tried to get variant {variant:?} of tuple"),
+                                );
+                                return;
+                            }
+                            let Some(&f_ty) = fields.get(field.as_usize()) else {
+                                fail_out_of_bounds(self, location, field, current_ty);
+                                return;
+                            };
+
+                            current_ty = self.tcx.normalize_erasing_regions(self.param_env, f_ty);
+                        }
+                        ty::Adt(adt_def, args) => {
+                            let Some(field) = adt_def.variant(variant).fields.get(field) else {
+                                fail_out_of_bounds(self, location, field, current_ty);
+                                return;
+                            };
+
+                            let f_ty = field.ty(self.tcx, args);
+                            current_ty = self.tcx.normalize_erasing_regions(self.param_env, f_ty);
+                        }
+                        _ => {
+                            self.fail(
+                                location,
+                                format!("Cannot get offset ({variant:?}, {field:?}) from type {current_ty:?}"),
+                            );
+                            return;
+                        }
+                    }
+                }
+            }
+            Rvalue::Repeat(_, _)
+            | Rvalue::ThreadLocalRef(_)
+            | Rvalue::AddressOf(_, _)
+            | Rvalue::NullaryOp(NullOp::SizeOf | NullOp::AlignOf | NullOp::UbChecks, _)
+            | Rvalue::Discriminant(_) => {}
+        }
+        self.super_rvalue(rvalue, location);
+    }
+
+    fn visit_statement(&mut self, statement: &Statement<'tcx>, location: Location) {
+        match &statement.kind {
+            StatementKind::Assign(box (dest, rvalue)) => {
+                // LHS and RHS of the assignment must have the same type.
+                let left_ty = dest.ty(&self.body.local_decls, self.tcx).ty;
+                let right_ty = rvalue.ty(&self.body.local_decls, self.tcx);
+
+                if !self.mir_assign_valid_types(right_ty, left_ty) {
+                    self.fail(
+                        location,
+                        format!(
+                            "encountered `{:?}` with incompatible types:\n\
+                            left-hand side has type: {}\n\
+                            right-hand side has type: {}",
+                            statement.kind, left_ty, right_ty,
+                        ),
+                    );
+                }
+                if let Rvalue::CopyForDeref(place) = rvalue {
+                    if place.ty(&self.body.local_decls, self.tcx).ty.builtin_deref(true).is_none() {
+                        self.fail(
+                            location,
+                            "`CopyForDeref` should only be used for dereferenceable types",
+                        )
+                    }
+                }
+            }
+            StatementKind::AscribeUserType(..) => {
+                if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
+                    self.fail(
+                        location,
+                        "`AscribeUserType` should have been removed after drop lowering phase",
+                    );
+                }
+            }
+            StatementKind::FakeRead(..) => {
+                if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
+                    self.fail(
+                        location,
+                        "`FakeRead` should have been removed after drop lowering phase",
+                    );
+                }
+            }
+            StatementKind::Intrinsic(box NonDivergingIntrinsic::Assume(op)) => {
+                let ty = op.ty(&self.body.local_decls, self.tcx);
+                if !ty.is_bool() {
+                    self.fail(
+                        location,
+                        format!("`assume` argument must be `bool`, but got: `{ty}`"),
+                    );
+                }
+            }
+            StatementKind::Intrinsic(box NonDivergingIntrinsic::CopyNonOverlapping(
+                CopyNonOverlapping { src, dst, count },
+            )) => {
+                let src_ty = src.ty(&self.body.local_decls, self.tcx);
+                let op_src_ty = if let Some(src_deref) = src_ty.builtin_deref(true) {
+                    src_deref
+                } else {
+                    self.fail(
+                        location,
+                        format!("Expected src to be ptr in copy_nonoverlapping, got: {src_ty}"),
+                    );
+                    return;
+                };
+                let dst_ty = dst.ty(&self.body.local_decls, self.tcx);
+                let op_dst_ty = if let Some(dst_deref) = dst_ty.builtin_deref(true) {
+                    dst_deref
+                } else {
+                    self.fail(
+                        location,
+                        format!("Expected dst to be ptr in copy_nonoverlapping, got: {dst_ty}"),
+                    );
+                    return;
+                };
+                // Since `CopyNonOverlapping` is parameterized by a single type, we only need
+                // to check that the source and destination pointee types are equal, rather
+                // than keeping an extra type parameter around.
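+                // The intrinsic is `copy_nonoverlapping::<T>(src: *const T, dst: *mut T,
+                // count: usize)`, so both pointees are the same `T`.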
+                if !self.mir_assign_valid_types(op_src_ty, op_dst_ty) {
+                    self.fail(location, format!("bad arg ({op_src_ty:?} != {op_dst_ty:?})"));
+                }
+
+                let op_cnt_ty = count.ty(&self.body.local_decls, self.tcx);
+                if op_cnt_ty != self.tcx.types.usize {
+                    self.fail(location, format!("bad arg ({op_cnt_ty:?} != usize)"))
+                }
+            }
+            StatementKind::SetDiscriminant { place, .. } => {
+                if self.mir_phase < MirPhase::Runtime(RuntimePhase::Initial) {
+                    self.fail(location, "`SetDiscriminant`is not allowed until deaggregation");
+                }
+                let pty = place.ty(&self.body.local_decls, self.tcx).ty.kind();
+                if !matches!(pty, ty::Adt(..) | ty::Coroutine(..) | ty::Alias(ty::Opaque, ..)) {
+                    self.fail(
+                        location,
+                        format!(
+                            "`SetDiscriminant` is only allowed on ADTs and coroutines, not {pty:?}"
+                        ),
+                    );
+                }
+            }
+            StatementKind::Deinit(..) => {
+                if self.mir_phase < MirPhase::Runtime(RuntimePhase::Initial) {
+                    self.fail(location, "`Deinit`is not allowed until deaggregation");
+                }
+            }
+            StatementKind::Retag(kind, _) => {
+                // FIXME(JakobDegen) The validator should check that `self.mir_phase <
+                // DropsLowered`. However, this causes ICEs with generation of drop shims, which
+                // seem to fail to set their `MirPhase` correctly.
+                if matches!(kind, RetagKind::TwoPhase) {
+                    self.fail(location, format!("explicit `{kind:?}` is forbidden"));
+                }
+            }
+            StatementKind::StorageLive(_)
+            | StatementKind::StorageDead(_)
+            | StatementKind::Coverage(_)
+            | StatementKind::ConstEvalCounter
+            | StatementKind::PlaceMention(..)
+            | StatementKind::Nop => {}
+        }
+
+        self.super_statement(statement, location);
+    }
+
+    fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, location: Location) {
+        match &terminator.kind {
+            TerminatorKind::SwitchInt { targets, discr } => {
+                let switch_ty = discr.ty(&self.body.local_decls, self.tcx);
+
+                let target_width = self.tcx.sess.target.pointer_width;
+
+                let size = Size::from_bits(match switch_ty.kind() {
+                    ty::Uint(uint) => uint.normalize(target_width).bit_width().unwrap(),
+                    ty::Int(int) => int.normalize(target_width).bit_width().unwrap(),
+                    ty::Char => 32,
+                    ty::Bool => 1,
+                    other => bug!("unhandled type: {:?}", other),
+                });
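+                // E.g. a `bool` discriminant gets a 1-bit size above, so only the target
+                // values 0 and 1 fit; a value of 2 would be rejected below.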
+
+                for (value, _) in targets.iter() {
+                    if Scalar::<()>::try_from_uint(value, size).is_none() {
+                        self.fail(
+                            location,
+                            format!("the value {value:#x} is not a proper {switch_ty:?}"),
+                        )
+                    }
+                }
+            }
+            TerminatorKind::Call { func, .. } => {
+                let func_ty = func.ty(&self.body.local_decls, self.tcx);
+                match func_ty.kind() {
+                    ty::FnPtr(..) | ty::FnDef(..) => {}
+                    _ => self.fail(
+                        location,
+                        format!("encountered non-callable type {func_ty} in `Call` terminator"),
+                    ),
+                }
+            }
+            TerminatorKind::Assert { cond, .. } => {
+                let cond_ty = cond.ty(&self.body.local_decls, self.tcx);
+                if cond_ty != self.tcx.types.bool {
+                    self.fail(
+                        location,
+                        format!(
+                            "encountered non-boolean condition of type {cond_ty} in `Assert` terminator"
+                        ),
+                    );
+                }
+            }
+            TerminatorKind::Goto { .. }
+            | TerminatorKind::Drop { .. }
+            | TerminatorKind::Yield { .. }
+            | TerminatorKind::FalseEdge { .. }
+            | TerminatorKind::FalseUnwind { .. }
+            | TerminatorKind::InlineAsm { .. }
+            | TerminatorKind::CoroutineDrop
+            | TerminatorKind::UnwindResume
+            | TerminatorKind::UnwindTerminate(_)
+            | TerminatorKind::Return
+            | TerminatorKind::Unreachable => {}
+        }
+
+        self.super_terminator(terminator, location);
+    }
+}