Diffstat (limited to 'compiler/rustc_const_eval/src')
-rw-r--r--  compiler/rustc_const_eval/src/check_consts/check.rs (renamed from compiler/rustc_const_eval/src/transform/check_consts/check.rs) | 13
-rw-r--r--  compiler/rustc_const_eval/src/check_consts/mod.rs (renamed from compiler/rustc_const_eval/src/transform/check_consts/mod.rs) | 1
-rw-r--r--  compiler/rustc_const_eval/src/check_consts/ops.rs (renamed from compiler/rustc_const_eval/src/transform/check_consts/ops.rs) | 6
-rw-r--r--  compiler/rustc_const_eval/src/check_consts/post_drop_elaboration.rs (renamed from compiler/rustc_const_eval/src/transform/check_consts/post_drop_elaboration.rs) | 1
-rw-r--r--  compiler/rustc_const_eval/src/check_consts/qualifs.rs (renamed from compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs) | 4
-rw-r--r--  compiler/rustc_const_eval/src/check_consts/resolver.rs (renamed from compiler/rustc_const_eval/src/transform/check_consts/resolver.rs) | 3
-rw-r--r--  compiler/rustc_const_eval/src/const_eval/dummy_machine.rs | 56
-rw-r--r--  compiler/rustc_const_eval/src/const_eval/error.rs | 28
-rw-r--r--  compiler/rustc_const_eval/src/const_eval/eval_queries.rs | 75
-rw-r--r--  compiler/rustc_const_eval/src/const_eval/fn_queries.rs | 6
-rw-r--r--  compiler/rustc_const_eval/src/const_eval/machine.rs | 124
-rw-r--r--  compiler/rustc_const_eval/src/const_eval/mod.rs | 2
-rw-r--r--  compiler/rustc_const_eval/src/const_eval/valtrees.rs | 16
-rw-r--r--  compiler/rustc_const_eval/src/errors.rs | 33
-rw-r--r--  compiler/rustc_const_eval/src/interpret/cast.rs | 22
-rw-r--r--  compiler/rustc_const_eval/src/interpret/discriminant.rs | 19
-rw-r--r--  compiler/rustc_const_eval/src/interpret/eval_context.rs | 105
-rw-r--r--  compiler/rustc_const_eval/src/interpret/intern.rs | 79
-rw-r--r--  compiler/rustc_const_eval/src/interpret/intrinsics.rs | 137
-rw-r--r--  compiler/rustc_const_eval/src/interpret/machine.rs | 187
-rw-r--r--  compiler/rustc_const_eval/src/interpret/memory.rs | 87
-rw-r--r--  compiler/rustc_const_eval/src/interpret/mod.rs | 2
-rw-r--r--  compiler/rustc_const_eval/src/interpret/operand.rs | 74
-rw-r--r--  compiler/rustc_const_eval/src/interpret/operator.rs | 362
-rw-r--r--  compiler/rustc_const_eval/src/interpret/place.rs | 66
-rw-r--r--  compiler/rustc_const_eval/src/interpret/projection.rs | 36
-rw-r--r--  compiler/rustc_const_eval/src/interpret/step.rs | 47
-rw-r--r--  compiler/rustc_const_eval/src/interpret/terminator.rs | 65
-rw-r--r--  compiler/rustc_const_eval/src/interpret/traits.rs | 3
-rw-r--r--  compiler/rustc_const_eval/src/interpret/util.rs | 12
-rw-r--r--  compiler/rustc_const_eval/src/interpret/validity.rs | 193
-rw-r--r--  compiler/rustc_const_eval/src/interpret/visitor.rs | 15
-rw-r--r--  compiler/rustc_const_eval/src/lib.rs | 14
-rw-r--r--  compiler/rustc_const_eval/src/transform/mod.rs | 2
-rw-r--r--  compiler/rustc_const_eval/src/transform/validate.rs | 1380
-rw-r--r--  compiler/rustc_const_eval/src/util/alignment.rs | 1
-rw-r--r--  compiler/rustc_const_eval/src/util/caller_location.rs | 6
-rw-r--r--  compiler/rustc_const_eval/src/util/check_validity_requirement.rs | 3
-rw-r--r--  compiler/rustc_const_eval/src/util/mod.rs | 9
-rw-r--r--  compiler/rustc_const_eval/src/util/type_name.rs | 1
40 files changed, 1059 insertions, 2236 deletions
diff --git a/compiler/rustc_const_eval/src/transform/check_consts/check.rs b/compiler/rustc_const_eval/src/check_consts/check.rs
index a506d10c1d0..5fbf5b41109 100644
--- a/compiler/rustc_const_eval/src/transform/check_consts/check.rs
+++ b/compiler/rustc_const_eval/src/check_consts/check.rs
@@ -8,10 +8,11 @@ use rustc_infer::infer::TyCtxtInferExt;
 use rustc_infer::traits::ObligationCause;
 use rustc_middle::mir::visit::{MutatingUseContext, NonMutatingUseContext, PlaceContext, Visitor};
 use rustc_middle::mir::*;
+use rustc_middle::span_bug;
 use rustc_middle::ty::{self, adjustment::PointerCoercion, Ty, TyCtxt};
 use rustc_middle::ty::{Instance, InstanceDef, TypeVisitableExt};
 use rustc_mir_dataflow::Analysis;
-use rustc_span::{sym, Span, Symbol};
+use rustc_span::{sym, Span, Symbol, DUMMY_SP};
 use rustc_trait_selection::traits::error_reporting::TypeErrCtxtExt as _;
 use rustc_trait_selection::traits::{self, ObligationCauseCode, ObligationCtxt};
 use rustc_type_ir::visit::{TypeSuperVisitable, TypeVisitor};
@@ -19,6 +20,8 @@ use rustc_type_ir::visit::{TypeSuperVisitable, TypeVisitor};
 use std::mem;
 use std::ops::Deref;
 
+use tracing::{debug, instrument, trace};
+
 use super::ops::{self, NonConstOp, Status};
 use super::qualifs::{self, HasMutInterior, NeedsDrop, NeedsNonConstDrop};
 use super::resolver::FlowSensitiveAnalysis;
@@ -414,7 +417,7 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
                         BorrowKind::Shared => {
                             PlaceContext::NonMutatingUse(NonMutatingUseContext::SharedBorrow)
                         }
-                        BorrowKind::Fake => {
+                        BorrowKind::Fake(_) => {
                             PlaceContext::NonMutatingUse(NonMutatingUseContext::FakeBorrow)
                         }
                         BorrowKind::Mut { .. } => {
@@ -487,7 +490,7 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
                 }
             }
 
-            Rvalue::Ref(_, BorrowKind::Shared | BorrowKind::Fake, place)
+            Rvalue::Ref(_, BorrowKind::Shared | BorrowKind::Fake(_), place)
             | Rvalue::AddressOf(Mutability::Not, place) => {
                 let borrowed_place_has_mut_interior = qualifs::in_place::<HasMutInterior, _>(
                     self.ccx,
@@ -579,7 +582,7 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
                 }
             }
 
-            Rvalue::BinaryOp(op, box (lhs, rhs)) | Rvalue::CheckedBinaryOp(op, box (lhs, rhs)) => {
+            Rvalue::BinaryOp(op, box (lhs, rhs)) => {
                 let lhs_ty = lhs.ty(self.body, self.tcx);
                 let rhs_ty = rhs.ty(self.body, self.tcx);
 
@@ -738,7 +741,7 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
                 let cause = ObligationCause::new(
                     terminator.source_info.span,
                     self.body.source.def_id().expect_local(),
-                    ObligationCauseCode::ItemObligation(callee),
+                    ObligationCauseCode::WhereClause(callee, DUMMY_SP),
                 );
                 let normalized_predicates = ocx.normalize(&cause, param_env, predicates);
                 ocx.register_obligations(traits::predicates_for_generics(
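
The `CheckedBinaryOp` arm folded into `BinaryOp` in this file reflects that overflow-checked arithmetic no longer has its own `Rvalue` variant; it is carried by dedicated `BinOp` variants (`AddWithOverflow` and friends), so one `BinaryOp` arm now covers both forms. As a plain-Rust analogy (not the MIR API itself), the checked forms behave like the stable `overflowing_*` methods, which yield a (result, overflowed) pair:

    fn main() {
        // One operation, two results -- the shape an overflow-checked BinOp produces.
        let (sum, overflowed) = i32::MAX.overflowing_add(1);
        assert!(overflowed);
        assert_eq!(sum, i32::MIN);
    }
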
diff --git a/compiler/rustc_const_eval/src/transform/check_consts/mod.rs b/compiler/rustc_const_eval/src/check_consts/mod.rs
index 12e7ec15e32..308b90cd470 100644
--- a/compiler/rustc_const_eval/src/transform/check_consts/mod.rs
+++ b/compiler/rustc_const_eval/src/check_consts/mod.rs
@@ -8,6 +8,7 @@ use rustc_attr as attr;
 use rustc_errors::DiagCtxt;
 use rustc_hir as hir;
 use rustc_hir::def_id::{DefId, LocalDefId};
+use rustc_middle::bug;
 use rustc_middle::mir;
 use rustc_middle::ty::{self, PolyFnSig, TyCtxt};
 use rustc_span::Symbol;
diff --git a/compiler/rustc_const_eval/src/transform/check_consts/ops.rs b/compiler/rustc_const_eval/src/check_consts/ops.rs
index dda8f3ed87d..90b622cae65 100644
--- a/compiler/rustc_const_eval/src/transform/check_consts/ops.rs
+++ b/compiler/rustc_const_eval/src/check_consts/ops.rs
@@ -8,7 +8,8 @@ use rustc_hir::def_id::DefId;
 use rustc_infer::infer::TyCtxtInferExt;
 use rustc_infer::traits::{ImplSource, Obligation, ObligationCause};
 use rustc_middle::mir::{self, CallSource};
-use rustc_middle::ty::print::with_no_trimmed_paths;
+use rustc_middle::span_bug;
+use rustc_middle::ty::print::{with_no_trimmed_paths, PrintTraitRefExt as _};
 use rustc_middle::ty::{
     self, suggest_constraining_type_param, Closure, FnDef, FnPtr, GenericArgKind, GenericArgsRef,
     Param, TraitRef, Ty,
@@ -18,6 +19,7 @@ use rustc_session::parse::feature_err;
 use rustc_span::symbol::sym;
 use rustc_span::{BytePos, Pos, Span, Symbol};
 use rustc_trait_selection::traits::SelectionContext;
+use tracing::debug;
 
 use super::ConstCx;
 use crate::errors;
@@ -307,7 +309,7 @@ impl<'tcx> NonConstOp<'tcx> for FnCallNonConst<'tcx> {
         }
 
         if let ConstContext::Static(_) = ccx.const_kind() {
-            err.note("consider wrapping this expression in `Lazy::new(|| ...)` from the `once_cell` crate: https://crates.io/crates/once_cell");
+            err.note("consider wrapping this expression in `std::sync::LazyLock::new(|| ...)`");
         }
 
         err
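
The updated note points users at the standard library instead of the `once_cell` crate. A minimal, runnable sketch of the suggested pattern (`std::sync::LazyLock` is stable as of Rust 1.80):

    use std::sync::LazyLock;

    // Initialized on first access, which is what the diagnostic now suggests
    // for non-const expressions in statics.
    static GREETING: LazyLock<String> = LazyLock::new(|| "hello".repeat(3));

    fn main() {
        assert_eq!(GREETING.len(), 15);
    }
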
diff --git a/compiler/rustc_const_eval/src/transform/check_consts/post_drop_elaboration.rs b/compiler/rustc_const_eval/src/check_consts/post_drop_elaboration.rs
index 5cd13783c23..f0763f1e490 100644
--- a/compiler/rustc_const_eval/src/transform/check_consts/post_drop_elaboration.rs
+++ b/compiler/rustc_const_eval/src/check_consts/post_drop_elaboration.rs
@@ -2,6 +2,7 @@ use rustc_middle::mir::visit::Visitor;
 use rustc_middle::mir::{self, BasicBlock, Location};
 use rustc_middle::ty::{Ty, TyCtxt};
 use rustc_span::{symbol::sym, Span};
+use tracing::trace;
 
 use super::check::Qualifs;
 use super::ops::{self, NonConstOp};
diff --git a/compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs b/compiler/rustc_const_eval/src/check_consts/qualifs.rs
index 1847847d9d2..5949444e599 100644
--- a/compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs
+++ b/compiler/rustc_const_eval/src/check_consts/qualifs.rs
@@ -5,6 +5,7 @@
 use rustc_errors::ErrorGuaranteed;
 use rustc_hir::LangItem;
 use rustc_infer::infer::TyCtxtInferExt;
+use rustc_middle::bug;
 use rustc_middle::mir;
 use rustc_middle::mir::*;
 use rustc_middle::traits::BuiltinImplSource;
@@ -12,6 +13,7 @@ use rustc_middle::ty::{self, AdtDef, GenericArgsRef, Ty};
 use rustc_trait_selection::traits::{
     ImplSource, Obligation, ObligationCause, ObligationCtxt, SelectionContext,
 };
+use tracing::{instrument, trace};
 
 use super::ConstCx;
 
@@ -260,7 +262,7 @@ where
         | Rvalue::Cast(_, operand, _)
         | Rvalue::ShallowInitBox(operand, _) => in_operand::<Q, _>(cx, in_local, operand),
 
-        Rvalue::BinaryOp(_, box (lhs, rhs)) | Rvalue::CheckedBinaryOp(_, box (lhs, rhs)) => {
+        Rvalue::BinaryOp(_, box (lhs, rhs)) => {
             in_operand::<Q, _>(cx, in_local, lhs) || in_operand::<Q, _>(cx, in_local, rhs)
         }
 
diff --git a/compiler/rustc_const_eval/src/transform/check_consts/resolver.rs b/compiler/rustc_const_eval/src/check_consts/resolver.rs
index 2c835f6750f..011341472b4 100644
--- a/compiler/rustc_const_eval/src/transform/check_consts/resolver.rs
+++ b/compiler/rustc_const_eval/src/check_consts/resolver.rs
@@ -105,7 +105,7 @@ where
     fn ref_allows_mutation(&self, kind: mir::BorrowKind, place: mir::Place<'tcx>) -> bool {
         match kind {
             mir::BorrowKind::Mut { .. } => true,
-            mir::BorrowKind::Shared | mir::BorrowKind::Fake => {
+            mir::BorrowKind::Shared | mir::BorrowKind::Fake(_) => {
                 self.shared_borrow_allows_mutation(place)
             }
         }
@@ -200,7 +200,6 @@ where
             | mir::Rvalue::Repeat(..)
             | mir::Rvalue::Len(..)
             | mir::Rvalue::BinaryOp(..)
-            | mir::Rvalue::CheckedBinaryOp(..)
             | mir::Rvalue::NullaryOp(..)
             | mir::Rvalue::UnaryOp(..)
             | mir::Rvalue::Discriminant(..)
diff --git a/compiler/rustc_const_eval/src/const_eval/dummy_machine.rs b/compiler/rustc_const_eval/src/const_eval/dummy_machine.rs
index ba2e2a1e353..9a98677a844 100644
--- a/compiler/rustc_const_eval/src/const_eval/dummy_machine.rs
+++ b/compiler/rustc_const_eval/src/const_eval/dummy_machine.rs
@@ -1,9 +1,12 @@
-use crate::interpret::{self, HasStaticRootDefId, ImmTy, Immediate, InterpCx, PointerArithmetic};
+use crate::interpret::{
+    self, throw_machine_stop, HasStaticRootDefId, ImmTy, Immediate, InterpCx, PointerArithmetic,
+};
 use rustc_middle::mir::interpret::{AllocId, ConstAllocation, InterpResult};
 use rustc_middle::mir::*;
 use rustc_middle::query::TyCtxtAt;
 use rustc_middle::ty;
 use rustc_middle::ty::layout::TyAndLayout;
+use rustc_middle::{bug, span_bug};
 use rustc_span::def_id::DefId;
 
 /// Macro for machine-specific `InterpError` without allocation.
@@ -41,17 +44,20 @@ impl HasStaticRootDefId for DummyMachine {
     }
 }
 
-impl<'mir, 'tcx: 'mir> interpret::Machine<'mir, 'tcx> for DummyMachine {
-    interpret::compile_time_machine!(<'mir, 'tcx>);
+impl<'tcx> interpret::Machine<'tcx> for DummyMachine {
+    interpret::compile_time_machine!(<'tcx>);
     type MemoryKind = !;
     const PANIC_ON_ALLOC_FAIL: bool = true;
 
+    // We want to just eval random consts in the program, so `eval_mir_const` can fail.
+    const ALL_CONSTS_ARE_PRECHECKED: bool = false;
+
     #[inline(always)]
-    fn enforce_alignment(_ecx: &InterpCx<'mir, 'tcx, Self>) -> bool {
+    fn enforce_alignment(_ecx: &InterpCx<'tcx, Self>) -> bool {
         false // no reason to enforce alignment
     }
 
-    fn enforce_validity(_ecx: &InterpCx<'mir, 'tcx, Self>, _layout: TyAndLayout<'tcx>) -> bool {
+    fn enforce_validity(_ecx: &InterpCx<'tcx, Self>, _layout: TyAndLayout<'tcx>) -> bool {
         false
     }
 
@@ -77,37 +83,37 @@ impl<'mir, 'tcx: 'mir> interpret::Machine<'mir, 'tcx> for DummyMachine {
     }
 
     fn find_mir_or_eval_fn(
-        _ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        _ecx: &mut InterpCx<'tcx, Self>,
         _instance: ty::Instance<'tcx>,
         _abi: rustc_target::spec::abi::Abi,
         _args: &[interpret::FnArg<'tcx, Self::Provenance>],
         _destination: &interpret::MPlaceTy<'tcx, Self::Provenance>,
         _target: Option<BasicBlock>,
         _unwind: UnwindAction,
-    ) -> interpret::InterpResult<'tcx, Option<(&'mir Body<'tcx>, ty::Instance<'tcx>)>> {
+    ) -> interpret::InterpResult<'tcx, Option<(&'tcx Body<'tcx>, ty::Instance<'tcx>)>> {
         unimplemented!()
     }
 
     fn panic_nounwind(
-        _ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        _ecx: &mut InterpCx<'tcx, Self>,
         _msg: &str,
     ) -> interpret::InterpResult<'tcx> {
         unimplemented!()
     }
 
     fn call_intrinsic(
-        _ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        _ecx: &mut InterpCx<'tcx, Self>,
         _instance: ty::Instance<'tcx>,
         _args: &[interpret::OpTy<'tcx, Self::Provenance>],
         _destination: &interpret::MPlaceTy<'tcx, Self::Provenance>,
         _target: Option<BasicBlock>,
         _unwind: UnwindAction,
-    ) -> interpret::InterpResult<'tcx> {
+    ) -> interpret::InterpResult<'tcx, Option<ty::Instance<'tcx>>> {
         unimplemented!()
     }
 
     fn assert_panic(
-        _ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        _ecx: &mut InterpCx<'tcx, Self>,
         _msg: &rustc_middle::mir::AssertMessage<'tcx>,
         _unwind: UnwindAction,
     ) -> interpret::InterpResult<'tcx> {
@@ -115,11 +121,11 @@ impl<'mir, 'tcx: 'mir> interpret::Machine<'mir, 'tcx> for DummyMachine {
     }
 
     fn binary_ptr_op(
-        ecx: &InterpCx<'mir, 'tcx, Self>,
+        ecx: &InterpCx<'tcx, Self>,
         bin_op: BinOp,
         left: &interpret::ImmTy<'tcx, Self::Provenance>,
         right: &interpret::ImmTy<'tcx, Self::Provenance>,
-    ) -> interpret::InterpResult<'tcx, (ImmTy<'tcx, Self::Provenance>, bool)> {
+    ) -> interpret::InterpResult<'tcx, ImmTy<'tcx, Self::Provenance>> {
         use rustc_middle::mir::BinOp::*;
         Ok(match bin_op {
             Eq | Ne | Lt | Le | Gt | Ge => {
@@ -148,7 +154,7 @@ impl<'mir, 'tcx: 'mir> interpret::Machine<'mir, 'tcx> for DummyMachine {
                     Ge => left >= right,
                     _ => bug!(),
                 };
-                (ImmTy::from_bool(res, *ecx.tcx), false)
+                ImmTy::from_bool(res, *ecx.tcx)
             }
 
             // Some more operations are possible with atomics.
@@ -162,32 +168,30 @@ impl<'mir, 'tcx: 'mir> interpret::Machine<'mir, 'tcx> for DummyMachine {
     }
 
     fn expose_ptr(
-        _ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        _ecx: &mut InterpCx<'tcx, Self>,
         _ptr: interpret::Pointer<Self::Provenance>,
     ) -> interpret::InterpResult<'tcx> {
         unimplemented!()
     }
 
-    fn init_frame_extra(
-        _ecx: &mut InterpCx<'mir, 'tcx, Self>,
-        _frame: interpret::Frame<'mir, 'tcx, Self::Provenance>,
-    ) -> interpret::InterpResult<
-        'tcx,
-        interpret::Frame<'mir, 'tcx, Self::Provenance, Self::FrameExtra>,
-    > {
+    fn init_frame(
+        _ecx: &mut InterpCx<'tcx, Self>,
+        _frame: interpret::Frame<'tcx, Self::Provenance>,
+    ) -> interpret::InterpResult<'tcx, interpret::Frame<'tcx, Self::Provenance, Self::FrameExtra>>
+    {
         unimplemented!()
     }
 
     fn stack<'a>(
-        _ecx: &'a InterpCx<'mir, 'tcx, Self>,
-    ) -> &'a [interpret::Frame<'mir, 'tcx, Self::Provenance, Self::FrameExtra>] {
+        _ecx: &'a InterpCx<'tcx, Self>,
+    ) -> &'a [interpret::Frame<'tcx, Self::Provenance, Self::FrameExtra>] {
         // Return an empty stack instead of panicking, as `cur_span` uses it to evaluate constants.
         &[]
     }
 
     fn stack_mut<'a>(
-        _ecx: &'a mut InterpCx<'mir, 'tcx, Self>,
-    ) -> &'a mut Vec<interpret::Frame<'mir, 'tcx, Self::Provenance, Self::FrameExtra>> {
+        _ecx: &'a mut InterpCx<'tcx, Self>,
+    ) -> &'a mut Vec<interpret::Frame<'tcx, Self::Provenance, Self::FrameExtra>> {
         unimplemented!()
     }
 }
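
The pervasive signature change in this file, `InterpCx<'mir, 'tcx, Self>` becoming `InterpCx<'tcx, Self>`, drops the separate `'mir` lifetime: MIR bodies handed to the interpreter now live for `'tcx` (note `&'mir Body<'tcx>` becoming `&'tcx Body<'tcx>` in `find_mir_or_eval_fn`). A toy illustration, not the interpreter's actual types, of collapsing a lifetime parameter once the shorter borrow is known to last as long as the longer one:

    struct Body;

    // Before: Ctx<'mir, 'tcx> with body: &'mir Body and a 'tcx: 'mir bound.
    // After: one lifetime suffices, since the body borrow is 'tcx anyway.
    struct Ctx<'tcx> {
        body: &'tcx Body,
    }

    fn main() {
        let body = Body;
        let cx = Ctx { body: &body };
        let _ = cx.body;
    }
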
diff --git a/compiler/rustc_const_eval/src/const_eval/error.rs b/compiler/rustc_const_eval/src/const_eval/error.rs
index 763344207c4..923b9ddf9af 100644
--- a/compiler/rustc_const_eval/src/const_eval/error.rs
+++ b/compiler/rustc_const_eval/src/const_eval/error.rs
@@ -2,15 +2,16 @@ use std::mem;
 
 use rustc_errors::{DiagArgName, DiagArgValue, DiagMessage, Diagnostic, IntoDiagArg};
 use rustc_hir::CRATE_HIR_ID;
-use rustc_middle::mir::interpret::Provenance;
+use rustc_middle::mir::interpret::{Provenance, ReportedErrorInfo};
 use rustc_middle::mir::AssertKind;
 use rustc_middle::query::TyCtxtAt;
 use rustc_middle::ty::TyCtxt;
 use rustc_middle::ty::{layout::LayoutError, ConstInt};
-use rustc_span::{Span, Symbol, DUMMY_SP};
+use rustc_span::{Span, Symbol};
 
 use super::CompileTimeInterpreter;
 use crate::errors::{self, FrameNote, ReportErrorExt};
+use crate::interpret::{err_inval, err_machine_stop};
 use crate::interpret::{ErrorHandled, Frame, InterpError, InterpErrorInfo, MachineStopType};
 
 /// The CTFE machine has some custom error kinds.
@@ -57,13 +58,10 @@ impl<'tcx> Into<InterpErrorInfo<'tcx>> for ConstEvalErrKind {
     }
 }
 
-pub fn get_span_and_frames<'tcx, 'mir>(
+pub fn get_span_and_frames<'tcx>(
     tcx: TyCtxtAt<'tcx>,
-    stack: &[Frame<'mir, 'tcx, impl Provenance, impl Sized>],
-) -> (Span, Vec<errors::FrameNote>)
-where
-    'tcx: 'mir,
-{
+    stack: &[Frame<'tcx, impl Provenance, impl Sized>],
+) -> (Span, Vec<errors::FrameNote>) {
     let mut stacktrace = Frame::generate_stacktrace_from_stack(stack);
     // Filter out `requires_caller_location` frames.
     stacktrace.retain(|frame| !frame.instance.def.requires_caller_location(*tcx));
@@ -121,7 +119,7 @@ where
 pub(super) fn report<'tcx, C, F, E>(
     tcx: TyCtxt<'tcx>,
     error: InterpError<'tcx>,
-    span: Option<Span>,
+    span: Span,
     get_span_and_frames: C,
     mk: F,
 ) -> ErrorHandled
@@ -135,16 +133,16 @@ where
         // Don't emit a new diagnostic for these errors, they are already reported elsewhere or
         // should remain silent.
         err_inval!(Layout(LayoutError::Unknown(_))) | err_inval!(TooGeneric) => {
-            ErrorHandled::TooGeneric(span.unwrap_or(DUMMY_SP))
+            ErrorHandled::TooGeneric(span)
         }
-        err_inval!(AlreadyReported(guar)) => ErrorHandled::Reported(guar, span.unwrap_or(DUMMY_SP)),
+        err_inval!(AlreadyReported(guar)) => ErrorHandled::Reported(guar, span),
         err_inval!(Layout(LayoutError::ReferencesError(guar))) => {
-            ErrorHandled::Reported(guar.into(), span.unwrap_or(DUMMY_SP))
+            ErrorHandled::Reported(ReportedErrorInfo::tainted_by_errors(guar), span)
         }
         // Report remaining errors.
         _ => {
             let (our_span, frames) = get_span_and_frames();
-            let span = span.unwrap_or(our_span);
+            let span = span.substitute_dummy(our_span);
             let err = mk(span, frames);
             let mut err = tcx.dcx().create_err(err);
 
@@ -160,9 +158,9 @@ where
 
 /// Emit a lint from a const-eval situation.
 // Even if this is unused, please don't remove it -- chances are we will need to emit a lint during const-eval again in the future!
-pub(super) fn lint<'tcx, 'mir, L>(
+pub(super) fn lint<'tcx, L>(
     tcx: TyCtxtAt<'tcx>,
-    machine: &CompileTimeInterpreter<'mir, 'tcx>,
+    machine: &CompileTimeInterpreter<'tcx>,
     lint: &'static rustc_session::lint::Lint,
     decorator: impl FnOnce(Vec<errors::FrameNote>) -> L,
 ) where
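
`report` now takes a `Span` instead of `Option<Span>`, with `DUMMY_SP` standing in for `None` and `Span::substitute_dummy` replacing the `unwrap_or` calls. A self-contained sketch of the same pattern (the real `Span` and `DUMMY_SP` live in `rustc_span`; this mock only mirrors the semantics):

    #[derive(Clone, Copy, PartialEq, Debug)]
    struct Span(u32);
    const DUMMY_SP: Span = Span(0);

    impl Span {
        // Keep self unless it is the dummy sentinel, like Option::unwrap_or.
        fn substitute_dummy(self, other: Span) -> Span {
            if self == DUMMY_SP { other } else { self }
        }
    }

    fn main() {
        assert_eq!(Span(7).substitute_dummy(Span(9)), Span(7));
        assert_eq!(DUMMY_SP.substitute_dummy(Span(9)), Span(9));
    }
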
diff --git a/compiler/rustc_const_eval/src/const_eval/eval_queries.rs b/compiler/rustc_const_eval/src/const_eval/eval_queries.rs
index 4283ebc99d2..36f468d3308 100644
--- a/compiler/rustc_const_eval/src/const_eval/eval_queries.rs
+++ b/compiler/rustc_const_eval/src/const_eval/eval_queries.rs
@@ -1,8 +1,10 @@
 use std::sync::atomic::Ordering::Relaxed;
 
 use either::{Left, Right};
+use tracing::{debug, instrument, trace};
 
 use rustc_hir::def::DefKind;
+use rustc_middle::bug;
 use rustc_middle::mir::interpret::{AllocId, ErrorHandled, InterpErrorInfo};
 use rustc_middle::mir::{self, ConstAlloc, ConstValue};
 use rustc_middle::query::TyCtxtAt;
@@ -10,28 +12,29 @@ use rustc_middle::traits::Reveal;
 use rustc_middle::ty::layout::LayoutOf;
 use rustc_middle::ty::print::with_no_trimmed_paths;
 use rustc_middle::ty::{self, Ty, TyCtxt};
+use rustc_session::lint;
 use rustc_span::def_id::LocalDefId;
-use rustc_span::Span;
+use rustc_span::{Span, DUMMY_SP};
 use rustc_target::abi::{self, Abi};
 
 use super::{CanAccessMutGlobal, CompileTimeEvalContext, CompileTimeInterpreter};
 use crate::const_eval::CheckAlignment;
-use crate::errors;
 use crate::errors::ConstEvalError;
-use crate::interpret::eval_nullary_intrinsic;
+use crate::errors::{self, DanglingPtrInFinal};
 use crate::interpret::{
     create_static_alloc, intern_const_alloc_recursive, CtfeValidationMode, GlobalId, Immediate,
     InternKind, InterpCx, InterpError, InterpResult, MPlaceTy, MemoryKind, OpTy, RefTracking,
     StackPopCleanup,
 };
+use crate::interpret::{eval_nullary_intrinsic, throw_exhaust, InternResult};
 use crate::CTRL_C_RECEIVED;
 
 // Returns a pointer to where the result lives
 #[instrument(level = "trace", skip(ecx, body))]
-fn eval_body_using_ecx<'mir, 'tcx, R: InterpretationResult<'tcx>>(
-    ecx: &mut CompileTimeEvalContext<'mir, 'tcx>,
+fn eval_body_using_ecx<'tcx, R: InterpretationResult<'tcx>>(
+    ecx: &mut CompileTimeEvalContext<'tcx>,
     cid: GlobalId<'tcx>,
-    body: &'mir mir::Body<'tcx>,
+    body: &'tcx mir::Body<'tcx>,
 ) -> InterpResult<'tcx, R> {
     trace!(?ecx.param_env);
     let tcx = *ecx.tcx;
@@ -89,11 +92,35 @@ fn eval_body_using_ecx<'mir, 'tcx, R: InterpretationResult<'tcx>>(
     }
 
     // Intern the result
-    intern_const_alloc_recursive(ecx, intern_kind, &ret)?;
+    let intern_result = intern_const_alloc_recursive(ecx, intern_kind, &ret);
 
     // Since evaluation had no errors, validate the resulting constant.
     const_validate_mplace(&ecx, &ret, cid)?;
 
+    // Only report this after validation, as validation produces much better diagnostics.
+    // FIXME: ensure validation always reports this and stop making interning care about it.
+
+    match intern_result {
+        Ok(()) => {}
+        Err(InternResult::FoundDanglingPointer) => {
+            return Err(ecx
+                .tcx
+                .dcx()
+                .emit_err(DanglingPtrInFinal { span: ecx.tcx.span, kind: intern_kind })
+                .into());
+        }
+        Err(InternResult::FoundBadMutablePointer) => {
+            // only report mutable pointers if there were no dangling pointers
+            let err_diag = errors::MutablePtrInFinal { span: ecx.tcx.span, kind: intern_kind };
+            ecx.tcx.emit_node_span_lint(
+                lint::builtin::CONST_EVAL_MUTABLE_PTR_IN_FINAL_VALUE,
+                ecx.best_lint_scope(),
+                err_diag.span,
+                err_diag,
+            )
+        }
+    }
+
     Ok(R::make_result(ret, ecx))
 }
 
@@ -107,12 +134,12 @@ fn eval_body_using_ecx<'mir, 'tcx, R: InterpretationResult<'tcx>>(
 /// that inform us about the generic bounds of the constant. E.g., using an associated constant
 /// of a function's generic parameter will require knowledge about the bounds on the generic
 /// parameter. These bounds are passed to `mk_eval_cx` via the `ParamEnv` argument.
-pub(crate) fn mk_eval_cx_to_read_const_val<'mir, 'tcx>(
+pub(crate) fn mk_eval_cx_to_read_const_val<'tcx>(
     tcx: TyCtxt<'tcx>,
     root_span: Span,
     param_env: ty::ParamEnv<'tcx>,
     can_access_mut_global: CanAccessMutGlobal,
-) -> CompileTimeEvalContext<'mir, 'tcx> {
+) -> CompileTimeEvalContext<'tcx> {
     debug!("mk_eval_cx: {:?}", param_env);
     InterpCx::new(
         tcx,
@@ -124,12 +151,12 @@ pub(crate) fn mk_eval_cx_to_read_const_val<'mir, 'tcx>(
 
 /// Create an interpreter context to inspect the given `ConstValue`.
 /// Returns both the context and an `OpTy` that represents the constant.
-pub fn mk_eval_cx_for_const_val<'mir, 'tcx>(
+pub fn mk_eval_cx_for_const_val<'tcx>(
     tcx: TyCtxtAt<'tcx>,
     param_env: ty::ParamEnv<'tcx>,
     val: mir::ConstValue<'tcx>,
     ty: Ty<'tcx>,
-) -> Option<(CompileTimeEvalContext<'mir, 'tcx>, OpTy<'tcx>)> {
+) -> Option<(CompileTimeEvalContext<'tcx>, OpTy<'tcx>)> {
     let ecx = mk_eval_cx_to_read_const_val(tcx.tcx, tcx.span, param_env, CanAccessMutGlobal::No);
     let op = ecx.const_val_to_op(val, ty, None).ok()?;
     Some((ecx, op))
@@ -143,7 +170,7 @@ pub fn mk_eval_cx_for_const_val<'mir, 'tcx>(
 /// encounter an `Indirect` they cannot handle.
 #[instrument(skip(ecx), level = "debug")]
 pub(super) fn op_to_const<'tcx>(
-    ecx: &CompileTimeEvalContext<'_, 'tcx>,
+    ecx: &CompileTimeEvalContext<'tcx>,
     op: &OpTy<'tcx>,
     for_diagnostics: bool,
 ) -> ConstValue<'tcx> {
@@ -197,7 +224,7 @@ pub(super) fn op_to_const<'tcx>(
                 // This codepath solely exists for `valtree_to_const_value` to not need to generate
                 // a `ConstValue::Indirect` for wide references, so it is tightly restricted to just
                 // that case.
-                let pointee_ty = imm.layout.ty.builtin_deref(false).unwrap().ty; // `false` = no raw ptrs
+                let pointee_ty = imm.layout.ty.builtin_deref(false).unwrap(); // `false` = no raw ptrs
                 debug_assert!(
                     matches!(
                         ecx.tcx.struct_tail_without_normalization(pointee_ty).kind(),
@@ -273,7 +300,7 @@ pub fn eval_to_const_value_raw_provider<'tcx>(
             super::report(
                 tcx,
                 error.into_kind(),
-                Some(span),
+                span,
                 || (span, vec![]),
                 |span, _| errors::NullaryIntrinsicError { span },
             )
@@ -299,16 +326,16 @@ pub trait InterpretationResult<'tcx> {
     /// This function takes the place where the result of the evaluation is stored
     /// and prepares it for returning it in the appropriate format needed by the specific
     /// evaluation query.
-    fn make_result<'mir>(
+    fn make_result(
         mplace: MPlaceTy<'tcx>,
-        ecx: &mut InterpCx<'mir, 'tcx, CompileTimeInterpreter<'mir, 'tcx>>,
+        ecx: &mut InterpCx<'tcx, CompileTimeInterpreter<'tcx>>,
     ) -> Self;
 }
 
 impl<'tcx> InterpretationResult<'tcx> for ConstAlloc<'tcx> {
-    fn make_result<'mir>(
+    fn make_result(
         mplace: MPlaceTy<'tcx>,
-        _ecx: &mut InterpCx<'mir, 'tcx, CompileTimeInterpreter<'mir, 'tcx>>,
+        _ecx: &mut InterpCx<'tcx, CompileTimeInterpreter<'tcx>>,
     ) -> Self {
         ConstAlloc { alloc_id: mplace.ptr().provenance.unwrap().alloc_id(), ty: mplace.layout.ty }
     }
@@ -381,7 +408,7 @@ fn eval_in_interpreter<'tcx, R: InterpretationResult<'tcx>>(
         super::report(
             *ecx.tcx,
             error,
-            None,
+            DUMMY_SP,
             || super::get_span_and_frames(ecx.tcx, ecx.stack()),
             |span, frames| ConstEvalError { span, error_kind: kind, instance, frame_notes: frames },
         )
@@ -389,8 +416,8 @@ fn eval_in_interpreter<'tcx, R: InterpretationResult<'tcx>>(
 }
 
 #[inline(always)]
-fn const_validate_mplace<'mir, 'tcx>(
-    ecx: &InterpCx<'mir, 'tcx, CompileTimeInterpreter<'mir, 'tcx>>,
+fn const_validate_mplace<'tcx>(
+    ecx: &InterpCx<'tcx, CompileTimeInterpreter<'tcx>>,
     mplace: &MPlaceTy<'tcx>,
     cid: GlobalId<'tcx>,
 ) -> Result<(), ErrorHandled> {
@@ -419,8 +446,8 @@ fn const_validate_mplace<'mir, 'tcx>(
 }
 
 #[inline(always)]
-fn report_validation_error<'mir, 'tcx>(
-    ecx: &InterpCx<'mir, 'tcx, CompileTimeInterpreter<'mir, 'tcx>>,
+fn report_validation_error<'tcx>(
+    ecx: &InterpCx<'tcx, CompileTimeInterpreter<'tcx>>,
     error: InterpErrorInfo<'tcx>,
     alloc_id: AllocId,
 ) -> ErrorHandled {
@@ -436,7 +463,7 @@ fn report_validation_error<'mir, 'tcx>(
     crate::const_eval::report(
         *ecx.tcx,
         error,
-        None,
+        DUMMY_SP,
         || crate::const_eval::get_span_and_frames(ecx.tcx, ecx.stack()),
         move |span, frames| errors::ValidationFailure { span, ub_note, frames, raw_bytes },
     )
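
The new `InternResult` handling splits interning failures in two: dangling pointers in the final value become a hard error (`DanglingPtrInFinal`), while mutable pointers become the `CONST_EVAL_MUTABLE_PTR_IN_FINAL_VALUE` lint. For orientation, a constant's final value may legitimately point into `'static` memory, as in this runnable example; a pointer into a by-then-dead local, e.g. `const BAD: *const u8 = { let x = 0u8; &x as *const u8 };`, is the dangling case the error path rejects:

    // Promotion gives the 7 a 'static allocation, so this pointer is not dangling.
    const OK: &u8 = &7;

    fn main() {
        assert_eq!(*OK, 7);
    }
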
diff --git a/compiler/rustc_const_eval/src/const_eval/fn_queries.rs b/compiler/rustc_const_eval/src/const_eval/fn_queries.rs
index ddad6683afb..3c11d67e748 100644
--- a/compiler/rustc_const_eval/src/const_eval/fn_queries.rs
+++ b/compiler/rustc_const_eval/src/const_eval/fn_queries.rs
@@ -38,7 +38,6 @@ fn constness(tcx: TyCtxt<'_>, def_id: LocalDefId) -> hir::Constness {
     match node {
         hir::Node::Ctor(_)
         | hir::Node::AnonConst(_)
-        | hir::Node::ConstBlock(_)
         | hir::Node::ImplItem(hir::ImplItem { kind: hir::ImplItemKind::Const(..), .. }) => {
             hir::Constness::Const
         }
@@ -57,6 +56,7 @@ fn constness(tcx: TyCtxt<'_>, def_id: LocalDefId) -> hir::Constness {
             if is_const { hir::Constness::Const } else { hir::Constness::NotConst }
         }
         hir::Node::Expr(e) if let hir::ExprKind::Closure(c) = e.kind => c.constness,
+        hir::Node::Expr(e) if let hir::ExprKind::ConstBlock(_) = e.kind => hir::Constness::Const,
         _ => {
             if let Some(fn_kind) = node.fn_kind() {
                 if fn_kind.constness() == hir::Constness::Const {
@@ -81,8 +81,8 @@ fn is_promotable_const_fn(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
                 if cfg!(debug_assertions) && stab.promotable {
                     let sig = tcx.fn_sig(def_id);
                     assert_eq!(
-                        sig.skip_binder().unsafety(),
-                        hir::Unsafety::Normal,
+                        sig.skip_binder().safety(),
+                        hir::Safety::Safe,
                         "don't mark const unsafe fns as promotable",
                         // https://github.com/rust-lang/rust/pull/53851#issuecomment-418760682
                     );
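
The `constness` change tracks inline `const` blocks moving from their own HIR node (`Node::ConstBlock`) to an expression kind (`ExprKind::ConstBlock`); either way they are unconditionally const. The feature itself, on stable Rust (since 1.79):

    fn main() {
        // An inline const block is evaluated at compile time.
        let x = const { 6 * 7 };
        assert_eq!(x, 42);
    }
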
diff --git a/compiler/rustc_const_eval/src/const_eval/machine.rs b/compiler/rustc_const_eval/src/const_eval/machine.rs
index dd835279df3..79a161d3f03 100644
--- a/compiler/rustc_const_eval/src/const_eval/machine.rs
+++ b/compiler/rustc_const_eval/src/const_eval/machine.rs
@@ -6,10 +6,10 @@ use std::ops::ControlFlow;
 use rustc_ast::Mutability;
 use rustc_data_structures::fx::FxIndexMap;
 use rustc_data_structures::fx::IndexEntry;
-use rustc_hir::def::DefKind;
 use rustc_hir::def_id::DefId;
 use rustc_hir::def_id::LocalDefId;
 use rustc_hir::LangItem;
+use rustc_middle::bug;
 use rustc_middle::mir;
 use rustc_middle::mir::AssertMessage;
 use rustc_middle::query::TyCtxtAt;
@@ -20,12 +20,14 @@ use rustc_span::symbol::{sym, Symbol};
 use rustc_span::Span;
 use rustc_target::abi::{Align, Size};
 use rustc_target::spec::abi::Abi as CallAbi;
+use tracing::debug;
 
 use crate::errors::{LongRunning, LongRunningWarn};
 use crate::fluent_generated as fluent;
 use crate::interpret::{
-    self, compile_time_machine, AllocId, AllocRange, ConstAllocation, CtfeProvenance, FnArg, FnVal,
-    Frame, ImmTy, InterpCx, InterpResult, MPlaceTy, OpTy, Pointer, PointerArithmetic, Scalar,
+    self, compile_time_machine, err_ub, throw_exhaust, throw_inval, throw_ub_custom, throw_unsup,
+    throw_unsup_format, AllocId, AllocRange, ConstAllocation, CtfeProvenance, FnArg, FnVal, Frame,
+    GlobalAlloc, ImmTy, InterpCx, InterpResult, MPlaceTy, OpTy, Pointer, PointerArithmetic, Scalar,
 };
 
 use super::error::*;
@@ -42,7 +44,7 @@ const TINY_LINT_TERMINATOR_LIMIT: usize = 20;
 const PROGRESS_INDICATOR_START: usize = 4_000_000;
 
 /// Extra machine state for CTFE, and the Machine instance
-pub struct CompileTimeInterpreter<'mir, 'tcx> {
+pub struct CompileTimeInterpreter<'tcx> {
     /// The number of terminators that have been evaluated.
     ///
     /// This is used to produce lints informing the user that the compiler is not stuck.
@@ -50,7 +52,7 @@ pub struct CompileTimeInterpreter<'mir, 'tcx> {
     pub(super) num_evaluated_steps: usize,
 
     /// The virtual call stack.
-    pub(super) stack: Vec<Frame<'mir, 'tcx>>,
+    pub(super) stack: Vec<Frame<'tcx>>,
 
     /// Pattern matching on consts with references would be unsound if those references
     /// could point to anything mutable. Therefore, when evaluating consts and when constructing valtrees,
@@ -87,7 +89,7 @@ impl From<bool> for CanAccessMutGlobal {
     }
 }
 
-impl<'mir, 'tcx> CompileTimeInterpreter<'mir, 'tcx> {
+impl<'tcx> CompileTimeInterpreter<'tcx> {
     pub(crate) fn new(
         can_access_mut_global: CanAccessMutGlobal,
         check_alignment: CheckAlignment,
@@ -161,8 +163,7 @@ impl<K: Hash + Eq, V> interpret::AllocMap<K, V> for FxIndexMap<K, V> {
     }
 }
 
-pub(crate) type CompileTimeEvalContext<'mir, 'tcx> =
-    InterpCx<'mir, 'tcx, CompileTimeInterpreter<'mir, 'tcx>>;
+pub(crate) type CompileTimeEvalContext<'tcx> = InterpCx<'tcx, CompileTimeInterpreter<'tcx>>;
 
 #[derive(Debug, PartialEq, Eq, Copy, Clone)]
 pub enum MemoryKind {
@@ -194,7 +195,7 @@ impl interpret::MayLeak for ! {
     }
 }
 
-impl<'mir, 'tcx: 'mir> CompileTimeEvalContext<'mir, 'tcx> {
+impl<'tcx> CompileTimeEvalContext<'tcx> {
     fn location_triple_for_span(&self, span: Span) -> (Symbol, u32, u32) {
         let topmost = span.ctxt().outer_expn().expansion_cause().unwrap_or(span);
         let caller = self.tcx.sess.source_map().lookup_char_pos(topmost.lo());
@@ -368,53 +369,42 @@ impl<'mir, 'tcx: 'mir> CompileTimeEvalContext<'mir, 'tcx> {
     }
 }
 
-impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir, 'tcx> {
-    compile_time_machine!(<'mir, 'tcx>);
+impl<'tcx> interpret::Machine<'tcx> for CompileTimeInterpreter<'tcx> {
+    compile_time_machine!(<'tcx>);
 
     type MemoryKind = MemoryKind;
 
     const PANIC_ON_ALLOC_FAIL: bool = false; // will be raised as a proper error
 
     #[inline(always)]
-    fn enforce_alignment(ecx: &InterpCx<'mir, 'tcx, Self>) -> bool {
+    fn enforce_alignment(ecx: &InterpCx<'tcx, Self>) -> bool {
         matches!(ecx.machine.check_alignment, CheckAlignment::Error)
     }
 
     #[inline(always)]
-    fn enforce_validity(ecx: &InterpCx<'mir, 'tcx, Self>, layout: TyAndLayout<'tcx>) -> bool {
+    fn enforce_validity(ecx: &InterpCx<'tcx, Self>, layout: TyAndLayout<'tcx>) -> bool {
         ecx.tcx.sess.opts.unstable_opts.extra_const_ub_checks || layout.abi.is_uninhabited()
     }
 
     fn load_mir(
-        ecx: &InterpCx<'mir, 'tcx, Self>,
+        ecx: &InterpCx<'tcx, Self>,
         instance: ty::InstanceDef<'tcx>,
     ) -> InterpResult<'tcx, &'tcx mir::Body<'tcx>> {
         match instance {
-            ty::InstanceDef::Item(def) => {
-                if ecx.tcx.is_ctfe_mir_available(def) {
-                    Ok(ecx.tcx.mir_for_ctfe(def))
-                } else if ecx.tcx.def_kind(def) == DefKind::AssocConst {
-                    ecx.tcx.dcx().bug("This is likely a const item that is missing from its impl");
-                } else {
-                    // `find_mir_or_eval_fn` checks that this is a const fn before even calling us,
-                    // so this should be unreachable.
-                    let path = ecx.tcx.def_path_str(def);
-                    bug!("trying to call extern function `{path}` at compile-time");
-                }
-            }
+            ty::InstanceDef::Item(def) => Ok(ecx.tcx.mir_for_ctfe(def)),
             _ => Ok(ecx.tcx.instance_mir(instance)),
         }
     }
 
     fn find_mir_or_eval_fn(
-        ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        ecx: &mut InterpCx<'tcx, Self>,
         orig_instance: ty::Instance<'tcx>,
         _abi: CallAbi,
         args: &[FnArg<'tcx>],
         dest: &MPlaceTy<'tcx>,
         ret: Option<mir::BasicBlock>,
         _unwind: mir::UnwindAction, // unwinding is not supported in consts
-    ) -> InterpResult<'tcx, Option<(&'mir mir::Body<'tcx>, ty::Instance<'tcx>)>> {
+    ) -> InterpResult<'tcx, Option<(&'tcx mir::Body<'tcx>, ty::Instance<'tcx>)>> {
         debug!("find_mir_or_eval_fn: {:?}", orig_instance);
 
         // Replace some functions.
@@ -445,7 +435,7 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir,
         Ok(Some((ecx.load_mir(instance.def, None)?, orig_instance)))
     }
 
-    fn panic_nounwind(ecx: &mut InterpCx<'mir, 'tcx, Self>, msg: &str) -> InterpResult<'tcx> {
+    fn panic_nounwind(ecx: &mut InterpCx<'tcx, Self>, msg: &str) -> InterpResult<'tcx> {
         let msg = Symbol::intern(msg);
         let span = ecx.find_closest_untracked_caller_location();
         let (file, line, col) = ecx.location_triple_for_span(span);
@@ -453,23 +443,20 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir,
     }
 
     fn call_intrinsic(
-        ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        ecx: &mut InterpCx<'tcx, Self>,
         instance: ty::Instance<'tcx>,
         args: &[OpTy<'tcx>],
         dest: &MPlaceTy<'tcx, Self::Provenance>,
         target: Option<mir::BasicBlock>,
         _unwind: mir::UnwindAction,
-    ) -> InterpResult<'tcx> {
+    ) -> InterpResult<'tcx, Option<ty::Instance<'tcx>>> {
         // Shared intrinsics.
         if ecx.emulate_intrinsic(instance, args, dest, target)? {
-            return Ok(());
+            return Ok(None);
         }
         let intrinsic_name = ecx.tcx.item_name(instance.def_id());
 
         // CTFE-specific intrinsics.
-        let Some(ret) = target else {
-            throw_unsup_format!("intrinsic `{intrinsic_name}` is not supported at compile-time");
-        };
         match intrinsic_name {
             sym::ptr_guaranteed_cmp => {
                 let a = ecx.read_scalar(&args[0])?;
@@ -536,18 +523,26 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir,
             // not the optimization stage.)
             sym::is_val_statically_known => ecx.write_scalar(Scalar::from_bool(false), dest)?,
             _ => {
-                throw_unsup_format!(
-                    "intrinsic `{intrinsic_name}` is not supported at compile-time"
-                );
+                // We haven't handled the intrinsic, let's see if we can use a fallback body.
+                if ecx.tcx.intrinsic(instance.def_id()).unwrap().must_be_overridden {
+                    throw_unsup_format!(
+                        "intrinsic `{intrinsic_name}` is not supported at compile-time"
+                    );
+                }
+                return Ok(Some(ty::Instance {
+                    def: ty::InstanceDef::Item(instance.def_id()),
+                    args: instance.args,
+                }));
             }
         }
 
-        ecx.go_to_block(ret);
-        Ok(())
+        // Intrinsic is done, jump to next block.
+        ecx.return_to_block(target)?;
+        Ok(None)
     }
 
     fn assert_panic(
-        ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        ecx: &mut InterpCx<'tcx, Self>,
         msg: &AssertMessage<'tcx>,
         _unwind: mir::UnwindAction,
     ) -> InterpResult<'tcx> {
@@ -578,15 +573,15 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir,
     }
 
     fn binary_ptr_op(
-        _ecx: &InterpCx<'mir, 'tcx, Self>,
+        _ecx: &InterpCx<'tcx, Self>,
         _bin_op: mir::BinOp,
         _left: &ImmTy<'tcx>,
         _right: &ImmTy<'tcx>,
-    ) -> InterpResult<'tcx, (ImmTy<'tcx>, bool)> {
+    ) -> InterpResult<'tcx, ImmTy<'tcx>> {
         throw_unsup_format!("pointer arithmetic or comparison is not supported at compile-time");
     }
 
-    fn increment_const_eval_counter(ecx: &mut InterpCx<'mir, 'tcx, Self>) -> InterpResult<'tcx> {
+    fn increment_const_eval_counter(ecx: &mut InterpCx<'tcx, Self>) -> InterpResult<'tcx> {
         // The step limit has already been hit in a previous call to `increment_const_eval_counter`.
 
         if let Some(new_steps) = ecx.machine.num_evaluated_steps.checked_add(1) {
@@ -642,16 +637,16 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir,
     }
 
     #[inline(always)]
-    fn expose_ptr(_ecx: &mut InterpCx<'mir, 'tcx, Self>, _ptr: Pointer) -> InterpResult<'tcx> {
+    fn expose_ptr(_ecx: &mut InterpCx<'tcx, Self>, _ptr: Pointer) -> InterpResult<'tcx> {
         // This is only reachable with -Zunleash-the-miri-inside-of-you.
         throw_unsup_format!("exposing pointers is not possible at compile-time")
     }
 
     #[inline(always)]
-    fn init_frame_extra(
-        ecx: &mut InterpCx<'mir, 'tcx, Self>,
-        frame: Frame<'mir, 'tcx>,
-    ) -> InterpResult<'tcx, Frame<'mir, 'tcx>> {
+    fn init_frame(
+        ecx: &mut InterpCx<'tcx, Self>,
+        frame: Frame<'tcx>,
+    ) -> InterpResult<'tcx, Frame<'tcx>> {
         // Enforce stack size limit. Add 1 because this is run before the new frame is pushed.
         if !ecx.recursion_limit.value_within_limit(ecx.stack().len() + 1) {
             throw_exhaust!(StackFrameLimitReached)
@@ -662,15 +657,15 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir,
 
     #[inline(always)]
     fn stack<'a>(
-        ecx: &'a InterpCx<'mir, 'tcx, Self>,
-    ) -> &'a [Frame<'mir, 'tcx, Self::Provenance, Self::FrameExtra>] {
+        ecx: &'a InterpCx<'tcx, Self>,
+    ) -> &'a [Frame<'tcx, Self::Provenance, Self::FrameExtra>] {
         &ecx.machine.stack
     }
 
     #[inline(always)]
     fn stack_mut<'a>(
-        ecx: &'a mut InterpCx<'mir, 'tcx, Self>,
-    ) -> &'a mut Vec<Frame<'mir, 'tcx, Self::Provenance, Self::FrameExtra>> {
+        ecx: &'a mut InterpCx<'tcx, Self>,
+    ) -> &'a mut Vec<Frame<'tcx, Self::Provenance, Self::FrameExtra>> {
         &mut ecx.machine.stack
     }
 
@@ -707,7 +702,7 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir,
     }
 
     fn retag_ptr_value(
-        ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        ecx: &mut InterpCx<'tcx, Self>,
         _kind: mir::RetagKind,
         val: &ImmTy<'tcx, CtfeProvenance>,
     ) -> InterpResult<'tcx, ImmTy<'tcx, CtfeProvenance>> {
@@ -748,15 +743,22 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir,
         Ok(())
     }
 
-    fn before_alloc_read(
-        ecx: &InterpCx<'mir, 'tcx, Self>,
-        alloc_id: AllocId,
-    ) -> InterpResult<'tcx> {
+    fn before_alloc_read(ecx: &InterpCx<'tcx, Self>, alloc_id: AllocId) -> InterpResult<'tcx> {
+        // Check if this is the currently evaluated static.
         if Some(alloc_id) == ecx.machine.static_root_ids.map(|(id, _)| id) {
-            Err(ConstEvalErrKind::RecursiveStatic.into())
-        } else {
-            Ok(())
+            return Err(ConstEvalErrKind::RecursiveStatic.into());
         }
+        // If this is another static, make sure we fire off the query to detect cycles.
+        // But only do that when checks for static recursion are enabled.
+        if ecx.machine.static_root_ids.is_some() {
+            if let Some(GlobalAlloc::Static(def_id)) = ecx.tcx.try_get_global_alloc(alloc_id) {
+                if ecx.tcx.is_foreign_item(def_id) {
+                    throw_unsup!(ExternStatic(def_id));
+                }
+                ecx.ctfe_query(|tcx| tcx.eval_static_initializer(def_id))?;
+            }
+        }
+        Ok(())
     }
 }
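
The extended `before_alloc_read` hook errors on a static reading itself and, for reads of other statics, fires `eval_static_initializer` so genuine cycles surface through the query system. Acyclic reads between statics remain legal, as this runnable example shows; `static S: u32 = S;` is the self-read the hook rejects:

    static A: u32 = 40;
    static B: u32 = A + 2; // fine: no cycle, A's initializer is forced first

    fn main() {
        assert_eq!(B, 42);
    }
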
 
diff --git a/compiler/rustc_const_eval/src/const_eval/mod.rs b/compiler/rustc_const_eval/src/const_eval/mod.rs
index 8efc67bcb0c..4ae4816e33a 100644
--- a/compiler/rustc_const_eval/src/const_eval/mod.rs
+++ b/compiler/rustc_const_eval/src/const_eval/mod.rs
@@ -1,10 +1,12 @@
 // Not in interpret to make sure we do not use private implementation details
 
+use rustc_middle::bug;
 use rustc_middle::mir;
 use rustc_middle::mir::interpret::InterpErrorInfo;
 use rustc_middle::query::{Key, TyCtxtAt};
 use rustc_middle::ty::{self, Ty, TyCtxt};
 use rustc_target::abi::VariantIdx;
+use tracing::instrument;
 
 use crate::interpret::{format_interp_error, InterpCx};
 
diff --git a/compiler/rustc_const_eval/src/const_eval/valtrees.rs b/compiler/rustc_const_eval/src/const_eval/valtrees.rs
index dcfce4e35e0..5312f1f946f 100644
--- a/compiler/rustc_const_eval/src/const_eval/valtrees.rs
+++ b/compiler/rustc_const_eval/src/const_eval/valtrees.rs
@@ -1,10 +1,12 @@
 use rustc_data_structures::stack::ensure_sufficient_stack;
+use rustc_middle::bug;
 use rustc_middle::mir;
 use rustc_middle::mir::interpret::{EvalToValTreeResult, GlobalId};
 use rustc_middle::ty::layout::{LayoutCx, LayoutOf, TyAndLayout};
 use rustc_middle::ty::{self, ScalarInt, Ty, TyCtxt};
 use rustc_span::DUMMY_SP;
 use rustc_target::abi::{Abi, VariantIdx};
+use tracing::{debug, instrument, trace};
 
 use super::eval_queries::{mk_eval_cx_to_read_const_val, op_to_const};
 use super::machine::CompileTimeEvalContext;
@@ -19,7 +21,7 @@ use crate::interpret::{
 
 #[instrument(skip(ecx), level = "debug")]
 fn branches<'tcx>(
-    ecx: &CompileTimeEvalContext<'tcx, 'tcx>,
+    ecx: &CompileTimeEvalContext<'tcx>,
     place: &MPlaceTy<'tcx>,
     n: usize,
     variant: Option<VariantIdx>,
@@ -57,7 +59,7 @@ fn branches<'tcx>(
 
 #[instrument(skip(ecx), level = "debug")]
 fn slice_branches<'tcx>(
-    ecx: &CompileTimeEvalContext<'tcx, 'tcx>,
+    ecx: &CompileTimeEvalContext<'tcx>,
     place: &MPlaceTy<'tcx>,
     num_nodes: &mut usize,
 ) -> ValTreeCreationResult<'tcx> {
@@ -75,7 +77,7 @@ fn slice_branches<'tcx>(
 
 #[instrument(skip(ecx), level = "debug")]
 fn const_to_valtree_inner<'tcx>(
-    ecx: &CompileTimeEvalContext<'tcx, 'tcx>,
+    ecx: &CompileTimeEvalContext<'tcx>,
     place: &MPlaceTy<'tcx>,
     num_nodes: &mut usize,
 ) -> ValTreeCreationResult<'tcx> {
@@ -217,7 +219,7 @@ fn reconstruct_place_meta<'tcx>(
 
 #[instrument(skip(ecx), level = "debug", ret)]
 fn create_valtree_place<'tcx>(
-    ecx: &mut CompileTimeEvalContext<'tcx, 'tcx>,
+    ecx: &mut CompileTimeEvalContext<'tcx>,
     layout: TyAndLayout<'tcx>,
     valtree: ty::ValTree<'tcx>,
 ) -> MPlaceTy<'tcx> {
@@ -362,7 +364,7 @@ pub fn valtree_to_const_value<'tcx>(
 
 /// Put a valtree into memory and return a reference to that.
 fn valtree_to_ref<'tcx>(
-    ecx: &mut CompileTimeEvalContext<'tcx, 'tcx>,
+    ecx: &mut CompileTimeEvalContext<'tcx>,
     valtree: ty::ValTree<'tcx>,
     pointee_ty: Ty<'tcx>,
 ) -> Immediate {
@@ -378,7 +380,7 @@ fn valtree_to_ref<'tcx>(
 
 #[instrument(skip(ecx), level = "debug")]
 fn valtree_into_mplace<'tcx>(
-    ecx: &mut CompileTimeEvalContext<'tcx, 'tcx>,
+    ecx: &mut CompileTimeEvalContext<'tcx>,
     place: &MPlaceTy<'tcx>,
     valtree: ty::ValTree<'tcx>,
 ) {
@@ -455,6 +457,6 @@ fn valtree_into_mplace<'tcx>(
     }
 }
 
-fn dump_place<'tcx>(ecx: &CompileTimeEvalContext<'tcx, 'tcx>, place: &MPlaceTy<'tcx>) {
+fn dump_place<'tcx>(ecx: &CompileTimeEvalContext<'tcx>, place: &MPlaceTy<'tcx>) {
     trace!("{:?}", ecx.dump_place(&PlaceTy::from(place.clone())));
 }
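
Valtrees are the tree-shaped value representation this file builds and tears down so that constants, including reference-containing ones, can participate in pattern matching. A small example of the user-visible behavior the machinery serves:

    const PAIR: &(u8, u8) = &(1, 2);

    fn main() {
        // The const's value takes part in structural pattern matching.
        match &(1u8, 2u8) {
            PAIR => println!("matched PAIR"),
            _ => println!("no match"),
        }
    }
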
diff --git a/compiler/rustc_const_eval/src/errors.rs b/compiler/rustc_const_eval/src/errors.rs
index a60cedd6500..e5ea4c3442e 100644
--- a/compiler/rustc_const_eval/src/errors.rs
+++ b/compiler/rustc_const_eval/src/errors.rs
@@ -1,5 +1,6 @@
 use std::borrow::Cow;
 
+use either::Either;
 use rustc_errors::{
     codes::*, Diag, DiagArgValue, DiagCtxt, DiagMessage, Diagnostic, EmissionGuarantee, Level,
 };
@@ -481,6 +482,8 @@ impl<'a> ReportErrorExt for UndefinedBehaviorInfo<'a> {
             DivisionOverflow => const_eval_division_overflow,
             RemainderOverflow => const_eval_remainder_overflow,
             PointerArithOverflow => const_eval_pointer_arithmetic_overflow,
+            ArithOverflow { .. } => const_eval_overflow_arith,
+            ShiftOverflow { .. } => const_eval_overflow_shift,
             InvalidMeta(InvalidMetaKind::SliceTooBig) => const_eval_invalid_meta_slice,
             InvalidMeta(InvalidMetaKind::TooBig) => const_eval_invalid_meta,
             UnterminatedCString(_) => const_eval_unterminated_c_string,
@@ -498,6 +501,7 @@ impl<'a> ReportErrorExt for UndefinedBehaviorInfo<'a> {
             InvalidTag(_) => const_eval_invalid_tag,
             InvalidFunctionPointer(_) => const_eval_invalid_function_pointer,
             InvalidVTablePointer(_) => const_eval_invalid_vtable_pointer,
+            InvalidVTableTrait { .. } => const_eval_invalid_vtable_trait,
             InvalidStr(_) => const_eval_invalid_str,
             InvalidUninitBytes(None) => const_eval_invalid_uninit_bytes_unknown,
             InvalidUninitBytes(Some(_)) => const_eval_invalid_uninit_bytes,
@@ -537,6 +541,20 @@ impl<'a> ReportErrorExt for UndefinedBehaviorInfo<'a> {
             | DeadLocal
             | UninhabitedEnumVariantWritten(_)
             | UninhabitedEnumVariantRead(_) => {}
+
+            ArithOverflow { intrinsic } => {
+                diag.arg("intrinsic", intrinsic);
+            }
+            ShiftOverflow { intrinsic, shift_amount } => {
+                diag.arg("intrinsic", intrinsic);
+                diag.arg(
+                    "shift_amount",
+                    match shift_amount {
+                        Either::Left(v) => v.to_string(),
+                        Either::Right(v) => v.to_string(),
+                    },
+                );
+            }
             BoundsCheckFailed { len, index } => {
                 diag.arg("len", len);
                 diag.arg("index", index);
@@ -544,6 +562,13 @@ impl<'a> ReportErrorExt for UndefinedBehaviorInfo<'a> {
             UnterminatedCString(ptr) | InvalidFunctionPointer(ptr) | InvalidVTablePointer(ptr) => {
                 diag.arg("pointer", ptr);
             }
+            InvalidVTableTrait { expected_trait, vtable_trait } => {
+                diag.arg("expected_trait", expected_trait.to_string());
+                diag.arg(
+                    "vtable_trait",
+                    vtable_trait.map(|t| t.to_string()).unwrap_or_else(|| format!("<trivial>")),
+                );
+            }
             PointerUseAfterFree(alloc_id, msg) => {
                 diag.arg("alloc_id", alloc_id)
                     .arg("bad_pointer_message", bad_pointer_message(msg, dcx));
@@ -634,6 +659,7 @@ impl<'tcx> ReportErrorExt for ValidationErrorInfo<'tcx> {
             UninhabitedEnumVariant => const_eval_validation_uninhabited_enum_variant,
             Uninit { .. } => const_eval_validation_uninit,
             InvalidVTablePtr { .. } => const_eval_validation_invalid_vtable_ptr,
+            InvalidMetaWrongTrait { .. } => const_eval_validation_invalid_vtable_trait,
             InvalidMetaSliceTooLarge { ptr_kind: PointerKind::Box } => {
                 const_eval_validation_invalid_box_slice_meta
             }
@@ -773,6 +799,13 @@ impl<'tcx> ReportErrorExt for ValidationErrorInfo<'tcx> {
             DanglingPtrNoProvenance { pointer, .. } => {
                 err.arg("pointer", pointer);
             }
+            InvalidMetaWrongTrait { expected_trait: ref_trait, vtable_trait } => {
+                err.arg("ref_trait", ref_trait.to_string());
+                err.arg(
+                    "vtable_trait",
+                    vtable_trait.map(|t| t.to_string()).unwrap_or_else(|| format!("<trivial>")),
+                );
+            }
             NullPtr { .. }
             | PtrToStatic { .. }
             | ConstRefToMutable
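
The new `ArithOverflow` and `ShiftOverflow` variants carry the intrinsic name and, for shifts, the out-of-range amount (as either a signed or an unsigned value, hence the `either::Either` import). The condition being diagnosed is shifting by at least the bit width of the type, observable on stable via the checked operations:

    fn main() {
        // Shifting a u32 by 32 or more overflows; checked_shl reports it as None.
        assert!(1u32.checked_shl(31).is_some());
        assert!(1u32.checked_shl(32).is_none());
        // In const eval, an unchecked shift intrinsic with such an amount is
        // what produces the ShiftOverflow error.
    }
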
diff --git a/compiler/rustc_const_eval/src/interpret/cast.rs b/compiler/rustc_const_eval/src/interpret/cast.rs
index 9447d18fe8c..19414c72c6a 100644
--- a/compiler/rustc_const_eval/src/interpret/cast.rs
+++ b/compiler/rustc_const_eval/src/interpret/cast.rs
@@ -7,16 +7,19 @@ use rustc_middle::mir::CastKind;
 use rustc_middle::ty::adjustment::PointerCoercion;
 use rustc_middle::ty::layout::{IntegerExt, LayoutOf, TyAndLayout};
 use rustc_middle::ty::{self, FloatTy, Ty};
+use rustc_middle::{bug, span_bug};
 use rustc_target::abi::Integer;
 use rustc_type_ir::TyKind::*;
+use tracing::trace;
 
 use super::{
-    util::ensure_monomorphic_enough, FnVal, ImmTy, Immediate, InterpCx, Machine, OpTy, PlaceTy,
+    err_inval, throw_ub, throw_ub_custom, util::ensure_monomorphic_enough, FnVal, ImmTy, Immediate,
+    InterpCx, Machine, OpTy, PlaceTy,
 };
 
 use crate::fluent_generated as fluent;
 
-impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
     pub fn cast(
         &mut self,
         src: &OpTy<'tcx, M::Provenance>,
@@ -204,7 +207,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         assert!(cast_to.ty.is_unsafe_ptr());
         // Handle casting any ptr to raw ptr (might be a fat ptr).
         if cast_to.size == src.layout.size {
-            // Thin or fat pointer that just hast the ptr kind of target type changed.
+            // Thin or fat pointer that just has the ptr kind of target type changed.
             return Ok(ImmTy::from_immediate(**src, cast_to));
         } else {
             // Casting the metadata away from a fat ptr.
@@ -321,13 +324,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         use rustc_type_ir::TyKind::*;
 
         fn adjust_nan<
-            'mir,
-            'tcx: 'mir,
-            M: Machine<'mir, 'tcx>,
+            'tcx,
+            M: Machine<'tcx>,
             F1: rustc_apfloat::Float + FloatConvert<F2>,
             F2: rustc_apfloat::Float,
         >(
-            ecx: &InterpCx<'mir, 'tcx, M>,
+            ecx: &InterpCx<'tcx, M>,
             f1: F1,
             f2: F2,
         ) -> F2 {
@@ -393,6 +395,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 let val = self.read_immediate(src)?;
                 if data_a.principal() == data_b.principal() {
                     // A NOP cast that doesn't actually change anything, should be allowed even with mismatching vtables.
+                    // (But currently mismatching vtables violate the validity invariant so UB is triggered anyway.)
                     return self.write_immediate(*val, dest);
                 }
                 let (old_data, old_vptr) = val.to_scalar_pair();
@@ -400,7 +403,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 let old_vptr = old_vptr.to_pointer(self)?;
                 let (ty, old_trait) = self.get_ptr_vtable(old_vptr)?;
                 if old_trait != data_a.principal() {
-                    throw_ub_custom!(fluent::const_eval_upcast_mismatch);
+                    throw_ub!(InvalidVTableTrait {
+                        expected_trait: data_a,
+                        vtable_trait: old_trait,
+                    });
                 }
                 let new_vptr = self.get_vtable_ptr(ty, data_b.principal())?;
                 self.write_immediate(Immediate::new_dyn_trait(old_data, new_vptr, self), dest)
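
This hunk turns a vtable mismatch during `dyn` pointer casts from a custom UB message into the structured `InvalidVTableTrait` error, while casts that keep the same principal trait stay NOPs. A stable example of such a NOP-shaped cast, dropping only an auto trait:

    use std::fmt::Debug;

    fn main() {
        let wide: &(dyn Debug + Send) = &1u8;
        // Same principal trait (Debug), so the vtable stays compatible.
        let narrowed: &dyn Debug = wide;
        println!("{narrowed:?}");
    }
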
diff --git a/compiler/rustc_const_eval/src/interpret/discriminant.rs b/compiler/rustc_const_eval/src/interpret/discriminant.rs
index 704f597cfdb..224d17dbf52 100644
--- a/compiler/rustc_const_eval/src/interpret/discriminant.rs
+++ b/compiler/rustc_const_eval/src/interpret/discriminant.rs
@@ -1,14 +1,18 @@
 //! Functions for reading and writing discriminants of multi-variant layouts (enums and coroutines).
 
 use rustc_middle::mir;
+use rustc_middle::span_bug;
 use rustc_middle::ty::layout::{LayoutOf, PrimitiveExt};
 use rustc_middle::ty::{self, ScalarInt, Ty};
 use rustc_target::abi::{self, TagEncoding};
 use rustc_target::abi::{VariantIdx, Variants};
+use tracing::{instrument, trace};
 
-use super::{ImmTy, InterpCx, InterpResult, Machine, Readable, Scalar, Writeable};
+use super::{
+    err_ub, throw_ub, ImmTy, InterpCx, InterpResult, Machine, Readable, Scalar, Writeable,
+};
 
-impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
     /// Writes the discriminant of the given variant.
     ///
     /// If the variant is uninhabited, this is UB.
@@ -169,7 +173,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                         let tag_val = ImmTy::from_uint(tag_bits, tag_layout);
                         let niche_start_val = ImmTy::from_uint(niche_start, tag_layout);
                         let variant_index_relative_val =
-                            self.wrapping_binary_op(mir::BinOp::Sub, &tag_val, &niche_start_val)?;
+                            self.binary_op(mir::BinOp::Sub, &tag_val, &niche_start_val)?;
                         let variant_index_relative =
                             variant_index_relative_val.to_scalar().assert_bits(tag_val.layout.size);
                         // Check if this is in the range that indicates an actual discriminant.
@@ -289,14 +293,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 let variant_index_relative_val =
                     ImmTy::from_uint(variant_index_relative, tag_layout);
                 let tag = self
-                    .wrapping_binary_op(
-                        mir::BinOp::Add,
-                        &variant_index_relative_val,
-                        &niche_start_val,
-                    )?
+                    .binary_op(mir::BinOp::Add, &variant_index_relative_val, &niche_start_val)?
                     .to_scalar()
-                    .try_to_int()
-                    .unwrap();
+                    .assert_int();
                 Ok(Some((tag, tag_field)))
             }
         }
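
[Both hunks above route the niche-tag arithmetic through plain `binary_op` (wrapping `Add`/`Sub` never need the overflow variant): encoding computes `tag = niche_start + relative_index`, and decoding is the wrapping inverse plus a range check that distinguishes niche variants from the untagged variant. A self-contained sketch of the decode step, with made-up layout numbers rather than a real rustc layout:

    fn decode_niche(tag: u8, niche_start: u8, niche_variants: u8) -> Option<u8> {
        // Mirrors the interpreter: relative index via a wrapping Sub, then a
        // range check decides between a niche variant and the untagged variant.
        let relative = tag.wrapping_sub(niche_start);
        if relative < niche_variants { Some(relative) } else { None }
    }

    fn main() {
        // Hypothetical layout: 3 niche variants encoded at tags 251, 252, 253.
        assert_eq!(decode_niche(252, 251, 3), Some(1));
        assert_eq!(decode_niche(7, 251, 3), None); // tag of the untagged variant
    }
]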
diff --git a/compiler/rustc_const_eval/src/interpret/eval_context.rs b/compiler/rustc_const_eval/src/interpret/eval_context.rs
index 62d169db628..7c2100fcbe3 100644
--- a/compiler/rustc_const_eval/src/interpret/eval_context.rs
+++ b/compiler/rustc_const_eval/src/interpret/eval_context.rs
@@ -2,6 +2,7 @@ use std::cell::Cell;
 use std::{fmt, mem};
 
 use either::{Either, Left, Right};
+use tracing::{debug, info, info_span, instrument, trace};
 
 use hir::CRATE_HIR_ID;
 use rustc_errors::DiagCtxt;
@@ -17,21 +18,23 @@ use rustc_middle::ty::layout::{
     TyAndLayout,
 };
 use rustc_middle::ty::{self, GenericArgsRef, ParamEnv, Ty, TyCtxt, TypeFoldable, Variance};
+use rustc_middle::{bug, span_bug};
 use rustc_mir_dataflow::storage::always_storage_live_locals;
 use rustc_session::Limit;
 use rustc_span::Span;
 use rustc_target::abi::{call::FnAbi, Align, HasDataLayout, Size, TargetDataLayout};
 
 use super::{
-    GlobalId, Immediate, InterpErrorInfo, InterpResult, MPlaceTy, Machine, MemPlace, MemPlaceMeta,
-    Memory, MemoryKind, OpTy, Operand, Place, PlaceTy, Pointer, PointerArithmetic, Projectable,
-    Provenance, Scalar, StackPopJump,
+    err_inval, throw_inval, throw_ub, throw_ub_custom, throw_unsup, GlobalId, Immediate,
+    InterpErrorInfo, InterpResult, MPlaceTy, Machine, MemPlace, MemPlaceMeta, Memory, MemoryKind,
+    OpTy, Operand, Place, PlaceTy, Pointer, PointerArithmetic, Projectable, Provenance, Scalar,
+    StackPopJump,
 };
 use crate::errors;
 use crate::util;
 use crate::{fluent_generated as fluent, ReportErrorExt};
 
-pub struct InterpCx<'mir, 'tcx, M: Machine<'mir, 'tcx>> {
+pub struct InterpCx<'tcx, M: Machine<'tcx>> {
     /// Stores the `Machine` instance.
     ///
     /// Note: the stack is provided by the machine.
@@ -46,7 +49,7 @@ pub struct InterpCx<'mir, 'tcx, M: Machine<'mir, 'tcx>> {
     pub(crate) param_env: ty::ParamEnv<'tcx>,
 
     /// The virtual memory system.
-    pub memory: Memory<'mir, 'tcx, M>,
+    pub memory: Memory<'tcx, M>,
 
     /// The recursion limit (cached from `tcx.recursion_limit(())`)
     pub recursion_limit: Limit,
@@ -87,12 +90,12 @@ impl Drop for SpanGuard {
 }
 
 /// A stack frame.
-pub struct Frame<'mir, 'tcx, Prov: Provenance = CtfeProvenance, Extra = ()> {
+pub struct Frame<'tcx, Prov: Provenance = CtfeProvenance, Extra = ()> {
     ////////////////////////////////////////////////////////////////////////////////
     // Function and callsite information
     ////////////////////////////////////////////////////////////////////////////////
     /// The MIR for the function called on this frame.
-    pub body: &'mir mir::Body<'tcx>,
+    pub body: &'tcx mir::Body<'tcx>,
 
     /// The def_id and args of the current function.
     pub instance: ty::Instance<'tcx>,
@@ -229,8 +232,8 @@ impl<'tcx, Prov: Provenance> LocalState<'tcx, Prov> {
     }
 }
 
-impl<'mir, 'tcx, Prov: Provenance> Frame<'mir, 'tcx, Prov> {
-    pub fn with_extra<Extra>(self, extra: Extra) -> Frame<'mir, 'tcx, Prov, Extra> {
+impl<'tcx, Prov: Provenance> Frame<'tcx, Prov> {
+    pub fn with_extra<Extra>(self, extra: Extra) -> Frame<'tcx, Prov, Extra> {
         Frame {
             body: self.body,
             instance: self.instance,
@@ -244,7 +247,7 @@ impl<'mir, 'tcx, Prov: Provenance> Frame<'mir, 'tcx, Prov> {
     }
 }
 
-impl<'mir, 'tcx, Prov: Provenance, Extra> Frame<'mir, 'tcx, Prov, Extra> {
+impl<'tcx, Prov: Provenance, Extra> Frame<'tcx, Prov, Extra> {
     /// Get the current location within the Frame.
     ///
     /// If this is `Left`, we are not currently executing any particular statement in
@@ -342,16 +345,16 @@ impl<'tcx> FrameInfo<'tcx> {
     }
 }
 
-impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> HasDataLayout for InterpCx<'mir, 'tcx, M> {
+impl<'tcx, M: Machine<'tcx>> HasDataLayout for InterpCx<'tcx, M> {
     #[inline]
     fn data_layout(&self) -> &TargetDataLayout {
         &self.tcx.data_layout
     }
 }
 
-impl<'mir, 'tcx, M> layout::HasTyCtxt<'tcx> for InterpCx<'mir, 'tcx, M>
+impl<'tcx, M> layout::HasTyCtxt<'tcx> for InterpCx<'tcx, M>
 where
-    M: Machine<'mir, 'tcx>,
+    M: Machine<'tcx>,
 {
     #[inline]
     fn tcx(&self) -> TyCtxt<'tcx> {
@@ -359,16 +362,16 @@ where
     }
 }
 
-impl<'mir, 'tcx, M> layout::HasParamEnv<'tcx> for InterpCx<'mir, 'tcx, M>
+impl<'tcx, M> layout::HasParamEnv<'tcx> for InterpCx<'tcx, M>
 where
-    M: Machine<'mir, 'tcx>,
+    M: Machine<'tcx>,
 {
     fn param_env(&self) -> ty::ParamEnv<'tcx> {
         self.param_env
     }
 }
 
-impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> LayoutOfHelpers<'tcx> for InterpCx<'mir, 'tcx, M> {
+impl<'tcx, M: Machine<'tcx>> LayoutOfHelpers<'tcx> for InterpCx<'tcx, M> {
     type LayoutOfResult = InterpResult<'tcx, TyAndLayout<'tcx>>;
 
     #[inline]
@@ -388,7 +391,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> LayoutOfHelpers<'tcx> for InterpC
     }
 }
 
-impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> FnAbiOfHelpers<'tcx> for InterpCx<'mir, 'tcx, M> {
+impl<'tcx, M: Machine<'tcx>> FnAbiOfHelpers<'tcx> for InterpCx<'tcx, M> {
     type FnAbiOfResult = InterpResult<'tcx, &'tcx FnAbi<'tcx, Ty<'tcx>>>;
 
     fn handle_fn_abi_err(
@@ -481,7 +484,7 @@ pub fn format_interp_error<'tcx>(dcx: &DiagCtxt, e: InterpErrorInfo<'tcx>) -> St
     s
 }
 
-impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
     pub fn new(
         tcx: TyCtxt<'tcx>,
         root_span: Span,
@@ -514,14 +517,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
     }
 
     #[inline(always)]
-    pub(crate) fn stack(&self) -> &[Frame<'mir, 'tcx, M::Provenance, M::FrameExtra>] {
+    pub(crate) fn stack(&self) -> &[Frame<'tcx, M::Provenance, M::FrameExtra>] {
         M::stack(self)
     }
 
     #[inline(always)]
-    pub(crate) fn stack_mut(
-        &mut self,
-    ) -> &mut Vec<Frame<'mir, 'tcx, M::Provenance, M::FrameExtra>> {
+    pub(crate) fn stack_mut(&mut self) -> &mut Vec<Frame<'tcx, M::Provenance, M::FrameExtra>> {
         M::stack_mut(self)
     }
 
@@ -533,17 +534,17 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
     }
 
     #[inline(always)]
-    pub fn frame(&self) -> &Frame<'mir, 'tcx, M::Provenance, M::FrameExtra> {
+    pub fn frame(&self) -> &Frame<'tcx, M::Provenance, M::FrameExtra> {
         self.stack().last().expect("no call frames exist")
     }
 
     #[inline(always)]
-    pub fn frame_mut(&mut self) -> &mut Frame<'mir, 'tcx, M::Provenance, M::FrameExtra> {
+    pub fn frame_mut(&mut self) -> &mut Frame<'tcx, M::Provenance, M::FrameExtra> {
         self.stack_mut().last_mut().expect("no call frames exist")
     }
 
     #[inline(always)]
-    pub fn body(&self) -> &'mir mir::Body<'tcx> {
+    pub fn body(&self) -> &'tcx mir::Body<'tcx> {
         self.frame().body
     }
 
@@ -599,7 +600,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         T: TypeFoldable<TyCtxt<'tcx>>,
     >(
         &self,
-        frame: &Frame<'mir, 'tcx, M::Provenance, M::FrameExtra>,
+        frame: &Frame<'tcx, M::Provenance, M::FrameExtra>,
         value: T,
     ) -> Result<T, ErrorHandled> {
         frame
@@ -677,7 +678,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
     #[inline(always)]
     pub(super) fn layout_of_local(
         &self,
-        frame: &Frame<'mir, 'tcx, M::Provenance, M::FrameExtra>,
+        frame: &Frame<'tcx, M::Provenance, M::FrameExtra>,
         local: mir::Local,
         layout: Option<TyAndLayout<'tcx>>,
     ) -> InterpResult<'tcx, TyAndLayout<'tcx>> {
@@ -800,7 +801,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
     pub fn push_stack_frame(
         &mut self,
         instance: ty::Instance<'tcx>,
-        body: &'mir mir::Body<'tcx>,
+        body: &'tcx mir::Body<'tcx>,
         return_place: &MPlaceTy<'tcx, M::Provenance>,
         return_to_block: StackPopCleanup,
     ) -> InterpResult<'tcx> {
@@ -818,19 +819,17 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             tracing_span: SpanGuard::new(),
             extra: (),
         };
-        let frame = M::init_frame_extra(self, pre_frame)?;
+        let frame = M::init_frame(self, pre_frame)?;
         self.stack_mut().push(frame);
 
         // Make sure all the constants required by this frame evaluate successfully (post-monomorphization check).
-        if M::POST_MONO_CHECKS {
-            for &const_ in &body.required_consts {
-                let c = self
-                    .instantiate_from_current_frame_and_normalize_erasing_regions(const_.const_)?;
-                c.eval(*self.tcx, self.param_env, const_.span).map_err(|err| {
-                    err.emit_note(*self.tcx);
-                    err
-                })?;
-            }
+        for &const_ in &body.required_consts {
+            let c =
+                self.instantiate_from_current_frame_and_normalize_erasing_regions(const_.const_)?;
+            c.eval(*self.tcx, self.param_env, const_.span).map_err(|err| {
+                err.emit_note(*self.tcx);
+                err
+            })?;
         }
 
         // done
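
[With `POST_MONO_CHECKS` gone, the loop above runs unconditionally: every constant in `body.required_consts` is evaluated when the frame is pushed, so later constant uses inside the frame can assume success. A toy model of that invariant, with stand-in types rather than rustc's `mir::Const`/`ErrorHandled`:

    struct Const(Result<i64, &'static str>); // stand-in for a required const

    fn push_frame(required_consts: &[Const]) -> Result<(), &'static str> {
        for c in required_consts {
            c.0?; // any failing const aborts the push before any statement runs
        }
        Ok(()) // past this point, evaluating these consts cannot fail
    }

    fn main() {
        assert!(push_frame(&[Const(Ok(1)), Const(Ok(2))]).is_ok());
        assert!(push_frame(&[Const(Err("layout error"))]).is_err());
    }
]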
@@ -1181,8 +1180,21 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
     ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
         M::eval_mir_constant(self, *val, span, layout, |ecx, val, span, layout| {
             let const_val = val.eval(*ecx.tcx, ecx.param_env, span).map_err(|err| {
-                // FIXME: somehow this is reachable even when POST_MONO_CHECKS is on.
-                // Are we not always populating `required_consts`?
+                if M::ALL_CONSTS_ARE_PRECHECKED {
+                    match err {
+                        ErrorHandled::TooGeneric(..) => {},
+                        ErrorHandled::Reported(reported, span) => {
+                            if reported.is_tainted_by_errors() {
+                                // const-eval will return "tainted" errors if e.g. the layout cannot
+                                // be computed as the type references non-existing names.
+                                // See <https://github.com/rust-lang/rust/issues/124348>.
+                            } else {
+                                // Looks like the const is not captured by `required_consts`; that's bad.
+                                span_bug!(span, "interpret const eval failure of {val:?} which is not in required_consts");
+                            }
+                        }
+                    }
+                }
                 err.emit_note(*ecx.tcx);
                 err
             })?;
@@ -1191,10 +1203,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
     }
 
     #[must_use]
-    pub fn dump_place(
-        &self,
-        place: &PlaceTy<'tcx, M::Provenance>,
-    ) -> PlacePrinter<'_, 'mir, 'tcx, M> {
+    pub fn dump_place(&self, place: &PlaceTy<'tcx, M::Provenance>) -> PlacePrinter<'_, 'tcx, M> {
         PlacePrinter { ecx: self, place: *place.place() }
     }
 
@@ -1206,14 +1215,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 
 #[doc(hidden)]
 /// Helper struct for the `dump_place` function.
-pub struct PlacePrinter<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> {
-    ecx: &'a InterpCx<'mir, 'tcx, M>,
+pub struct PlacePrinter<'a, 'tcx, M: Machine<'tcx>> {
+    ecx: &'a InterpCx<'tcx, M>,
     place: Place<M::Provenance>,
 }
 
-impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> std::fmt::Debug
-    for PlacePrinter<'a, 'mir, 'tcx, M>
-{
+impl<'a, 'tcx, M: Machine<'tcx>> std::fmt::Debug for PlacePrinter<'a, 'tcx, M> {
     fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         match self.place {
             Place::Local { local, offset, locals_addr } => {
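
[The pattern running through this whole file (and the rest of the patch): `InterpCx<'mir, 'tcx, M>` loses its `'mir` parameter because MIR bodies are arena-allocated and thus live for `'tcx`, so every `&'mir mir::Body<'tcx>` can become `&'tcx mir::Body<'tcx>`. The shape of the change, with hypothetical stand-in types:

    // Before: struct InterpCx<'mir, 'tcx, M: Machine<'mir, 'tcx>> { body: &'mir Body<'tcx>, .. }
    // After: one lifetime suffices once bodies are known to live as long as the arena.
    struct Body<'tcx>(&'tcx str); // stand-in for mir::Body<'tcx>
    struct InterpCx<'tcx> {
        body: &'tcx Body<'tcx>,
    }

    fn main() {
        let src = String::from("fn main() {}");
        let body = Body(&src);
        let ecx = InterpCx { body: &body };
        println!("interpreting {}", ecx.body.0);
    }
]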
diff --git a/compiler/rustc_const_eval/src/interpret/intern.rs b/compiler/rustc_const_eval/src/interpret/intern.rs
index d0f0190fea7..8d0b267e1a9 100644
--- a/compiler/rustc_const_eval/src/interpret/intern.rs
+++ b/compiler/rustc_const_eval/src/interpret/intern.rs
@@ -16,22 +16,20 @@
 use hir::def::DefKind;
 use rustc_ast::Mutability;
 use rustc_data_structures::fx::{FxHashSet, FxIndexMap};
-use rustc_errors::ErrorGuaranteed;
 use rustc_hir as hir;
 use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrs;
 use rustc_middle::mir::interpret::{ConstAllocation, CtfeProvenance, InterpResult};
 use rustc_middle::query::TyCtxtAt;
 use rustc_middle::ty::layout::TyAndLayout;
-use rustc_session::lint;
 use rustc_span::def_id::LocalDefId;
 use rustc_span::sym;
+use tracing::{instrument, trace};
 
-use super::{AllocId, Allocation, InterpCx, MPlaceTy, Machine, MemoryKind, PlaceTy};
+use super::{err_ub, AllocId, Allocation, InterpCx, MPlaceTy, Machine, MemoryKind, PlaceTy};
 use crate::const_eval;
-use crate::errors::{DanglingPtrInFinal, MutablePtrInFinal, NestedStaticInThreadLocal};
+use crate::errors::NestedStaticInThreadLocal;
 
-pub trait CompileTimeMachine<'mir, 'tcx: 'mir, T> = Machine<
-        'mir,
+pub trait CompileTimeMachine<'tcx, T> = Machine<
         'tcx,
         MemoryKind = T,
         Provenance = CtfeProvenance,
@@ -47,7 +45,7 @@ pub trait HasStaticRootDefId {
     fn static_def_id(&self) -> Option<LocalDefId>;
 }
 
-impl HasStaticRootDefId for const_eval::CompileTimeInterpreter<'_, '_> {
+impl HasStaticRootDefId for const_eval::CompileTimeInterpreter<'_> {
     fn static_def_id(&self) -> Option<LocalDefId> {
         Some(self.static_root_ids?.1)
     }
@@ -60,8 +58,8 @@ impl HasStaticRootDefId for const_eval::CompileTimeInterpreter<'_, '_> {
 /// already mutable (as a sanity check).
 ///
 /// Returns an iterator over all relocations referred to by this allocation.
-fn intern_shallow<'rt, 'mir, 'tcx, T, M: CompileTimeMachine<'mir, 'tcx, T>>(
-    ecx: &'rt mut InterpCx<'mir, 'tcx, M>,
+fn intern_shallow<'rt, 'tcx, T, M: CompileTimeMachine<'tcx, T>>(
+    ecx: &'rt mut InterpCx<'tcx, M>,
     alloc_id: AllocId,
     mutability: Mutability,
 ) -> Result<impl Iterator<Item = CtfeProvenance> + 'tcx, ()> {
@@ -134,6 +132,12 @@ pub enum InternKind {
     Promoted,
 }
 
+#[derive(Debug)]
+pub enum InternResult {
+    FoundBadMutablePointer,
+    FoundDanglingPointer,
+}
+
 /// Intern `ret` and everything it references.
 ///
 /// This *cannot raise an interpreter error*. Doing so is left to validation, which
@@ -141,15 +145,11 @@ pub enum InternKind {
 ///
 /// For `InternKind::Static` the root allocation will not be interned, but must be handled by the caller.
 #[instrument(level = "debug", skip(ecx))]
-pub fn intern_const_alloc_recursive<
-    'mir,
-    'tcx: 'mir,
-    M: CompileTimeMachine<'mir, 'tcx, const_eval::MemoryKind>,
->(
-    ecx: &mut InterpCx<'mir, 'tcx, M>,
+pub fn intern_const_alloc_recursive<'tcx, M: CompileTimeMachine<'tcx, const_eval::MemoryKind>>(
+    ecx: &mut InterpCx<'tcx, M>,
     intern_kind: InternKind,
     ret: &MPlaceTy<'tcx>,
-) -> Result<(), ErrorGuaranteed> {
+) -> Result<(), InternResult> {
     // We are interning recursively, and for mutability we are distinguishing the "root" allocation
     // that we are starting in, and all other allocations that we are encountering recursively.
     let (base_mutability, inner_mutability, is_static) = match intern_kind {
@@ -201,7 +201,7 @@ pub fn intern_const_alloc_recursive<
     // Whether we encountered a bad mutable pointer.
     // We want to first report "dangling" and then "mutable", so we need to delay reporting these
     // errors.
-    let mut found_bad_mutable_pointer = false;
+    let mut result = Ok(());
 
     // Keep interning as long as there are things to intern.
     // We show errors if there are dangling pointers, or mutable pointers in immutable contexts
@@ -251,7 +251,10 @@ pub fn intern_const_alloc_recursive<
             // on the promotion analysis not screwing up to ensure that it is sound to intern
             // promoteds as immutable.
             trace!("found bad mutable pointer");
-            found_bad_mutable_pointer = true;
+            // Prefer dangling pointer errors over mutable pointer errors
+            if result.is_ok() {
+                result = Err(InternResult::FoundBadMutablePointer);
+            }
         }
         if ecx.tcx.try_get_global_alloc(alloc_id).is_some() {
             // Already interned.
@@ -269,32 +272,21 @@ pub fn intern_const_alloc_recursive<
         // pointers before deciding which allocations can be made immutable; but for now we are
         // okay with losing some potential for immutability here. This can anyway only affect
         // `static mut`.
-        todo.extend(intern_shallow(ecx, alloc_id, inner_mutability).map_err(|()| {
-            ecx.tcx.dcx().emit_err(DanglingPtrInFinal { span: ecx.tcx.span, kind: intern_kind })
-        })?);
-    }
-    if found_bad_mutable_pointer {
-        let err_diag = MutablePtrInFinal { span: ecx.tcx.span, kind: intern_kind };
-        ecx.tcx.emit_node_span_lint(
-            lint::builtin::CONST_EVAL_MUTABLE_PTR_IN_FINAL_VALUE,
-            ecx.best_lint_scope(),
-            err_diag.span,
-            err_diag,
-        )
+        match intern_shallow(ecx, alloc_id, inner_mutability) {
+            Ok(nested) => todo.extend(nested),
+            Err(()) => {
+                ecx.tcx.dcx().delayed_bug("found dangling pointer during const interning");
+                result = Err(InternResult::FoundDanglingPointer);
+            }
+        }
     }
-
-    Ok(())
+    result
 }
 
 /// Intern `ret`. This function assumes that `ret` references no other allocation.
 #[instrument(level = "debug", skip(ecx))]
-pub fn intern_const_alloc_for_constprop<
-    'mir,
-    'tcx: 'mir,
-    T,
-    M: CompileTimeMachine<'mir, 'tcx, T>,
->(
-    ecx: &mut InterpCx<'mir, 'tcx, M>,
+pub fn intern_const_alloc_for_constprop<'tcx, T, M: CompileTimeMachine<'tcx, T>>(
+    ecx: &mut InterpCx<'tcx, M>,
     alloc_id: AllocId,
 ) -> InterpResult<'tcx, ()> {
     if ecx.tcx.try_get_global_alloc(alloc_id).is_some() {
@@ -313,19 +305,14 @@ pub fn intern_const_alloc_for_constprop<
     Ok(())
 }
 
-impl<'mir, 'tcx: 'mir, M: super::intern::CompileTimeMachine<'mir, 'tcx, !>>
-    InterpCx<'mir, 'tcx, M>
-{
+impl<'tcx, M: super::intern::CompileTimeMachine<'tcx, !>> InterpCx<'tcx, M> {
     /// A helper function that allocates memory for the layout given and gives you access to mutate
     /// it. Once your own mutation code is done, the backing `Allocation` is removed from the
     /// current `Memory` and interned as read-only into the global memory.
     pub fn intern_with_temp_alloc(
         &mut self,
         layout: TyAndLayout<'tcx>,
-        f: impl FnOnce(
-            &mut InterpCx<'mir, 'tcx, M>,
-            &PlaceTy<'tcx, M::Provenance>,
-        ) -> InterpResult<'tcx, ()>,
+        f: impl FnOnce(&mut InterpCx<'tcx, M>, &PlaceTy<'tcx, M::Provenance>) -> InterpResult<'tcx, ()>,
     ) -> InterpResult<'tcx, AllocId> {
         // `allocate` picks a fresh AllocId that we will associate with its data below.
         let dest = self.allocate(layout, MemoryKind::Stack)?;
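
[Interning in this file now reports outcomes through the new `InternResult` instead of emitting diagnostics itself (save for one `delayed_bug`); the const-eval caller turns the result into errors, and dangling pointers take priority because the loop above only sets `FoundBadMutablePointer` when no error has been recorded yet. A sketch of a hypothetical caller; the enum mirrors the one added above:

    #[derive(Debug)]
    enum InternResult { FoundBadMutablePointer, FoundDanglingPointer }

    fn report(res: Result<(), InternResult>) -> &'static str {
        match res {
            Ok(()) => "const value interned",
            // Dangling-pointer errors are preferred over mutable-pointer ones.
            Err(InternResult::FoundDanglingPointer) => "error: dangling pointer in final value",
            Err(InternResult::FoundBadMutablePointer) => "error: mutable pointer in final value",
        }
    }

    fn main() {
        assert_eq!(report(Ok(())), "const value interned");
        assert_eq!(report(Err(InternResult::FoundDanglingPointer)),
                   "error: dangling pointer in final value");
        assert_eq!(report(Err(InternResult::FoundBadMutablePointer)),
                   "error: mutable pointer in final value");
    }
]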
diff --git a/compiler/rustc_const_eval/src/interpret/intrinsics.rs b/compiler/rustc_const_eval/src/interpret/intrinsics.rs
index 63c709d8aed..18b76443cd9 100644
--- a/compiler/rustc_const_eval/src/interpret/intrinsics.rs
+++ b/compiler/rustc_const_eval/src/interpret/intrinsics.rs
@@ -8,21 +8,19 @@ use rustc_middle::ty::layout::{LayoutOf as _, ValidityRequirement};
 use rustc_middle::ty::GenericArgsRef;
 use rustc_middle::ty::{Ty, TyCtxt};
 use rustc_middle::{
-    mir::{
-        self,
-        interpret::{
-            Allocation, ConstAllocation, GlobalId, InterpResult, PointerArithmetic, Scalar,
-        },
-        BinOp, ConstValue, NonDivergingIntrinsic,
-    },
+    bug,
+    mir::{self, BinOp, ConstValue, NonDivergingIntrinsic},
     ty::layout::TyAndLayout,
 };
 use rustc_span::symbol::{sym, Symbol};
 use rustc_target::abi::Size;
+use tracing::trace;
 
 use super::{
-    memory::MemoryKind, util::ensure_monomorphic_enough, CheckInAllocMsg, ImmTy, InterpCx,
-    MPlaceTy, Machine, OpTy, Pointer,
+    err_inval, err_ub_custom, err_unsup_format, memory::MemoryKind, throw_inval, throw_ub_custom,
+    throw_ub_format, util::ensure_monomorphic_enough, Allocation, CheckInAllocMsg, ConstAllocation,
+    GlobalId, ImmTy, InterpCx, InterpResult, MPlaceTy, Machine, OpTy, Pointer, PointerArithmetic,
+    Scalar,
 };
 
 use crate::fluent_generated as fluent;
@@ -100,7 +98,7 @@ pub(crate) fn eval_nullary_intrinsic<'tcx>(
     })
 }
 
-impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
     /// Returns `true` if emulation happened.
     /// Here we implement the intrinsics that are common to all Miri instances; individual machines can add their own
     /// intrinsic handling.
@@ -113,10 +111,6 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
     ) -> InterpResult<'tcx, bool> {
         let instance_args = instance.args;
         let intrinsic_name = self.tcx.item_name(instance.def_id());
-        let Some(ret) = ret else {
-            // We don't support any intrinsic without return place.
-            return Ok(false);
-        };
 
         match intrinsic_name {
             sym::caller_location => {
@@ -173,7 +167,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 let ty = instance_args.type_at(0);
                 let layout = self.layout_of(ty)?;
                 let val = self.read_scalar(&args[0])?;
-                let out_val = self.numeric_intrinsic(intrinsic_name, val, layout)?;
+
+                let out_val = self.numeric_intrinsic(intrinsic_name, val, layout, dest.layout)?;
                 self.write_scalar(out_val, dest)?;
             }
             sym::saturating_add | sym::saturating_sub => {
@@ -200,12 +195,15 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             sym::rotate_left | sym::rotate_right => {
                 // rotate_left: (X << (S % BW)) | (X >> ((BW - S) % BW))
                 // rotate_right: (X << ((BW - S) % BW)) | (X >> (S % BW))
-                let layout = self.layout_of(instance_args.type_at(0))?;
+                let layout_val = self.layout_of(instance_args.type_at(0))?;
                 let val = self.read_scalar(&args[0])?;
-                let val_bits = val.to_bits(layout.size)?;
+                let val_bits = val.to_bits(layout_val.size)?;
+
+                let layout_raw_shift = self.layout_of(self.tcx.types.u32)?;
                 let raw_shift = self.read_scalar(&args[1])?;
-                let raw_shift_bits = raw_shift.to_bits(layout.size)?;
-                let width_bits = u128::from(layout.size.bits());
+                let raw_shift_bits = raw_shift.to_bits(layout_raw_shift.size)?;
+
+                let width_bits = u128::from(layout_val.size.bits());
                 let shift_bits = raw_shift_bits % width_bits;
                 let inv_shift_bits = (width_bits - shift_bits) % width_bits;
                 let result_bits = if intrinsic_name == sym::rotate_left {
@@ -213,8 +211,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 } else {
                     (val_bits >> shift_bits) | (val_bits << inv_shift_bits)
                 };
-                let truncated_bits = self.truncate(result_bits, layout);
-                let result = Scalar::from_uint(truncated_bits, layout.size);
+                let truncated_bits = self.truncate(result_bits, layout_val);
+                let result = Scalar::from_uint(truncated_bits, layout_val.size);
                 self.write_scalar(result, dest)?;
             }
             sym::copy => {
@@ -249,14 +247,23 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                     match (self.ptr_try_get_alloc_id(a), self.ptr_try_get_alloc_id(b)) {
                         (Err(a), Err(b)) => {
                             // Neither pointer points to an allocation.
-                            // If these are inequal or null, this *will* fail the deref check below.
+                            // This is okay only if they are the same.
+                            if a != b {
+                                // We'd catch this below in the "dereferenceable" check, but
+                                // show a nicer error for this particular case.
+                                throw_ub_custom!(
+                                    fluent::const_eval_offset_from_different_integers,
+                                    name = intrinsic_name,
+                                );
+                            }
+                            // This will always return 0.
                             (a, b)
                         }
                         (Err(_), _) | (_, Err(_)) => {
                             // We managed to find a valid allocation for one pointer, but not the other.
                             // That means they are definitely not pointing to the same allocation.
                             throw_ub_custom!(
-                                fluent::const_eval_different_allocations,
+                                fluent::const_eval_offset_from_different_allocations,
                                 name = intrinsic_name,
                             );
                         }
@@ -264,7 +271,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                             // Found allocation for both. They must be into the same allocation.
                             if a_alloc_id != b_alloc_id {
                                 throw_ub_custom!(
-                                    fluent::const_eval_different_allocations,
+                                    fluent::const_eval_offset_from_different_allocations,
                                     name = intrinsic_name,
                                 );
                             }
@@ -280,13 +287,14 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                     let (val, overflowed) = {
                         let a_offset = ImmTy::from_uint(a_offset, usize_layout);
                         let b_offset = ImmTy::from_uint(b_offset, usize_layout);
-                        self.overflowing_binary_op(BinOp::Sub, &a_offset, &b_offset)?
+                        self.binary_op(BinOp::SubWithOverflow, &a_offset, &b_offset)?
+                            .to_scalar_pair()
                     };
-                    if overflowed {
+                    if overflowed.to_bool()? {
                         // a < b
                         if intrinsic_name == sym::ptr_offset_from_unsigned {
                             throw_ub_custom!(
-                                fluent::const_eval_unsigned_offset_from_overflow,
+                                fluent::const_eval_offset_from_unsigned_overflow,
                                 a_offset = a_offset,
                                 b_offset = b_offset,
                             );
@@ -294,7 +302,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                         // The signed form of the intrinsic allows this. If we interpret the
                         // difference as isize, we'll get the proper signed difference. If that
                         // seems *positive*, they were more than isize::MAX apart.
-                        let dist = val.to_scalar().to_target_isize(self)?;
+                        let dist = val.to_target_isize(self)?;
                         if dist >= 0 {
                             throw_ub_custom!(
                                 fluent::const_eval_offset_from_underflow,
@@ -304,7 +312,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                         dist
                     } else {
                         // b >= a
-                        let dist = val.to_scalar().to_target_isize(self)?;
+                        let dist = val.to_target_isize(self)?;
                         // If converting to isize produced a *negative* result, we had an overflow
                         // because they were more than isize::MAX apart.
                         if dist < 0 {
@@ -372,7 +380,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                     };
 
                     M::panic_nounwind(self, &msg)?;
-                    // Skip the `go_to_block` at the end.
+                    // Skip the `return_to_block` at the end (we panicked, we do not return).
                     return Ok(true);
                 }
             }
@@ -410,7 +418,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 }
                 self.copy_op(&self.project_index(&input, index)?, dest)?;
             }
-            sym::likely | sym::unlikely | sym::black_box => {
+            sym::black_box => {
                 // These just return their argument
                 self.copy_op(&args[0], dest)?;
             }
@@ -433,11 +441,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 self.write_scalar(Scalar::from_target_usize(align.bytes(), self), dest)?;
             }
 
+            // Unsupported intrinsic: skip the return_to_block below.
             _ => return Ok(false),
         }
 
         trace!("{:?}", self.dump_place(&dest.clone().into()));
-        self.go_to_block(ret);
+        self.return_to_block(ret)?;
         Ok(true)
     }
 
@@ -472,6 +481,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         name: Symbol,
         val: Scalar<M::Provenance>,
         layout: TyAndLayout<'tcx>,
+        ret_layout: TyAndLayout<'tcx>,
     ) -> InterpResult<'tcx, Scalar<M::Provenance>> {
         assert!(layout.ty.is_integral(), "invalid type for numeric intrinsic: {}", layout.ty);
         let bits = val.to_bits(layout.size)?;
@@ -483,11 +493,17 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             }
             sym::ctlz | sym::ctlz_nonzero => u128::from(bits.leading_zeros()) - extra,
             sym::cttz | sym::cttz_nonzero => u128::from((bits << extra).trailing_zeros()) - extra,
-            sym::bswap => (bits << extra).swap_bytes(),
-            sym::bitreverse => (bits << extra).reverse_bits(),
+            sym::bswap => {
+                assert_eq!(layout, ret_layout);
+                (bits << extra).swap_bytes()
+            }
+            sym::bitreverse => {
+                assert_eq!(layout, ret_layout);
+                (bits << extra).reverse_bits()
+            }
             _ => bug!("not a numeric intrinsic: {}", name),
         };
-        Ok(Scalar::from_uint(bits_out, layout.size))
+        Ok(Scalar::from_uint(bits_out, ret_layout.size))
     }
 
     pub fn exact_div(
@@ -502,9 +518,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         // Performs an exact division, resulting in undefined behavior where
         // `x % y != 0` or `y == 0` or `x == T::MIN && y == -1`.
         // First, check x % y != 0 (or if that computation overflows).
-        let (res, overflow) = self.overflowing_binary_op(BinOp::Rem, a, b)?;
-        assert!(!overflow); // All overflow is UB, so this should never return on overflow.
-        if res.to_scalar().assert_bits(a.layout.size) != 0 {
+        let rem = self.binary_op(BinOp::Rem, a, b)?;
+        if rem.to_scalar().assert_bits(a.layout.size) != 0 {
             throw_ub_custom!(
                 fluent::const_eval_exact_div_has_remainder,
                 a = format!("{a}"),
@@ -512,7 +527,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             )
         }
         // `Rem` says this is all right, so we can let `Div` do its job.
-        self.binop_ignore_overflow(BinOp::Div, a, b, &dest.clone().into())
+        let res = self.binary_op(BinOp::Div, a, b)?;
+        self.write_immediate(*res, dest)
     }
 
     pub fn saturating_arith(
@@ -525,8 +541,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         assert!(matches!(l.layout.ty.kind(), ty::Int(..) | ty::Uint(..)));
         assert!(matches!(mir_op, BinOp::Add | BinOp::Sub));
 
-        let (val, overflowed) = self.overflowing_binary_op(mir_op, l, r)?;
-        Ok(if overflowed {
+        let (val, overflowed) =
+            self.binary_op(mir_op.wrapping_to_overflowing().unwrap(), l, r)?.to_scalar_pair();
+        Ok(if overflowed.to_bool()? {
             let size = l.layout.size;
             let num_bits = size.bits();
             if l.layout.abi.is_signed() {
@@ -557,7 +574,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 }
             }
         } else {
-            val.to_scalar()
+            val
         })
     }
 
@@ -588,13 +605,13 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
     /// Copy `count*size_of::<T>()` many bytes from `*src` to `*dst`.
     pub(crate) fn copy_intrinsic(
         &mut self,
-        src: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,
-        dst: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,
-        count: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,
+        src: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>,
+        dst: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>,
+        count: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>,
         nonoverlapping: bool,
     ) -> InterpResult<'tcx> {
         let count = self.read_target_usize(count)?;
-        let layout = self.layout_of(src.layout.ty.builtin_deref(true).unwrap().ty)?;
+        let layout = self.layout_of(src.layout.ty.builtin_deref(true).unwrap())?;
         let (size, align) = (layout.size, layout.align.abi);
         // `checked_mul` enforces too small a bound (the correct one would probably be target_isize_max),
         // but no actual allocation can be big enough for the difference to be noticeable.
@@ -617,8 +634,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
     /// Does a *typed* swap of `*left` and `*right`.
     fn typed_swap_intrinsic(
         &mut self,
-        left: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,
-        right: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,
+        left: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>,
+        right: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>,
     ) -> InterpResult<'tcx> {
         let left = self.deref_pointer(left)?;
         let right = self.deref_pointer(right)?;
@@ -634,11 +651,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 
     pub(crate) fn write_bytes_intrinsic(
         &mut self,
-        dst: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,
-        byte: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,
-        count: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,
+        dst: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>,
+        byte: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>,
+        count: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>,
     ) -> InterpResult<'tcx> {
-        let layout = self.layout_of(dst.layout.ty.builtin_deref(true).unwrap().ty)?;
+        let layout = self.layout_of(dst.layout.ty.builtin_deref(true).unwrap())?;
 
         let dst = self.read_pointer(dst)?;
         let byte = self.read_scalar(byte)?.to_u8()?;
@@ -656,9 +673,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 
     pub(crate) fn compare_bytes_intrinsic(
         &mut self,
-        left: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,
-        right: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,
-        byte_count: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,
+        left: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>,
+        right: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>,
+        byte_count: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>,
     ) -> InterpResult<'tcx, Scalar<M::Provenance>> {
         let left = self.read_pointer(left)?;
         let right = self.read_pointer(right)?;
@@ -674,14 +691,14 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 
     pub(crate) fn raw_eq_intrinsic(
         &mut self,
-        lhs: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,
-        rhs: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,
+        lhs: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>,
+        rhs: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>,
     ) -> InterpResult<'tcx, Scalar<M::Provenance>> {
-        let layout = self.layout_of(lhs.layout.ty.builtin_deref(true).unwrap().ty)?;
+        let layout = self.layout_of(lhs.layout.ty.builtin_deref(true).unwrap())?;
         assert!(layout.is_sized());
 
-        let get_bytes = |this: &InterpCx<'mir, 'tcx, M>,
-                         op: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,
+        let get_bytes = |this: &InterpCx<'tcx, M>,
+                         op: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>,
                          size|
          -> InterpResult<'tcx, &[u8]> {
             let ptr = this.read_pointer(op)?;
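
[Earlier in this file, `saturating_arith` now goes through `binary_op` with the `*WithOverflow` form of the operation, which yields an immediate scalar pair `(value, overflowed)` rather than the old `(ImmTy, bool)` tuple. The clamping logic it feeds, written out in plain Rust for `i8` as a sketch of the generic interpreter code:

    fn saturating_add_i8(l: i8, r: i8) -> i8 {
        // The (value, overflowed) pair stands in for the interpreter's scalar pair.
        let (val, overflowed) = l.overflowing_add(r);
        if overflowed {
            // For signed addition, the overflow direction follows the sign of `r`.
            if r < 0 { i8::MIN } else { i8::MAX }
        } else {
            val
        }
    }

    fn main() {
        assert_eq!(saturating_add_i8(120, 10), i8::MAX);
        assert_eq!(saturating_add_i8(-120, -10), i8::MIN);
        assert_eq!(saturating_add_i8(1, 2), 3);
    }
]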
diff --git a/compiler/rustc_const_eval/src/interpret/machine.rs b/compiler/rustc_const_eval/src/interpret/machine.rs
index 7617cb57b3c..4ae0aca5a0c 100644
--- a/compiler/rustc_const_eval/src/interpret/machine.rs
+++ b/compiler/rustc_const_eval/src/interpret/machine.rs
@@ -18,9 +18,9 @@ use rustc_target::abi::{Align, Size};
 use rustc_target::spec::abi::Abi as CallAbi;
 
 use super::{
-    AllocBytes, AllocId, AllocKind, AllocRange, Allocation, ConstAllocation, CtfeProvenance, FnArg,
-    Frame, ImmTy, InterpCx, InterpResult, MPlaceTy, MemoryKind, Misalignment, OpTy, PlaceTy,
-    Pointer, Provenance,
+    throw_unsup, throw_unsup_format, AllocBytes, AllocId, AllocKind, AllocRange, Allocation,
+    ConstAllocation, CtfeProvenance, FnArg, Frame, ImmTy, InterpCx, InterpResult, MPlaceTy,
+    MemoryKind, Misalignment, OpTy, PlaceTy, Pointer, Provenance,
 };
 
 /// Data returned by Machine::stack_pop,
@@ -94,7 +94,7 @@ pub trait AllocMap<K: Hash + Eq, V> {
 
 /// Methods of this trait signify points where CTFE evaluation would fail,
 /// and some use-case-dependent behaviour can be applied instead.
-pub trait Machine<'mir, 'tcx: 'mir>: Sized {
+pub trait Machine<'tcx>: Sized {
     /// Additional memory kinds a machine wishes to distinguish from the builtin ones
     type MemoryKind: Debug + std::fmt::Display + MayLeak + Eq + 'static;
 
@@ -140,16 +140,17 @@ pub trait Machine<'mir, 'tcx: 'mir>: Sized {
     /// Should the machine panic on allocation failures?
     const PANIC_ON_ALLOC_FAIL: bool;
 
-    /// Should post-monomorphization checks be run when a stack frame is pushed?
-    const POST_MONO_CHECKS: bool = true;
+    /// Determines whether `eval_mir_constant` can never fail because all required consts have
+    /// already been checked before.
+    const ALL_CONSTS_ARE_PRECHECKED: bool = true;
 
     /// Whether memory accesses should be alignment-checked.
-    fn enforce_alignment(ecx: &InterpCx<'mir, 'tcx, Self>) -> bool;
+    fn enforce_alignment(ecx: &InterpCx<'tcx, Self>) -> bool;
 
     /// Gives the machine a chance to detect more misalignment than the built-in checks would catch.
     #[inline(always)]
     fn alignment_check(
-        _ecx: &InterpCx<'mir, 'tcx, Self>,
+        _ecx: &InterpCx<'tcx, Self>,
         _alloc_id: AllocId,
         _alloc_align: Align,
         _alloc_kind: AllocKind,
@@ -160,22 +161,22 @@ pub trait Machine<'mir, 'tcx: 'mir>: Sized {
     }
 
     /// Whether to enforce the validity invariant for a specific layout.
-    fn enforce_validity(ecx: &InterpCx<'mir, 'tcx, Self>, layout: TyAndLayout<'tcx>) -> bool;
+    fn enforce_validity(ecx: &InterpCx<'tcx, Self>, layout: TyAndLayout<'tcx>) -> bool;
 
     /// Whether function calls should be [ABI](CallAbi)-checked.
-    fn enforce_abi(_ecx: &InterpCx<'mir, 'tcx, Self>) -> bool {
+    fn enforce_abi(_ecx: &InterpCx<'tcx, Self>) -> bool {
         true
     }
 
     /// Whether Assert(OverflowNeg) and Assert(Overflow) MIR terminators should actually
     /// check for overflow.
-    fn ignore_optional_overflow_checks(_ecx: &InterpCx<'mir, 'tcx, Self>) -> bool;
+    fn ignore_optional_overflow_checks(_ecx: &InterpCx<'tcx, Self>) -> bool;
 
     /// Entry point for obtaining the MIR of anything that should get evaluated.
     /// So not just functions and shims, but also const/static initializers, anonymous
     /// constants, ...
     fn load_mir(
-        ecx: &InterpCx<'mir, 'tcx, Self>,
+        ecx: &InterpCx<'tcx, Self>,
         instance: ty::InstanceDef<'tcx>,
     ) -> InterpResult<'tcx, &'tcx mir::Body<'tcx>> {
         Ok(ecx.tcx.instance_mir(instance))
@@ -192,19 +193,19 @@ pub trait Machine<'mir, 'tcx: 'mir>: Sized {
     /// Passing `dest` and `ret` in the same `Option` proved very annoying when only one of them
     /// was used.
     fn find_mir_or_eval_fn(
-        ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        ecx: &mut InterpCx<'tcx, Self>,
         instance: ty::Instance<'tcx>,
         abi: CallAbi,
         args: &[FnArg<'tcx, Self::Provenance>],
         destination: &MPlaceTy<'tcx, Self::Provenance>,
         target: Option<mir::BasicBlock>,
         unwind: mir::UnwindAction,
-    ) -> InterpResult<'tcx, Option<(&'mir mir::Body<'tcx>, ty::Instance<'tcx>)>>;
+    ) -> InterpResult<'tcx, Option<(&'tcx mir::Body<'tcx>, ty::Instance<'tcx>)>>;
 
     /// Execute `fn_val`. It is the hook's responsibility to advance the instruction
     /// pointer as appropriate.
     fn call_extra_fn(
-        ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        ecx: &mut InterpCx<'tcx, Self>,
         fn_val: Self::ExtraFnVal,
         abi: CallAbi,
         args: &[FnArg<'tcx, Self::Provenance>],
@@ -215,28 +216,31 @@ pub trait Machine<'mir, 'tcx: 'mir>: Sized {
 
     /// Directly process an intrinsic without pushing a stack frame. It is the hook's
     /// responsibility to advance the instruction pointer as appropriate.
+    ///
+    /// Returns `None` if the intrinsic was fully handled.
+    /// Otherwise, returns an `Instance` of the function that implements the intrinsic.
     fn call_intrinsic(
-        ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        ecx: &mut InterpCx<'tcx, Self>,
         instance: ty::Instance<'tcx>,
         args: &[OpTy<'tcx, Self::Provenance>],
         destination: &MPlaceTy<'tcx, Self::Provenance>,
         target: Option<mir::BasicBlock>,
         unwind: mir::UnwindAction,
-    ) -> InterpResult<'tcx>;
+    ) -> InterpResult<'tcx, Option<ty::Instance<'tcx>>>;
 
     /// Called to evaluate `Assert` MIR terminators that trigger a panic.
     fn assert_panic(
-        ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        ecx: &mut InterpCx<'tcx, Self>,
         msg: &mir::AssertMessage<'tcx>,
         unwind: mir::UnwindAction,
     ) -> InterpResult<'tcx>;
 
     /// Called to trigger a non-unwinding panic.
-    fn panic_nounwind(_ecx: &mut InterpCx<'mir, 'tcx, Self>, msg: &str) -> InterpResult<'tcx>;
+    fn panic_nounwind(_ecx: &mut InterpCx<'tcx, Self>, msg: &str) -> InterpResult<'tcx>;
 
     /// Called when unwinding reached a state where execution should be terminated.
     fn unwind_terminate(
-        ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        ecx: &mut InterpCx<'tcx, Self>,
         reason: mir::UnwindTerminateReason,
     ) -> InterpResult<'tcx>;
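
[`call_intrinsic`'s new return type above encodes a fallback path: `None` means the machine fully handled the intrinsic, while `Some(instance)` asks the interpreter to call the returned body-backed implementation like an ordinary function. A minimal model of that contract, with hypothetical names standing in for `ty::Instance` and the real dispatch:

    struct Instance(&'static str); // stand-in for ty::Instance

    fn call_intrinsic(name: &'static str) -> Option<Instance> {
        match name {
            "black_box" => None, // fully emulated: the interpreter just moves on
            // Not emulated: hand back a body-backed implementation to call normally.
            other => Some(Instance(other)),
        }
    }

    fn main() {
        assert!(call_intrinsic("black_box").is_none());
        assert_eq!(call_intrinsic("ctpop").map(|i| i.0), Some("ctpop"));
    }
]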
 
@@ -244,16 +248,16 @@ pub trait Machine<'mir, 'tcx: 'mir>: Sized {
     ///
     /// Returns a (value, overflowed) pair if the operation succeeded
     fn binary_ptr_op(
-        ecx: &InterpCx<'mir, 'tcx, Self>,
+        ecx: &InterpCx<'tcx, Self>,
         bin_op: mir::BinOp,
         left: &ImmTy<'tcx, Self::Provenance>,
         right: &ImmTy<'tcx, Self::Provenance>,
-    ) -> InterpResult<'tcx, (ImmTy<'tcx, Self::Provenance>, bool)>;
+    ) -> InterpResult<'tcx, ImmTy<'tcx, Self::Provenance>>;
 
     /// Generate the NaN returned by a float operation, given the list of inputs.
     /// (This is all inputs, not just NaN inputs!)
     fn generate_nan<F1: Float + FloatConvert<F2>, F2: Float>(
-        _ecx: &InterpCx<'mir, 'tcx, Self>,
+        _ecx: &InterpCx<'tcx, Self>,
         _inputs: &[F1],
     ) -> F2 {
         // By default we always return the preferred NaN.
@@ -262,14 +266,14 @@ pub trait Machine<'mir, 'tcx: 'mir>: Sized {
 
     /// Called before a basic block terminator is executed.
     #[inline]
-    fn before_terminator(_ecx: &mut InterpCx<'mir, 'tcx, Self>) -> InterpResult<'tcx> {
+    fn before_terminator(_ecx: &mut InterpCx<'tcx, Self>) -> InterpResult<'tcx> {
         Ok(())
     }
 
     /// Called when the interpreter encounters a `StatementKind::ConstEvalCounter` instruction.
     /// You can use this to detect long or endlessly running programs.
     #[inline]
-    fn increment_const_eval_counter(_ecx: &mut InterpCx<'mir, 'tcx, Self>) -> InterpResult<'tcx> {
+    fn increment_const_eval_counter(_ecx: &mut InterpCx<'tcx, Self>) -> InterpResult<'tcx> {
         Ok(())
     }
 
@@ -289,7 +293,7 @@ pub trait Machine<'mir, 'tcx: 'mir>: Sized {
 
     /// Return the `AllocId` for the given thread-local static in the current thread.
     fn thread_local_static_pointer(
-        _ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        _ecx: &mut InterpCx<'tcx, Self>,
         def_id: DefId,
     ) -> InterpResult<'tcx, Pointer<Self::Provenance>> {
         throw_unsup!(ThreadLocalStatic(def_id))
@@ -297,20 +301,20 @@ pub trait Machine<'mir, 'tcx: 'mir>: Sized {
 
     /// Return the `AllocId` for the given `extern static`.
     fn extern_static_pointer(
-        ecx: &InterpCx<'mir, 'tcx, Self>,
+        ecx: &InterpCx<'tcx, Self>,
         def_id: DefId,
     ) -> InterpResult<'tcx, Pointer<Self::Provenance>>;
 
     /// "Int-to-pointer cast"
     fn ptr_from_addr_cast(
-        ecx: &InterpCx<'mir, 'tcx, Self>,
+        ecx: &InterpCx<'tcx, Self>,
         addr: u64,
     ) -> InterpResult<'tcx, Pointer<Option<Self::Provenance>>>;
 
     /// Marks a pointer as exposed, allowing its provenance
     /// to be recovered. "Pointer-to-int cast"
     fn expose_ptr(
-        ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        ecx: &mut InterpCx<'tcx, Self>,
         ptr: Pointer<Self::Provenance>,
     ) -> InterpResult<'tcx>;
 
@@ -321,31 +325,45 @@ pub trait Machine<'mir, 'tcx: 'mir>: Sized {
     ///
     /// When this fails, that means the pointer does not point to a live allocation.
     fn ptr_get_alloc(
-        ecx: &InterpCx<'mir, 'tcx, Self>,
+        ecx: &InterpCx<'tcx, Self>,
         ptr: Pointer<Self::Provenance>,
     ) -> Option<(AllocId, Size, Self::ProvenanceExtra)>;
 
-    /// Called to adjust allocations to the Provenance and AllocExtra of this machine.
+    /// Called to adjust global allocations to the Provenance and AllocExtra of this machine.
     ///
     /// If `alloc` contains pointers, then they are all pointing to globals.
     ///
-    /// The way we construct allocations is to always first construct it without extra and then add
-    /// the extra. This keeps uniform code paths for handling both allocations created by CTFE for
-    /// globals, and allocations created by Miri during evaluation.
-    ///
-    /// `kind` is the kind of the allocation being adjusted; it can be `None` when
-    /// it's a global and `GLOBAL_KIND` is `None`.
-    ///
     /// This should avoid copying if no work has to be done! If this returns an owned
     /// allocation (because a copy had to be done to adjust things), machine memory will
     /// cache the result. (This relies on `AllocMap::get_or` being able to add the
     /// owned allocation to the map even when the map is shared.)
-    fn adjust_allocation<'b>(
-        ecx: &InterpCx<'mir, 'tcx, Self>,
+    fn adjust_global_allocation<'b>(
+        ecx: &InterpCx<'tcx, Self>,
         id: AllocId,
-        alloc: Cow<'b, Allocation>,
-        kind: Option<MemoryKind<Self::MemoryKind>>,
-    ) -> InterpResult<'tcx, Cow<'b, Allocation<Self::Provenance, Self::AllocExtra, Self::Bytes>>>;
+        alloc: &'b Allocation,
+    ) -> InterpResult<'tcx, Cow<'b, Allocation<Self::Provenance, Self::AllocExtra, Self::Bytes>>>
+    {
+        // The default implementation does a copy; CTFE machines have a more efficient implementation
+        // based on their particular choice for `Provenance`, `AllocExtra`, and `Bytes`.
+        let kind = Self::GLOBAL_KIND
+            .expect("if GLOBAL_KIND is None, adjust_global_allocation must be overridden");
+        let alloc = alloc.adjust_from_tcx(&ecx.tcx, |ptr| ecx.global_root_pointer(ptr))?;
+        let extra =
+            Self::init_alloc_extra(ecx, id, MemoryKind::Machine(kind), alloc.size(), alloc.align)?;
+        Ok(Cow::Owned(alloc.with_extra(extra)))
+    }
+
+    /// Initialize the extra state of an allocation.
+    ///
+    /// This is guaranteed to be called exactly once on all allocations that are accessed by the
+    /// program.
+    fn init_alloc_extra(
+        ecx: &InterpCx<'tcx, Self>,
+        id: AllocId,
+        kind: MemoryKind<Self::MemoryKind>,
+        size: Size,
+        align: Align,
+    ) -> InterpResult<'tcx, Self::AllocExtra>;
 
     /// Return a "root" pointer for the given allocation: the one that is used for direct
     /// accesses to this static/const/fn allocation, or the one returned from the heap allocator.
@@ -355,7 +373,7 @@ pub trait Machine<'mir, 'tcx: 'mir>: Sized {
     /// `kind` is the kind of the allocation the pointer points to; it can be `None` when
     /// it's a global and `GLOBAL_KIND` is `None`.
     fn adjust_alloc_root_pointer(
-        ecx: &InterpCx<'mir, 'tcx, Self>,
+        ecx: &InterpCx<'tcx, Self>,
         ptr: Pointer,
         kind: Option<MemoryKind<Self::MemoryKind>>,
     ) -> InterpResult<'tcx, Pointer<Self::Provenance>>;
@@ -366,7 +384,7 @@ pub trait Machine<'mir, 'tcx: 'mir>: Sized {
     /// is triggered, `targets[0]` when the assembly falls through, or diverge in case of
     /// `InlineAsmOptions::NORETURN` being set.
     fn eval_inline_asm(
-        _ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        _ecx: &mut InterpCx<'tcx, Self>,
         _template: &'tcx [InlineAsmTemplatePiece],
         _operands: &[mir::InlineAsmOperand<'tcx>],
         _options: InlineAsmOptions,
@@ -402,10 +420,7 @@ pub trait Machine<'mir, 'tcx: 'mir>: Sized {
     ///
     /// Used to prevent statics from self-initializing by reading from their own memory
     /// as it is being initialized.
-    fn before_alloc_read(
-        _ecx: &InterpCx<'mir, 'tcx, Self>,
-        _alloc_id: AllocId,
-    ) -> InterpResult<'tcx> {
+    fn before_alloc_read(_ecx: &InterpCx<'tcx, Self>, _alloc_id: AllocId) -> InterpResult<'tcx> {
         Ok(())
     }
 
@@ -440,7 +455,7 @@ pub trait Machine<'mir, 'tcx: 'mir>: Sized {
     /// Returns the possibly adjusted pointer.
     #[inline]
     fn retag_ptr_value(
-        _ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        _ecx: &mut InterpCx<'tcx, Self>,
         _kind: mir::RetagKind,
         val: &ImmTy<'tcx, Self::Provenance>,
     ) -> InterpResult<'tcx, ImmTy<'tcx, Self::Provenance>> {
@@ -451,7 +466,7 @@ pub trait Machine<'mir, 'tcx: 'mir>: Sized {
     /// Replaces all pointers stored in the given place.
     #[inline]
     fn retag_place_contents(
-        _ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        _ecx: &mut InterpCx<'tcx, Self>,
         _kind: mir::RetagKind,
         _place: &PlaceTy<'tcx, Self::Provenance>,
     ) -> InterpResult<'tcx> {
@@ -463,7 +478,7 @@ pub trait Machine<'mir, 'tcx: 'mir>: Sized {
     /// These places need to be protected to make sure the program cannot tell whether the
     /// argument/return value was actually copied or passed in-place.
     fn protect_in_place_function_argument(
-        ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        ecx: &mut InterpCx<'tcx, Self>,
         mplace: &MPlaceTy<'tcx, Self::Provenance>,
     ) -> InterpResult<'tcx> {
         // Without an aliasing model, all we can do is put `Uninit` into the place.
@@ -472,30 +487,30 @@ pub trait Machine<'mir, 'tcx: 'mir>: Sized {
     }
 
     /// Called immediately before a new stack frame gets pushed.
-    fn init_frame_extra(
-        ecx: &mut InterpCx<'mir, 'tcx, Self>,
-        frame: Frame<'mir, 'tcx, Self::Provenance>,
-    ) -> InterpResult<'tcx, Frame<'mir, 'tcx, Self::Provenance, Self::FrameExtra>>;
+    fn init_frame(
+        ecx: &mut InterpCx<'tcx, Self>,
+        frame: Frame<'tcx, Self::Provenance>,
+    ) -> InterpResult<'tcx, Frame<'tcx, Self::Provenance, Self::FrameExtra>>;
 
     /// Borrow the current thread's stack.
     fn stack<'a>(
-        ecx: &'a InterpCx<'mir, 'tcx, Self>,
-    ) -> &'a [Frame<'mir, 'tcx, Self::Provenance, Self::FrameExtra>];
+        ecx: &'a InterpCx<'tcx, Self>,
+    ) -> &'a [Frame<'tcx, Self::Provenance, Self::FrameExtra>];
 
     /// Mutably borrow the current thread's stack.
     fn stack_mut<'a>(
-        ecx: &'a mut InterpCx<'mir, 'tcx, Self>,
-    ) -> &'a mut Vec<Frame<'mir, 'tcx, Self::Provenance, Self::FrameExtra>>;
+        ecx: &'a mut InterpCx<'tcx, Self>,
+    ) -> &'a mut Vec<Frame<'tcx, Self::Provenance, Self::FrameExtra>>;
 
     /// Called immediately after a stack frame got pushed and its locals got initialized.
-    fn after_stack_push(_ecx: &mut InterpCx<'mir, 'tcx, Self>) -> InterpResult<'tcx> {
+    fn after_stack_push(_ecx: &mut InterpCx<'tcx, Self>) -> InterpResult<'tcx> {
         Ok(())
     }
 
     /// Called just before the return value is copied to the caller-provided return place.
     fn before_stack_pop(
-        _ecx: &InterpCx<'mir, 'tcx, Self>,
-        _frame: &Frame<'mir, 'tcx, Self::Provenance, Self::FrameExtra>,
+        _ecx: &InterpCx<'tcx, Self>,
+        _frame: &Frame<'tcx, Self::Provenance, Self::FrameExtra>,
     ) -> InterpResult<'tcx> {
         Ok(())
     }
@@ -504,8 +519,8 @@ pub trait Machine<'mir, 'tcx: 'mir>: Sized {
     /// The `locals` have already been destroyed!
     #[inline(always)]
     fn after_stack_pop(
-        _ecx: &mut InterpCx<'mir, 'tcx, Self>,
-        _frame: Frame<'mir, 'tcx, Self::Provenance, Self::FrameExtra>,
+        _ecx: &mut InterpCx<'tcx, Self>,
+        _frame: Frame<'tcx, Self::Provenance, Self::FrameExtra>,
         unwinding: bool,
     ) -> InterpResult<'tcx, StackPopJump> {
         // By default, we do not support unwinding from panics
@@ -517,7 +532,7 @@ pub trait Machine<'mir, 'tcx: 'mir>: Sized {
     /// but before the local's stack frame is updated to point to that memory.
     #[inline(always)]
     fn after_local_allocated(
-        _ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        _ecx: &mut InterpCx<'tcx, Self>,
         _local: mir::Local,
         _mplace: &MPlaceTy<'tcx, Self::Provenance>,
     ) -> InterpResult<'tcx> {
@@ -528,7 +543,7 @@ pub trait Machine<'mir, 'tcx: 'mir>: Sized {
     /// but this hook has the chance to do some pre/postprocessing.
     #[inline(always)]
     fn eval_mir_constant<F>(
-        ecx: &InterpCx<'mir, 'tcx, Self>,
+        ecx: &InterpCx<'tcx, Self>,
         val: mir::Const<'tcx>,
         span: Span,
         layout: Option<TyAndLayout<'tcx>>,
@@ -536,7 +551,7 @@ pub trait Machine<'mir, 'tcx: 'mir>: Sized {
     ) -> InterpResult<'tcx, OpTy<'tcx, Self::Provenance>>
     where
         F: Fn(
-            &InterpCx<'mir, 'tcx, Self>,
+            &InterpCx<'tcx, Self>,
             mir::Const<'tcx>,
             Span,
             Option<TyAndLayout<'tcx>>,
@@ -548,7 +563,7 @@ pub trait Machine<'mir, 'tcx: 'mir>: Sized {
 
 /// A lot of the flexibility above is just needed for `Miri`, but all "compile-time" machines
 /// (CTFE and ConstProp) use the same instance. Here, we share that code.
-pub macro compile_time_machine(<$mir: lifetime, $tcx: lifetime>) {
+pub macro compile_time_machine(<$tcx: lifetime>) {
     type Provenance = CtfeProvenance;
     type ProvenanceExtra = bool; // the "immutable" flag
 
@@ -563,13 +578,13 @@ pub macro compile_time_machine(<$mir: lifetime, $tcx: lifetime>) {
     type Bytes = Box<[u8]>;
 
     #[inline(always)]
-    fn ignore_optional_overflow_checks(_ecx: &InterpCx<$mir, $tcx, Self>) -> bool {
+    fn ignore_optional_overflow_checks(_ecx: &InterpCx<$tcx, Self>) -> bool {
         false
     }
 
     #[inline(always)]
     fn unwind_terminate(
-        _ecx: &mut InterpCx<$mir, $tcx, Self>,
+        _ecx: &mut InterpCx<$tcx, Self>,
         _reason: mir::UnwindTerminateReason,
     ) -> InterpResult<$tcx> {
         unreachable!("unwinding cannot happen during compile-time evaluation")
@@ -577,7 +592,7 @@ pub macro compile_time_machine(<$mir: lifetime, $tcx: lifetime>) {
 
     #[inline(always)]
     fn call_extra_fn(
-        _ecx: &mut InterpCx<$mir, $tcx, Self>,
+        _ecx: &mut InterpCx<$tcx, Self>,
         fn_val: !,
         _abi: CallAbi,
         _args: &[FnArg<$tcx>],
@@ -589,17 +604,27 @@ pub macro compile_time_machine(<$mir: lifetime, $tcx: lifetime>) {
     }
 
     #[inline(always)]
-    fn adjust_allocation<'b>(
-        _ecx: &InterpCx<$mir, $tcx, Self>,
+    fn adjust_global_allocation<'b>(
+        _ecx: &InterpCx<$tcx, Self>,
         _id: AllocId,
-        alloc: Cow<'b, Allocation>,
-        _kind: Option<MemoryKind<Self::MemoryKind>>,
+        alloc: &'b Allocation,
     ) -> InterpResult<$tcx, Cow<'b, Allocation<Self::Provenance>>> {
-        Ok(alloc)
+        // Overwrite default implementation: no need to adjust anything.
+        Ok(Cow::Borrowed(alloc))
+    }
+
+    fn init_alloc_extra(
+        _ecx: &InterpCx<$tcx, Self>,
+        _id: AllocId,
+        _kind: MemoryKind<Self::MemoryKind>,
+        _size: Size,
+        _align: Align,
+    ) -> InterpResult<$tcx, Self::AllocExtra> {
+        Ok(())
     }
 
     fn extern_static_pointer(
-        ecx: &InterpCx<$mir, $tcx, Self>,
+        ecx: &InterpCx<$tcx, Self>,
         def_id: DefId,
     ) -> InterpResult<$tcx, Pointer> {
         // Use the `AllocId` associated with the `DefId`. Any actual *access* will fail.
@@ -608,7 +633,7 @@ pub macro compile_time_machine(<$mir: lifetime, $tcx: lifetime>) {
 
     #[inline(always)]
     fn adjust_alloc_root_pointer(
-        _ecx: &InterpCx<$mir, $tcx, Self>,
+        _ecx: &InterpCx<$tcx, Self>,
         ptr: Pointer<CtfeProvenance>,
         _kind: Option<MemoryKind<Self::MemoryKind>>,
     ) -> InterpResult<$tcx, Pointer<CtfeProvenance>> {
@@ -617,7 +642,7 @@ pub macro compile_time_machine(<$mir: lifetime, $tcx: lifetime>) {
 
     #[inline(always)]
     fn ptr_from_addr_cast(
-        _ecx: &InterpCx<$mir, $tcx, Self>,
+        _ecx: &InterpCx<$tcx, Self>,
         addr: u64,
     ) -> InterpResult<$tcx, Pointer<Option<CtfeProvenance>>> {
         // Allow these casts, but make the pointer not dereferenceable.
@@ -628,7 +653,7 @@ pub macro compile_time_machine(<$mir: lifetime, $tcx: lifetime>) {
 
     #[inline(always)]
     fn ptr_get_alloc(
-        _ecx: &InterpCx<$mir, $tcx, Self>,
+        _ecx: &InterpCx<$tcx, Self>,
         ptr: Pointer<CtfeProvenance>,
     ) -> Option<(AllocId, Size, Self::ProvenanceExtra)> {
         // We know `offset` is relative to the allocation, so we can use `into_parts`.
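
Putting the machine.rs changes together: a hedged sketch of what a downstream compile-time machine looks like after this patch (`MyCtfeMachine` is an illustrative name, not one from the patch):

    impl<'tcx> Machine<'tcx> for MyCtfeMachine {
        // The macro now takes one lifetime instead of two. Among other items it
        // supplies `adjust_global_allocation` (borrows tcx memory unchanged) and
        // `init_alloc_extra` (computes only the extra data for fresh allocations),
        // which together replace the old all-in-one `adjust_allocation` hook.
        compile_time_machine!(<'tcx>); // was: compile_time_machine!(<'mir, 'tcx>);

        // ...remaining required items (MemoryKind, find_mir_or_eval_fn, ...) elided.
    }
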
diff --git a/compiler/rustc_const_eval/src/interpret/memory.rs b/compiler/rustc_const_eval/src/interpret/memory.rs
index 594e3b3212f..521f28b7123 100644
--- a/compiler/rustc_const_eval/src/interpret/memory.rs
+++ b/compiler/rustc_const_eval/src/interpret/memory.rs
@@ -16,16 +16,20 @@ use std::ptr;
 use rustc_ast::Mutability;
 use rustc_data_structures::fx::{FxHashSet, FxIndexMap};
 use rustc_hir::def::DefKind;
+use rustc_middle::bug;
 use rustc_middle::mir::display_allocation;
 use rustc_middle::ty::{self, Instance, ParamEnv, Ty, TyCtxt};
 use rustc_target::abi::{Align, HasDataLayout, Size};
 
+use tracing::{debug, instrument, trace};
+
 use crate::fluent_generated as fluent;
 
 use super::{
-    alloc_range, AllocBytes, AllocId, AllocMap, AllocRange, Allocation, CheckAlignMsg,
-    CheckInAllocMsg, CtfeProvenance, GlobalAlloc, InterpCx, InterpResult, Machine, MayLeak,
-    Misalignment, Pointer, PointerArithmetic, Provenance, Scalar,
+    alloc_range, err_ub, err_ub_custom, throw_ub, throw_ub_custom, throw_unsup, throw_unsup_format,
+    AllocBytes, AllocId, AllocMap, AllocRange, Allocation, CheckAlignMsg, CheckInAllocMsg,
+    CtfeProvenance, GlobalAlloc, InterpCx, InterpResult, Machine, MayLeak, Misalignment, Pointer,
+    PointerArithmetic, Provenance, Scalar,
 };
 
 #[derive(Debug, PartialEq, Copy, Clone)]
@@ -92,7 +96,7 @@ impl<'tcx, Other> FnVal<'tcx, Other> {
 
 // `Memory` has to depend on the `Machine` because some of its operations
 // (e.g., `get`) call a `Machine` hook.
-pub struct Memory<'mir, 'tcx, M: Machine<'mir, 'tcx>> {
+pub struct Memory<'tcx, M: Machine<'tcx>> {
     /// Allocations local to this instance of the interpreter. The kind
     /// helps ensure that the same mechanism is used for allocation and
     /// deallocation. When an allocation is not found here, it is a
@@ -138,7 +142,7 @@ pub struct AllocRefMut<'a, 'tcx, Prov: Provenance, Extra, Bytes: AllocBytes = Bo
     alloc_id: AllocId,
 }
 
-impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
+impl<'tcx, M: Machine<'tcx>> Memory<'tcx, M> {
     pub fn new() -> Self {
         Memory {
             alloc_map: M::MemoryMap::default(),
@@ -154,7 +158,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
     }
 }
 
-impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
     /// Call this to turn untagged "global" pointers (obtained via `tcx`) into
     /// the machine pointer to the allocation. Must never be used
     /// for any other pointers, nor for TLS statics.
@@ -235,7 +239,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 
     pub fn allocate_raw_ptr(
         &mut self,
-        alloc: Allocation,
+        alloc: Allocation<M::Provenance, (), M::Bytes>,
         kind: MemoryKind<M::MemoryKind>,
     ) -> InterpResult<'tcx, Pointer<M::Provenance>> {
         let id = self.tcx.reserve_alloc_id();
@@ -244,8 +248,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             M::GLOBAL_KIND.map(MemoryKind::Machine),
             "dynamically allocating global memory"
         );
-        let alloc = M::adjust_allocation(self, id, Cow::Owned(alloc), Some(kind))?;
-        self.memory.alloc_map.insert(id, (kind, alloc.into_owned()));
+        // We have set things up so we don't need to call `adjust_from_tcx` here,
+        // so we avoid copying the entire allocation contents.
+        let extra = M::init_alloc_extra(self, id, kind, alloc.size(), alloc.align)?;
+        let alloc = alloc.with_extra(extra);
+        self.memory.alloc_map.insert(id, (kind, alloc));
         M::adjust_alloc_root_pointer(self, Pointer::from(id), Some(kind))
     }
 
@@ -411,6 +418,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
     /// to the allocation it points to. Supports both shared and mutable references, as the actual
     /// checking is offloaded to a helper closure.
     ///
+    /// `alloc_size` will only get called for non-zero-sized accesses.
+    ///
     /// Returns `None` if and only if the size is 0.
     fn check_and_deref_ptr<T>(
         &self,
@@ -423,18 +432,19 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             M::ProvenanceExtra,
         ) -> InterpResult<'tcx, (Size, Align, T)>,
     ) -> InterpResult<'tcx, Option<T>> {
+        // Everything is okay with size 0.
+        if size.bytes() == 0 {
+            return Ok(None);
+        }
+
         Ok(match self.ptr_try_get_alloc_id(ptr) {
             Err(addr) => {
-                // We couldn't get a proper allocation. This is only okay if the access size is 0,
-                // and the address is not null.
-                if size.bytes() > 0 || addr == 0 {
-                    throw_ub!(DanglingIntPointer(addr, msg));
-                }
-                None
+                // We couldn't get a proper allocation.
+                throw_ub!(DanglingIntPointer(addr, msg));
             }
             Ok((alloc_id, offset, prov)) => {
                 let (alloc_size, _alloc_align, ret_val) = alloc_size(alloc_id, offset, prov)?;
-                // Test bounds. This also ensures non-null.
+                // Test bounds.
                 // It is sufficient to check this for the end pointer. Also check for overflow!
                 if offset.checked_add(size, &self.tcx).map_or(true, |end| end > alloc_size) {
                     throw_ub!(PointerOutOfBounds {
@@ -445,14 +455,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                         msg,
                     })
                 }
-                // Ensure we never consider the null pointer dereferenceable.
-                if M::Provenance::OFFSET_IS_ADDR {
-                    assert_ne!(ptr.addr(), Size::ZERO);
-                }
 
-                // We can still be zero-sized in this branch, in which case we have to
-                // return `None`.
-                if size.bytes() == 0 { None } else { Some(ret_val) }
+                Some(ret_val)
             }
         })
     }
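
The reordering above changes the observable semantics of zero-sized accesses. A self-contained model of the new decision order in plain Rust (`u64` and `bool` stand in for the interpreter's pointer and allocation machinery):

    fn model_check(size: u64, resolves_to_allocation: bool, in_bounds: bool) -> Result<Option<()>, &'static str> {
        if size == 0 {
            return Ok(None); // checked first: nothing is dereferenced, so any address is fine
        }
        if !resolves_to_allocation {
            return Err("DanglingIntPointer"); // the old `addr == 0` special case is gone
        }
        if !in_bounds {
            return Err("PointerOutOfBounds"); // offset + size checked against the allocation size
        }
        Ok(Some(()))
    }
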
@@ -523,7 +527,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
     }
 }
 
-impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
     /// This function is used by Miri's provenance GC to remove unreachable entries from the dead_alloc_map.
     pub fn remove_unreachable_allocs(&mut self, reachable_allocs: &FxHashSet<AllocId>) {
         // Unlike all the other GC helpers where we check if an `AllocId` is found in the interpreter or
@@ -535,7 +539,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 }
 
 /// Allocation accessors
-impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
     /// Helper function to obtain a global (tcx) allocation.
     /// This attempts to return a reference to an existing allocation if
     /// one can be found in `tcx`. That, however, is only possible if `tcx` and
@@ -582,11 +586,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         };
         M::before_access_global(self.tcx, &self.machine, id, alloc, def_id, is_write)?;
         // We got tcx memory. Let the machine initialize its "extra" stuff.
-        M::adjust_allocation(
+        M::adjust_global_allocation(
             self,
             id, // always use the ID we got as input, not the "hidden" one.
-            Cow::Borrowed(alloc.inner()),
-            M::GLOBAL_KIND.map(MemoryKind::Machine),
+            alloc.inner(),
         )
     }
 
@@ -639,16 +642,18 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             size,
             CheckInAllocMsg::MemoryAccessTest,
             |alloc_id, offset, prov| {
-                if !self.memory.validation_in_progress.get() {
-                    // We want to call the hook on *all* accesses that involve an AllocId,
-                    // including zero-sized accesses. That means we have to do it here
-                    // rather than below in the `Some` branch.
-                    M::before_alloc_read(self, alloc_id)?;
-                }
                 let alloc = self.get_alloc_raw(alloc_id)?;
                 Ok((alloc.size(), alloc.align, (alloc_id, offset, prov, alloc)))
             },
         )?;
+        // We want to call the hook on *all* accesses that involve an AllocId, including zero-sized
+        // accesses. That means we cannot rely on the closure above or the `Some` branch below. We
+        // do this after `check_and_deref_ptr` to ensure some basic sanity has already been checked.
+        if !self.memory.validation_in_progress.get() {
+            if let Ok((alloc_id, ..)) = self.ptr_try_get_alloc_id(ptr) {
+                M::before_alloc_read(self, alloc_id)?;
+            }
+        }
 
         if let Some((alloc_id, offset, prov, alloc)) = ptr_and_alloc {
             let range = alloc_range(offset, size);
@@ -885,14 +890,14 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
     /// Create a lazy debug printer that prints the given allocation and all allocations it points
     /// to, recursively.
     #[must_use]
-    pub fn dump_alloc<'a>(&'a self, id: AllocId) -> DumpAllocs<'a, 'mir, 'tcx, M> {
+    pub fn dump_alloc<'a>(&'a self, id: AllocId) -> DumpAllocs<'a, 'tcx, M> {
         self.dump_allocs(vec![id])
     }
 
     /// Create a lazy debug printer for a list of allocations and all allocations they point to,
     /// recursively.
     #[must_use]
-    pub fn dump_allocs<'a>(&'a self, mut allocs: Vec<AllocId>) -> DumpAllocs<'a, 'mir, 'tcx, M> {
+    pub fn dump_allocs<'a>(&'a self, mut allocs: Vec<AllocId>) -> DumpAllocs<'a, 'tcx, M> {
         allocs.sort();
         allocs.dedup();
         DumpAllocs { ecx: self, allocs }
@@ -972,12 +977,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 
 #[doc(hidden)]
 /// There's no way to use this directly, it's just a helper struct for the `dump_alloc(s)` methods.
-pub struct DumpAllocs<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> {
-    ecx: &'a InterpCx<'mir, 'tcx, M>,
+pub struct DumpAllocs<'a, 'tcx, M: Machine<'tcx>> {
+    ecx: &'a InterpCx<'tcx, M>,
     allocs: Vec<AllocId>,
 }
 
-impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> std::fmt::Debug for DumpAllocs<'a, 'mir, 'tcx, M> {
+impl<'a, 'tcx, M: Machine<'tcx>> std::fmt::Debug for DumpAllocs<'a, 'tcx, M> {
     fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         // Cannot be a closure because it is generic in `Prov`, `Extra`.
         fn write_allocation_track_relocs<'tcx, Prov: Provenance, Extra, Bytes: AllocBytes>(
@@ -1122,7 +1127,7 @@ impl<'tcx, 'a, Prov: Provenance, Extra, Bytes: AllocBytes> AllocRef<'a, 'tcx, Pr
     }
 }
 
-impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
     /// Reads the given number of bytes from memory, and strips their provenance if possible.
     /// Returns them as a slice.
     ///
@@ -1335,7 +1340,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 }
 
 /// Machine pointer introspection.
-impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
     /// Test if this value might be null.
     /// If the machine does not support ptr-to-int casts, this is conservative.
     pub fn scalar_may_be_null(&self, scalar: Scalar<M::Provenance>) -> InterpResult<'tcx, bool> {
diff --git a/compiler/rustc_const_eval/src/interpret/mod.rs b/compiler/rustc_const_eval/src/interpret/mod.rs
index 474d35b2aa3..7d7b421f869 100644
--- a/compiler/rustc_const_eval/src/interpret/mod.rs
+++ b/compiler/rustc_const_eval/src/interpret/mod.rs
@@ -18,11 +18,13 @@ mod util;
 mod validity;
 mod visitor;
 
+#[doc(no_inline)]
 pub use rustc_middle::mir::interpret::*; // have all the `interpret` symbols in one place: here
 
 pub use self::eval_context::{format_interp_error, Frame, FrameInfo, InterpCx, StackPopCleanup};
 pub use self::intern::{
     intern_const_alloc_for_constprop, intern_const_alloc_recursive, HasStaticRootDefId, InternKind,
+    InternResult,
 };
 pub use self::machine::{compile_time_machine, AllocMap, Machine, MayLeak, StackPopJump};
 pub use self::memory::{AllocKind, AllocRef, AllocRefMut, FnVal, Memory, MemoryKind};
diff --git a/compiler/rustc_const_eval/src/interpret/operand.rs b/compiler/rustc_const_eval/src/interpret/operand.rs
index c120154ce2a..bbb2c2f3938 100644
--- a/compiler/rustc_const_eval/src/interpret/operand.rs
+++ b/compiler/rustc_const_eval/src/interpret/operand.rs
@@ -4,18 +4,21 @@
 use std::assert_matches::assert_matches;
 
 use either::{Either, Left, Right};
+use tracing::trace;
 
 use rustc_hir::def::Namespace;
-use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
+use rustc_middle::mir::interpret::ScalarSizeMismatch;
+use rustc_middle::ty::layout::{HasParamEnv, HasTyCtxt, LayoutOf, TyAndLayout};
 use rustc_middle::ty::print::{FmtPrinter, PrettyPrinter};
-use rustc_middle::ty::{ConstInt, Ty, TyCtxt};
+use rustc_middle::ty::{ConstInt, ScalarInt, Ty, TyCtxt};
+use rustc_middle::{bug, span_bug};
 use rustc_middle::{mir, ty};
 use rustc_target::abi::{self, Abi, HasDataLayout, Size};
 
 use super::{
-    alloc_range, from_known_layout, mir_assign_valid_types, CtfeProvenance, InterpCx, InterpResult,
-    MPlaceTy, Machine, MemPlace, MemPlaceMeta, OffsetMode, PlaceTy, Pointer, Projectable,
-    Provenance, Scalar,
+    alloc_range, err_ub, from_known_layout, mir_assign_valid_types, throw_ub, CtfeProvenance,
+    InterpCx, InterpResult, MPlaceTy, Machine, MemPlace, MemPlaceMeta, OffsetMode, PlaceTy,
+    Pointer, Projectable, Provenance, Scalar,
 };
 
 /// An `Immediate` represents a single immediate self-contained Rust value.
@@ -211,6 +214,12 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
     }
 
     #[inline]
+    pub fn from_scalar_int(s: ScalarInt, layout: TyAndLayout<'tcx>) -> Self {
+        assert_eq!(s.size(), layout.size);
+        Self::from_scalar(Scalar::from(s), layout)
+    }
+
+    #[inline]
     pub fn try_from_uint(i: impl Into<u128>, layout: TyAndLayout<'tcx>) -> Option<Self> {
         Some(Self::from_scalar(Scalar::try_from_uint(i, layout.size)?, layout))
     }
@@ -223,7 +232,6 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
     pub fn try_from_int(i: impl Into<i128>, layout: TyAndLayout<'tcx>) -> Option<Self> {
         Some(Self::from_scalar(Scalar::try_from_int(i, layout.size)?, layout))
     }
-
     #[inline]
     pub fn from_int(i: impl Into<i128>, layout: TyAndLayout<'tcx>) -> Self {
         Self::from_scalar(Scalar::from_int(i, layout.size), layout)
@@ -242,6 +250,29 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
         Self::from_scalar(Scalar::from_i8(c as i8), layout)
     }
 
+    pub fn from_pair(a: Self, b: Self, tcx: TyCtxt<'tcx>) -> Self {
+        let layout = tcx
+            .layout_of(
+                ty::ParamEnv::reveal_all().and(Ty::new_tup(tcx, &[a.layout.ty, b.layout.ty])),
+            )
+            .unwrap();
+        Self::from_scalar_pair(a.to_scalar(), b.to_scalar(), layout)
+    }
+
+    /// Return the immediate as a `ScalarInt`. Ensures that it has the size that the layout of the
+    /// immediate indicates.
+    #[inline]
+    pub fn to_scalar_int(&self) -> InterpResult<'tcx, ScalarInt> {
+        let s = self.to_scalar().to_scalar_int()?;
+        if s.size() != self.layout.size {
+            throw_ub!(ScalarSizeMismatch(ScalarSizeMismatch {
+                target_size: self.layout.size.bytes(),
+                data_size: s.size().bytes(),
+            }));
+        }
+        Ok(s)
+    }
+
     #[inline]
     pub fn to_const_int(self) -> ConstInt {
         assert!(self.layout.ty.is_integral());
@@ -249,6 +280,17 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
         ConstInt::new(int, self.layout.ty.is_signed(), self.layout.ty.is_ptr_sized_integral())
     }
 
+    #[inline]
+    #[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980)
+    pub fn to_pair(self, cx: &(impl HasTyCtxt<'tcx> + HasParamEnv<'tcx>)) -> (Self, Self) {
+        let layout = self.layout;
+        let (val0, val1) = self.to_scalar_pair();
+        (
+            ImmTy::from_scalar(val0, layout.field(cx, 0)),
+            ImmTy::from_scalar(val1, layout.field(cx, 1)),
+        )
+    }
+
     /// Compute the "sub-immediate" that is located within the `base` at the given offset with the
     /// given layout.
     // Not called `offset` to avoid confusion with the trait method.
@@ -332,21 +374,21 @@ impl<'tcx, Prov: Provenance> Projectable<'tcx, Prov> for ImmTy<'tcx, Prov> {
         MemPlaceMeta::None
     }
 
-    fn offset_with_meta<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
+    fn offset_with_meta<M: Machine<'tcx, Provenance = Prov>>(
         &self,
         offset: Size,
         _mode: OffsetMode,
         meta: MemPlaceMeta<Prov>,
         layout: TyAndLayout<'tcx>,
-        ecx: &InterpCx<'mir, 'tcx, M>,
+        ecx: &InterpCx<'tcx, M>,
     ) -> InterpResult<'tcx, Self> {
         assert_matches!(meta, MemPlaceMeta::None); // we can't store this anywhere anyway
         Ok(self.offset_(offset, layout, ecx))
     }
 
-    fn to_op<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
+    fn to_op<M: Machine<'tcx, Provenance = Prov>>(
         &self,
-        _ecx: &InterpCx<'mir, 'tcx, M>,
+        _ecx: &InterpCx<'tcx, M>,
     ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
         Ok(self.clone().into())
     }
@@ -415,13 +457,13 @@ impl<'tcx, Prov: Provenance> Projectable<'tcx, Prov> for OpTy<'tcx, Prov> {
         }
     }
 
-    fn offset_with_meta<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
+    fn offset_with_meta<M: Machine<'tcx, Provenance = Prov>>(
         &self,
         offset: Size,
         mode: OffsetMode,
         meta: MemPlaceMeta<Prov>,
         layout: TyAndLayout<'tcx>,
-        ecx: &InterpCx<'mir, 'tcx, M>,
+        ecx: &InterpCx<'tcx, M>,
     ) -> InterpResult<'tcx, Self> {
         match self.as_mplace_or_imm() {
             Left(mplace) => Ok(mplace.offset_with_meta(offset, mode, meta, layout, ecx)?.into()),
@@ -433,9 +475,9 @@ impl<'tcx, Prov: Provenance> Projectable<'tcx, Prov> for OpTy<'tcx, Prov> {
         }
     }
 
-    fn to_op<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
+    fn to_op<M: Machine<'tcx, Provenance = Prov>>(
         &self,
-        _ecx: &InterpCx<'mir, 'tcx, M>,
+        _ecx: &InterpCx<'tcx, M>,
     ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
         Ok(self.clone())
     }
@@ -467,7 +509,7 @@ impl<'tcx, Prov: Provenance> Readable<'tcx, Prov> for ImmTy<'tcx, Prov> {
     }
 }
 
-impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
     /// Try reading an immediate in memory; this is interesting particularly for `ScalarPair`.
     /// Returns `None` if the layout does not permit loading this as a value.
     ///
@@ -792,7 +834,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 }
 
 // Some nodes are used a lot. Make sure they don't unintentionally get bigger.
-#[cfg(all(any(target_arch = "x86_64", target_arch = "aarch64"), target_pointer_width = "64"))]
+#[cfg(target_pointer_width = "64")]
 mod size_asserts {
     use super::*;
     use rustc_data_structures::static_assert_size;
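
The new `ImmTy` helpers compose naturally; a hedged usage sketch (assuming `ecx: &InterpCx<'tcx, M>` and a `u32` layout `u32_layout` are in scope; not a verbatim excerpt from the compiler):

    let a = ImmTy::from_uint(7u32, u32_layout);
    let b = ImmTy::from_bool(true, *ecx.tcx);
    let pair = ImmTy::from_pair(a, b, *ecx.tcx); // lays out `(u32, bool)` with reveal_all
    let (a2, _b2) = pair.to_pair(ecx);           // splits along the pair's field layouts
    let n = a2.to_scalar_int()?;                 // ScalarSizeMismatch if sizes disagree
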
diff --git a/compiler/rustc_const_eval/src/interpret/operator.rs b/compiler/rustc_const_eval/src/interpret/operator.rs
index 5665bb4999f..6d005dfcd86 100644
--- a/compiler/rustc_const_eval/src/interpret/operator.rs
+++ b/compiler/rustc_const_eval/src/interpret/operator.rs
@@ -1,77 +1,23 @@
+use either::Either;
+
 use rustc_apfloat::{Float, FloatConvert};
 use rustc_middle::mir;
 use rustc_middle::mir::interpret::{InterpResult, Scalar};
 use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
-use rustc_middle::ty::{self, FloatTy, Ty};
+use rustc_middle::ty::{self, FloatTy, ScalarInt};
+use rustc_middle::{bug, span_bug};
 use rustc_span::symbol::sym;
-use rustc_target::abi::Abi;
-
-use super::{ImmTy, Immediate, InterpCx, Machine, PlaceTy};
-
-use crate::fluent_generated as fluent;
-
-impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
-    /// Applies the binary operation `op` to the two operands and writes a tuple of the result
-    /// and a boolean signifying the potential overflow to the destination.
-    pub fn binop_with_overflow(
-        &mut self,
-        op: mir::BinOp,
-        left: &ImmTy<'tcx, M::Provenance>,
-        right: &ImmTy<'tcx, M::Provenance>,
-        dest: &PlaceTy<'tcx, M::Provenance>,
-    ) -> InterpResult<'tcx> {
-        let (val, overflowed) = self.overflowing_binary_op(op, left, right)?;
-        debug_assert_eq!(
-            Ty::new_tup(self.tcx.tcx, &[val.layout.ty, self.tcx.types.bool]),
-            dest.layout.ty,
-            "type mismatch for result of {op:?}",
-        );
-        // Write the result to `dest`.
-        if let Abi::ScalarPair(..) = dest.layout.abi {
-            // We can use the optimized path and avoid `place_field` (which might do
-            // `force_allocation`).
-            let pair = Immediate::ScalarPair(val.to_scalar(), Scalar::from_bool(overflowed));
-            self.write_immediate(pair, dest)?;
-        } else {
-            assert!(self.tcx.sess.opts.unstable_opts.randomize_layout);
-            // With randomized layout, `(int, bool)` might cease to be a `ScalarPair`, so we have to
-            // do a component-wise write here. This code path is slower than the above because
-            // `place_field` will have to `force_allocate` locals here.
-            let val_field = self.project_field(dest, 0)?;
-            self.write_scalar(val.to_scalar(), &val_field)?;
-            let overflowed_field = self.project_field(dest, 1)?;
-            self.write_scalar(Scalar::from_bool(overflowed), &overflowed_field)?;
-        }
-        Ok(())
-    }
+use tracing::trace;
 
-    /// Applies the binary operation `op` to the arguments and writes the result to the
-    /// destination.
-    pub fn binop_ignore_overflow(
-        &mut self,
-        op: mir::BinOp,
-        left: &ImmTy<'tcx, M::Provenance>,
-        right: &ImmTy<'tcx, M::Provenance>,
-        dest: &PlaceTy<'tcx, M::Provenance>,
-    ) -> InterpResult<'tcx> {
-        let val = self.wrapping_binary_op(op, left, right)?;
-        assert_eq!(val.layout.ty, dest.layout.ty, "type mismatch for result of {op:?}");
-        self.write_immediate(*val, dest)
-    }
-}
+use super::{err_ub, throw_ub, ImmTy, InterpCx, Machine, MemPlaceMeta};
 
-impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
-    fn three_way_compare<T: Ord>(&self, lhs: T, rhs: T) -> (ImmTy<'tcx, M::Provenance>, bool) {
+impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
+    fn three_way_compare<T: Ord>(&self, lhs: T, rhs: T) -> ImmTy<'tcx, M::Provenance> {
         let res = Ord::cmp(&lhs, &rhs);
-        return (ImmTy::from_ordering(res, *self.tcx), false);
+        return ImmTy::from_ordering(res, *self.tcx);
     }
 
-    fn binary_char_op(
-        &self,
-        bin_op: mir::BinOp,
-        l: char,
-        r: char,
-    ) -> (ImmTy<'tcx, M::Provenance>, bool) {
+    fn binary_char_op(&self, bin_op: mir::BinOp, l: char, r: char) -> ImmTy<'tcx, M::Provenance> {
         use rustc_middle::mir::BinOp::*;
 
         if bin_op == Cmp {
@@ -87,15 +33,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             Ge => l >= r,
             _ => span_bug!(self.cur_span(), "Invalid operation on char: {:?}", bin_op),
         };
-        (ImmTy::from_bool(res, *self.tcx), false)
+        ImmTy::from_bool(res, *self.tcx)
     }
 
-    fn binary_bool_op(
-        &self,
-        bin_op: mir::BinOp,
-        l: bool,
-        r: bool,
-    ) -> (ImmTy<'tcx, M::Provenance>, bool) {
+    fn binary_bool_op(&self, bin_op: mir::BinOp, l: bool, r: bool) -> ImmTy<'tcx, M::Provenance> {
         use rustc_middle::mir::BinOp::*;
 
         let res = match bin_op {
@@ -110,7 +51,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             BitXor => l ^ r,
             _ => span_bug!(self.cur_span(), "Invalid operation on bool: {:?}", bin_op),
         };
-        (ImmTy::from_bool(res, *self.tcx), false)
+        ImmTy::from_bool(res, *self.tcx)
     }
 
     fn binary_float_op<F: Float + FloatConvert<F> + Into<Scalar<M::Provenance>>>(
@@ -119,14 +60,14 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         layout: TyAndLayout<'tcx>,
         l: F,
         r: F,
-    ) -> (ImmTy<'tcx, M::Provenance>, bool) {
+    ) -> ImmTy<'tcx, M::Provenance> {
         use rustc_middle::mir::BinOp::*;
 
         // Performs appropriate non-deterministic adjustments of NaN results.
         let adjust_nan =
             |f: F| -> F { if f.is_nan() { M::generate_nan(self, &[l, r]) } else { f } };
 
-        let val = match bin_op {
+        match bin_op {
             Eq => ImmTy::from_bool(l == r, *self.tcx),
             Ne => ImmTy::from_bool(l != r, *self.tcx),
             Lt => ImmTy::from_bool(l < r, *self.tcx),
@@ -139,21 +80,26 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             Div => ImmTy::from_scalar(adjust_nan((l / r).value).into(), layout),
             Rem => ImmTy::from_scalar(adjust_nan((l % r).value).into(), layout),
             _ => span_bug!(self.cur_span(), "invalid float op: `{:?}`", bin_op),
-        };
-        (val, false)
+        }
     }
 
     fn binary_int_op(
         &self,
         bin_op: mir::BinOp,
-        // passing in raw bits
-        l: u128,
-        left_layout: TyAndLayout<'tcx>,
-        r: u128,
-        right_layout: TyAndLayout<'tcx>,
-    ) -> InterpResult<'tcx, (ImmTy<'tcx, M::Provenance>, bool)> {
+        left: &ImmTy<'tcx, M::Provenance>,
+        right: &ImmTy<'tcx, M::Provenance>,
+    ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
         use rustc_middle::mir::BinOp::*;
 
+        // This checks the size, so that we can just assert it below.
+        let l = left.to_scalar_int()?;
+        let r = right.to_scalar_int()?;
+        // Prepare to convert the values to signed or unsigned form.
+        let l_signed = || l.assert_int(left.layout.size);
+        let l_unsigned = || l.assert_uint(left.layout.size);
+        let r_signed = || r.assert_int(right.layout.size);
+        let r_unsigned = || r.assert_uint(right.layout.size);
+
         let throw_ub_on_overflow = match bin_op {
             AddUnchecked => Some(sym::unchecked_add),
             SubUnchecked => Some(sym::unchecked_sub),
@@ -162,72 +108,76 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             ShrUnchecked => Some(sym::unchecked_shr),
             _ => None,
         };
+        let with_overflow = bin_op.is_overflowing();
 
         // Shift ops can have an RHS with a different numeric type.
         if matches!(bin_op, Shl | ShlUnchecked | Shr | ShrUnchecked) {
-            let size = left_layout.size.bits();
-            // The shift offset is implicitly masked to the type size. (This is the one MIR operator
-            // that does *not* directly map to a single LLVM operation.) Compute how much we
-            // actually shift and whether there was an overflow due to shifting too much.
-            let (shift_amount, overflow) = if right_layout.abi.is_signed() {
-                let shift_amount = self.sign_extend(r, right_layout) as i128;
+            let size = left.layout.size.bits();
+            // Compute the equivalent shift modulo `size` that is in the range `0..size`. (This is
+            // the one MIR operator that does *not* directly map to a single LLVM operation.)
+            let (shift_amount, overflow) = if right.layout.abi.is_signed() {
+                let shift_amount = r_signed();
                 let overflow = shift_amount < 0 || shift_amount >= i128::from(size);
+                // Deliberately wrapping `as` casts: shift_amount *can* be negative, but the result
+                // of the `as` will be equal modulo `size` (since it is a power of two).
                 let masked_amount = (shift_amount as u128) % u128::from(size);
-                debug_assert_eq!(overflow, shift_amount != (masked_amount as i128));
+                assert_eq!(overflow, shift_amount != i128::try_from(masked_amount).unwrap());
                 (masked_amount, overflow)
             } else {
-                let shift_amount = r;
+                let shift_amount = r_unsigned();
+                let overflow = shift_amount >= u128::from(size);
                 let masked_amount = shift_amount % u128::from(size);
-                (masked_amount, shift_amount != masked_amount)
+                assert_eq!(overflow, shift_amount != masked_amount);
+                (masked_amount, overflow)
             };
             let shift_amount = u32::try_from(shift_amount).unwrap(); // we masked so this will always fit
             // Compute the shifted result.
-            let result = if left_layout.abi.is_signed() {
-                let l = self.sign_extend(l, left_layout) as i128;
+            let result = if left.layout.abi.is_signed() {
+                let l = l_signed();
                 let result = match bin_op {
                     Shl | ShlUnchecked => l.checked_shl(shift_amount).unwrap(),
                     Shr | ShrUnchecked => l.checked_shr(shift_amount).unwrap(),
                     _ => bug!(),
                 };
-                result as u128
+                ScalarInt::truncate_from_int(result, left.layout.size).0
             } else {
-                match bin_op {
+                let l = l_unsigned();
+                let result = match bin_op {
                     Shl | ShlUnchecked => l.checked_shl(shift_amount).unwrap(),
                     Shr | ShrUnchecked => l.checked_shr(shift_amount).unwrap(),
                     _ => bug!(),
-                }
+                };
+                ScalarInt::truncate_from_uint(result, left.layout.size).0
             };
-            let truncated = self.truncate(result, left_layout);
 
-            if overflow && let Some(intrinsic_name) = throw_ub_on_overflow {
-                throw_ub_custom!(
-                    fluent::const_eval_overflow_shift,
-                    val = if right_layout.abi.is_signed() {
-                        (self.sign_extend(r, right_layout) as i128).to_string()
+            if overflow && let Some(intrinsic) = throw_ub_on_overflow {
+                throw_ub!(ShiftOverflow {
+                    intrinsic,
+                    shift_amount: if right.layout.abi.is_signed() {
+                        Either::Right(r_signed())
                     } else {
-                        r.to_string()
-                    },
-                    name = intrinsic_name
-                );
+                        Either::Left(r_unsigned())
+                    }
+                });
             }
 
-            return Ok((ImmTy::from_uint(truncated, left_layout), overflow));
+            return Ok(ImmTy::from_scalar_int(result, left.layout));
         }
 
         // For the remaining ops, the types must be the same on both sides
-        if left_layout.ty != right_layout.ty {
+        if left.layout.ty != right.layout.ty {
             span_bug!(
                 self.cur_span(),
                 "invalid asymmetric binary op {bin_op:?}: {l:?} ({l_ty}), {r:?} ({r_ty})",
-                l_ty = left_layout.ty,
-                r_ty = right_layout.ty,
+                l_ty = left.layout.ty,
+                r_ty = right.layout.ty,
             )
         }
 
-        let size = left_layout.size;
+        let size = left.layout.size;
 
         // Operations that need special treatment for signed integers
-        if left_layout.abi.is_signed() {
+        if left.layout.abi.is_signed() {
             let op: Option<fn(&i128, &i128) -> bool> = match bin_op {
                 Lt => Some(i128::lt),
                 Le => Some(i128::le),
@@ -236,28 +186,24 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 _ => None,
             };
             if let Some(op) = op {
-                let l = self.sign_extend(l, left_layout) as i128;
-                let r = self.sign_extend(r, right_layout) as i128;
-                return Ok((ImmTy::from_bool(op(&l, &r), *self.tcx), false));
+                return Ok(ImmTy::from_bool(op(&l_signed(), &r_signed()), *self.tcx));
             }
             if bin_op == Cmp {
-                let l = self.sign_extend(l, left_layout) as i128;
-                let r = self.sign_extend(r, right_layout) as i128;
-                return Ok(self.three_way_compare(l, r));
+                return Ok(self.three_way_compare(l_signed(), r_signed()));
             }
             let op: Option<fn(i128, i128) -> (i128, bool)> = match bin_op {
-                Div if r == 0 => throw_ub!(DivisionByZero),
-                Rem if r == 0 => throw_ub!(RemainderByZero),
+                Div if r.is_null() => throw_ub!(DivisionByZero),
+                Rem if r.is_null() => throw_ub!(RemainderByZero),
                 Div => Some(i128::overflowing_div),
                 Rem => Some(i128::overflowing_rem),
-                Add | AddUnchecked => Some(i128::overflowing_add),
-                Sub | SubUnchecked => Some(i128::overflowing_sub),
-                Mul | MulUnchecked => Some(i128::overflowing_mul),
+                Add | AddUnchecked | AddWithOverflow => Some(i128::overflowing_add),
+                Sub | SubUnchecked | SubWithOverflow => Some(i128::overflowing_sub),
+                Mul | MulUnchecked | MulWithOverflow => Some(i128::overflowing_mul),
                 _ => None,
             };
             if let Some(op) = op {
-                let l = self.sign_extend(l, left_layout) as i128;
-                let r = self.sign_extend(r, right_layout) as i128;
+                let l = l_signed();
+                let r = r_signed();
 
                 // We need a special check for overflowing Rem and Div since they are *UB*
                 // on overflow, which can happen with "int_min $OP -1".
@@ -272,23 +218,31 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 }
 
                 let (result, oflo) = op(l, r);
-                // This may be out-of-bounds for the result type, so we have to truncate ourselves.
+                // This may be out-of-bounds for the result type, so we have to truncate.
                 // If that truncation loses any information, we have an overflow.
-                let result = result as u128;
-                let truncated = self.truncate(result, left_layout);
-                let overflow = oflo || self.sign_extend(truncated, left_layout) != result;
-                if overflow && let Some(intrinsic_name) = throw_ub_on_overflow {
-                    throw_ub_custom!(fluent::const_eval_overflow, name = intrinsic_name);
+                let (result, lossy) = ScalarInt::truncate_from_int(result, left.layout.size);
+                let overflow = oflo || lossy;
+                if overflow && let Some(intrinsic) = throw_ub_on_overflow {
+                    throw_ub!(ArithOverflow { intrinsic });
                 }
-                return Ok((ImmTy::from_uint(truncated, left_layout), overflow));
+                let res = ImmTy::from_scalar_int(result, left.layout);
+                return Ok(if with_overflow {
+                    let overflow = ImmTy::from_bool(overflow, *self.tcx);
+                    ImmTy::from_pair(res, overflow, *self.tcx)
+                } else {
+                    res
+                });
             }
         }
+        // From here on it's okay to treat everything as unsigned.
+        let l = l_unsigned();
+        let r = r_unsigned();
 
         if bin_op == Cmp {
             return Ok(self.three_way_compare(l, r));
         }
 
-        let val = match bin_op {
+        Ok(match bin_op {
             Eq => ImmTy::from_bool(l == r, *self.tcx),
             Ne => ImmTy::from_bool(l != r, *self.tcx),
 
@@ -297,44 +251,46 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             Gt => ImmTy::from_bool(l > r, *self.tcx),
             Ge => ImmTy::from_bool(l >= r, *self.tcx),
 
-            BitOr => ImmTy::from_uint(l | r, left_layout),
-            BitAnd => ImmTy::from_uint(l & r, left_layout),
-            BitXor => ImmTy::from_uint(l ^ r, left_layout),
+            BitOr => ImmTy::from_uint(l | r, left.layout),
+            BitAnd => ImmTy::from_uint(l & r, left.layout),
+            BitXor => ImmTy::from_uint(l ^ r, left.layout),
 
-            Add | AddUnchecked | Sub | SubUnchecked | Mul | MulUnchecked | Rem | Div => {
-                assert!(!left_layout.abi.is_signed());
+            _ => {
+                assert!(!left.layout.abi.is_signed());
                 let op: fn(u128, u128) -> (u128, bool) = match bin_op {
-                    Add | AddUnchecked => u128::overflowing_add,
-                    Sub | SubUnchecked => u128::overflowing_sub,
-                    Mul | MulUnchecked => u128::overflowing_mul,
+                    Add | AddUnchecked | AddWithOverflow => u128::overflowing_add,
+                    Sub | SubUnchecked | SubWithOverflow => u128::overflowing_sub,
+                    Mul | MulUnchecked | MulWithOverflow => u128::overflowing_mul,
                     Div if r == 0 => throw_ub!(DivisionByZero),
                     Rem if r == 0 => throw_ub!(RemainderByZero),
                     Div => u128::overflowing_div,
                     Rem => u128::overflowing_rem,
-                    _ => bug!(),
+                    _ => span_bug!(
+                        self.cur_span(),
+                        "invalid binary op {:?}: {:?}, {:?} (both {})",
+                        bin_op,
+                        left,
+                        right,
+                        right.layout.ty,
+                    ),
                 };
                 let (result, oflo) = op(l, r);
                 // Truncate to target type.
                 // If that truncation loses any information, we have an overflow.
-                let truncated = self.truncate(result, left_layout);
-                let overflow = oflo || truncated != result;
-                if overflow && let Some(intrinsic_name) = throw_ub_on_overflow {
-                    throw_ub_custom!(fluent::const_eval_overflow, name = intrinsic_name);
+                let (result, lossy) = ScalarInt::truncate_from_uint(result, left.layout.size);
+                let overflow = oflo || lossy;
+                if overflow && let Some(intrinsic) = throw_ub_on_overflow {
+                    throw_ub!(ArithOverflow { intrinsic });
+                }
+                let res = ImmTy::from_scalar_int(result, left.layout);
+                if with_overflow {
+                    let overflow = ImmTy::from_bool(overflow, *self.tcx);
+                    ImmTy::from_pair(res, overflow, *self.tcx)
+                } else {
+                    res
                 }
-                return Ok((ImmTy::from_uint(truncated, left_layout), overflow));
             }
-
-            _ => span_bug!(
-                self.cur_span(),
-                "invalid binary op {:?}: {:?}, {:?} (both {})",
-                bin_op,
-                l,
-                r,
-                right_layout.ty,
-            ),
-        };
-
-        Ok((val, false))
+        })
     }
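
Both the signed and unsigned paths detect overflow the same way: operate at `i128`/`u128` width, truncate to the target size, and flag the operation if the truncation was lossy. A self-contained model for `i8` addition:

    fn add_i8_via_i128(l: i8, r: i8) -> (i8, bool) {
        let (wide, oflo) = (l as i128).overflowing_add(r as i128); // cannot overflow at i128 width
        let truncated = wide as i8;
        let lossy = i128::from(truncated) != wide; // lossy truncation == overflow in i8
        (truncated, oflo || lossy)
    }

    fn main() {
        assert_eq!(add_i8_via_i128(100, 100), (-56, true)); // wraps and is flagged
        assert_eq!(add_i8_via_i128(-1, -1), (-2, false));
    }
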
 
     fn binary_ptr_op(
@@ -342,7 +298,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         bin_op: mir::BinOp,
         left: &ImmTy<'tcx, M::Provenance>,
         right: &ImmTy<'tcx, M::Provenance>,
-    ) -> InterpResult<'tcx, (ImmTy<'tcx, M::Provenance>, bool)> {
+    ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
         use rustc_middle::mir::BinOp::*;
 
         match bin_op {
@@ -350,7 +306,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             Offset => {
                 let ptr = left.to_scalar().to_pointer(self)?;
                 let offset_count = right.to_scalar().to_target_isize(self)?;
-                let pointee_ty = left.layout.ty.builtin_deref(true).unwrap().ty;
+                let pointee_ty = left.layout.ty.builtin_deref(true).unwrap();
 
                 // We cannot overflow i64 as a type's size must be <= isize::MAX.
                 let pointee_size = i64::try_from(self.layout_of(pointee_ty)?.size.bytes()).unwrap();
@@ -361,10 +317,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                     offset_count.checked_mul(pointee_size).ok_or(err_ub!(PointerArithOverflow))?;
 
                 let offset_ptr = self.ptr_offset_inbounds(ptr, offset_bytes)?;
-                Ok((
-                    ImmTy::from_scalar(Scalar::from_maybe_pointer(offset_ptr, self), left.layout),
-                    false,
-                ))
+                Ok(ImmTy::from_scalar(Scalar::from_maybe_pointer(offset_ptr, self), left.layout))
             }
 
             // Fall back to machine hook so Miri can support more pointer ops.
@@ -372,13 +325,15 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         }
     }
 
-    /// Returns the result of the specified operation, and whether it overflowed.
-    pub fn overflowing_binary_op(
+    /// Returns the result of the specified operation.
+    ///
+    /// Whether this produces a scalar or a pair depends on the specific `bin_op`.
+    pub fn binary_op(
         &self,
         bin_op: mir::BinOp,
         left: &ImmTy<'tcx, M::Provenance>,
         right: &ImmTy<'tcx, M::Provenance>,
-    ) -> InterpResult<'tcx, (ImmTy<'tcx, M::Provenance>, bool)> {
+    ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
         trace!(
             "Running binary op {:?}: {:?} ({}), {:?} ({})",
             bin_op,
@@ -427,9 +382,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                     right.layout.ty
                 );
 
-                let l = left.to_scalar().to_bits(left.layout.size)?;
-                let r = right.to_scalar().to_bits(right.layout.size)?;
-                self.binary_int_op(bin_op, l, left.layout, r, right.layout)
+                self.binary_int_op(bin_op, left, right)
             }
             _ if left.layout.ty.is_any_ptr() => {
                 // The RHS type must be a `pointer` *or an integer type* (for `Offset`).
@@ -452,77 +405,74 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         }
     }
 
-    #[inline]
-    pub fn wrapping_binary_op(
-        &self,
-        bin_op: mir::BinOp,
-        left: &ImmTy<'tcx, M::Provenance>,
-        right: &ImmTy<'tcx, M::Provenance>,
-    ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
-        let (val, _overflow) = self.overflowing_binary_op(bin_op, left, right)?;
-        Ok(val)
-    }
-
     /// Returns the result of the specified operation.
-    pub fn overflowing_unary_op(
+    pub fn unary_op(
         &self,
         un_op: mir::UnOp,
         val: &ImmTy<'tcx, M::Provenance>,
-    ) -> InterpResult<'tcx, (ImmTy<'tcx, M::Provenance>, bool)> {
+    ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
         use rustc_middle::mir::UnOp::*;
 
         let layout = val.layout;
-        let val = val.to_scalar();
         trace!("Running unary op {:?}: {:?} ({})", un_op, val, layout.ty);
 
         match layout.ty.kind() {
             ty::Bool => {
+                let val = val.to_scalar();
                 let val = val.to_bool()?;
                 let res = match un_op {
                     Not => !val,
                     _ => span_bug!(self.cur_span(), "Invalid bool op {:?}", un_op),
                 };
-                Ok((ImmTy::from_bool(res, *self.tcx), false))
+                Ok(ImmTy::from_bool(res, *self.tcx))
             }
             ty::Float(fty) => {
+                let val = val.to_scalar();
                 // No NaN adjustment here, `-` is a bitwise operation!
                 let res = match (un_op, fty) {
                     (Neg, FloatTy::F32) => Scalar::from_f32(-val.to_f32()?),
                     (Neg, FloatTy::F64) => Scalar::from_f64(-val.to_f64()?),
                     _ => span_bug!(self.cur_span(), "Invalid float op {:?}", un_op),
                 };
-                Ok((ImmTy::from_scalar(res, layout), false))
+                Ok(ImmTy::from_scalar(res, layout))
             }
-            _ => {
-                assert!(layout.ty.is_integral());
+            _ if layout.ty.is_integral() => {
+                let val = val.to_scalar();
                 let val = val.to_bits(layout.size)?;
-                let (res, overflow) = match un_op {
-                    Not => (self.truncate(!val, layout), false), // bitwise negation, then truncate
+                let res = match un_op {
+                    Not => self.truncate(!val, layout), // bitwise negation, then truncate
                     Neg => {
                         // arithmetic negation
                         assert!(layout.abi.is_signed());
                         let val = self.sign_extend(val, layout) as i128;
-                        let (res, overflow) = val.overflowing_neg();
+                        let res = val.wrapping_neg();
                         let res = res as u128;
                         // Truncate to target type.
-                        // If that truncation loses any information, we have an overflow.
-                        let truncated = self.truncate(res, layout);
-                        (truncated, overflow || self.sign_extend(truncated, layout) != res)
+                        self.truncate(res, layout)
                     }
+                    _ => span_bug!(self.cur_span(), "Invalid integer op {:?}", un_op),
                 };
-                Ok((ImmTy::from_uint(res, layout), overflow))
+                Ok(ImmTy::from_uint(res, layout))
+            }
+            ty::RawPtr(..) => {
+                assert_eq!(un_op, PtrMetadata);
+                let (_, meta) = val.to_scalar_and_meta();
+                Ok(match meta {
+                    MemPlaceMeta::Meta(scalar) => {
+                        let ty = un_op.ty(*self.tcx, val.layout.ty);
+                        let layout = self.layout_of(ty)?;
+                        ImmTy::from_scalar(scalar, layout)
+                    }
+                    MemPlaceMeta::None => {
+                        let unit_layout = self.layout_of(self.tcx.types.unit)?;
+                        ImmTy::uninit(unit_layout)
+                    }
+                })
+            }
+            _ => {
+                bug!("Unexpected unary op argument {val:?}")
             }
         }
     }
-
-    #[inline]
-    pub fn wrapping_unary_op(
-        &self,
-        un_op: mir::UnOp,
-        val: &ImmTy<'tcx, M::Provenance>,
-    ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
-        let (val, _overflow) = self.overflowing_unary_op(un_op, val)?;
-        Ok(val)
-    }
 }
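
With the `overflowing_*`/`wrapping_*` pairs gone, callers select the result arity through the operator itself. A hedged sketch of the renamed entry points (`ecx`, `x`, `y`, and a wide-pointer immediate `p` are assumed to be in scope):

    let sum = ecx.binary_op(mir::BinOp::Add, &x, &y)?;              // single scalar
    let pair = ecx.binary_op(mir::BinOp::AddWithOverflow, &x, &y)?; // ScalarPair of (sum, overflowed)
    let (sum2, overflowed) = pair.to_pair(ecx);
    let len = ecx.unary_op(mir::UnOp::PtrMetadata, &p)?;            // e.g. a slice's length
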
diff --git a/compiler/rustc_const_eval/src/interpret/place.rs b/compiler/rustc_const_eval/src/interpret/place.rs
index 1549eddabbc..4a86ec3f57a 100644
--- a/compiler/rustc_const_eval/src/interpret/place.rs
+++ b/compiler/rustc_const_eval/src/interpret/place.rs
@@ -5,18 +5,21 @@
 use std::assert_matches::assert_matches;
 
 use either::{Either, Left, Right};
+use tracing::{instrument, trace};
 
 use rustc_ast::Mutability;
 use rustc_middle::mir;
 use rustc_middle::ty;
 use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
 use rustc_middle::ty::Ty;
+use rustc_middle::{bug, span_bug};
 use rustc_target::abi::{Abi, Align, HasDataLayout, Size};
 
 use super::{
-    alloc_range, mir_assign_valid_types, AllocRef, AllocRefMut, CheckAlignMsg, CtfeProvenance,
-    ImmTy, Immediate, InterpCx, InterpResult, Machine, MemoryKind, Misalignment, OffsetMode, OpTy,
-    Operand, Pointer, PointerArithmetic, Projectable, Provenance, Readable, Scalar,
+    alloc_range, mir_assign_valid_types, throw_ub, AllocRef, AllocRefMut, CheckAlignMsg,
+    CtfeProvenance, ImmTy, Immediate, InterpCx, InterpResult, Machine, MemoryKind, Misalignment,
+    OffsetMode, OpTy, Operand, Pointer, PointerArithmetic, Projectable, Provenance, Readable,
+    Scalar,
 };
 
 #[derive(Copy, Clone, Hash, PartialEq, Eq, Debug)]
@@ -74,12 +77,12 @@ impl<Prov: Provenance> MemPlace<Prov> {
 
     #[inline]
     // Not called `offset_with_meta` to avoid confusion with the trait method.
-    fn offset_with_meta_<'mir, 'tcx, M: Machine<'mir, 'tcx, Provenance = Prov>>(
+    fn offset_with_meta_<'tcx, M: Machine<'tcx, Provenance = Prov>>(
         self,
         offset: Size,
         mode: OffsetMode,
         meta: MemPlaceMeta<Prov>,
-        ecx: &InterpCx<'mir, 'tcx, M>,
+        ecx: &InterpCx<'tcx, M>,
     ) -> InterpResult<'tcx, Self> {
         debug_assert!(
             !meta.has_meta() || self.meta.has_meta(),
@@ -159,20 +162,20 @@ impl<'tcx, Prov: Provenance> Projectable<'tcx, Prov> for MPlaceTy<'tcx, Prov> {
         self.mplace.meta
     }
 
-    fn offset_with_meta<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
+    fn offset_with_meta<M: Machine<'tcx, Provenance = Prov>>(
         &self,
         offset: Size,
         mode: OffsetMode,
         meta: MemPlaceMeta<Prov>,
         layout: TyAndLayout<'tcx>,
-        ecx: &InterpCx<'mir, 'tcx, M>,
+        ecx: &InterpCx<'tcx, M>,
     ) -> InterpResult<'tcx, Self> {
         Ok(MPlaceTy { mplace: self.mplace.offset_with_meta_(offset, mode, meta, ecx)?, layout })
     }
 
-    fn to_op<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
+    fn to_op<M: Machine<'tcx, Provenance = Prov>>(
         &self,
-        _ecx: &InterpCx<'mir, 'tcx, M>,
+        _ecx: &InterpCx<'tcx, M>,
     ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
         Ok(self.clone().into())
     }
@@ -271,13 +274,13 @@ impl<'tcx, Prov: Provenance> Projectable<'tcx, Prov> for PlaceTy<'tcx, Prov> {
         }
     }
 
-    fn offset_with_meta<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
+    fn offset_with_meta<M: Machine<'tcx, Provenance = Prov>>(
         &self,
         offset: Size,
         mode: OffsetMode,
         meta: MemPlaceMeta<Prov>,
         layout: TyAndLayout<'tcx>,
-        ecx: &InterpCx<'mir, 'tcx, M>,
+        ecx: &InterpCx<'tcx, M>,
     ) -> InterpResult<'tcx, Self> {
         Ok(match self.as_mplace_or_local() {
             Left(mplace) => mplace.offset_with_meta(offset, mode, meta, layout, ecx)?.into(),
@@ -302,9 +305,9 @@ impl<'tcx, Prov: Provenance> Projectable<'tcx, Prov> for PlaceTy<'tcx, Prov> {
         })
     }
 
-    fn to_op<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
+    fn to_op<M: Machine<'tcx, Provenance = Prov>>(
         &self,
-        ecx: &InterpCx<'mir, 'tcx, M>,
+        ecx: &InterpCx<'tcx, M>,
     ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
         ecx.place_to_op(self)
     }
@@ -338,9 +341,9 @@ pub trait Writeable<'tcx, Prov: Provenance>: Projectable<'tcx, Prov> {
         &self,
     ) -> Either<MPlaceTy<'tcx, Prov>, (mir::Local, Option<Size>, usize, TyAndLayout<'tcx>)>;
 
-    fn force_mplace<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
+    fn force_mplace<M: Machine<'tcx, Provenance = Prov>>(
         &self,
-        ecx: &mut InterpCx<'mir, 'tcx, M>,
+        ecx: &mut InterpCx<'tcx, M>,
     ) -> InterpResult<'tcx, MPlaceTy<'tcx, Prov>>;
 }
 
@@ -354,9 +357,9 @@ impl<'tcx, Prov: Provenance> Writeable<'tcx, Prov> for PlaceTy<'tcx, Prov> {
     }
 
     #[inline(always)]
-    fn force_mplace<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
+    fn force_mplace<M: Machine<'tcx, Provenance = Prov>>(
         &self,
-        ecx: &mut InterpCx<'mir, 'tcx, M>,
+        ecx: &mut InterpCx<'tcx, M>,
     ) -> InterpResult<'tcx, MPlaceTy<'tcx, Prov>> {
         ecx.force_allocation(self)
     }
@@ -371,19 +374,19 @@ impl<'tcx, Prov: Provenance> Writeable<'tcx, Prov> for MPlaceTy<'tcx, Prov> {
     }
 
     #[inline(always)]
-    fn force_mplace<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
+    fn force_mplace<M: Machine<'tcx, Provenance = Prov>>(
         &self,
-        _ecx: &mut InterpCx<'mir, 'tcx, M>,
+        _ecx: &mut InterpCx<'tcx, M>,
     ) -> InterpResult<'tcx, MPlaceTy<'tcx, Prov>> {
         Ok(self.clone())
     }
 }
 
 // FIXME: Working around https://github.com/rust-lang/rust/issues/54385
-impl<'mir, 'tcx: 'mir, Prov, M> InterpCx<'mir, 'tcx, M>
+impl<'tcx, Prov, M> InterpCx<'tcx, M>
 where
     Prov: Provenance,
-    M: Machine<'mir, 'tcx, Provenance = Prov>,
+    M: Machine<'tcx, Provenance = Prov>,
 {
     pub fn ptr_with_meta_to_mplace(
         &self,
@@ -415,7 +418,7 @@ where
         val: &ImmTy<'tcx, M::Provenance>,
     ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
         let pointee_type =
-            val.layout.ty.builtin_deref(true).expect("`ref_to_mplace` called on non-ptr type").ty;
+            val.layout.ty.builtin_deref(true).expect("`ref_to_mplace` called on non-ptr type");
         let layout = self.layout_of(pointee_type)?;
         let (ptr, meta) = val.to_scalar_and_meta();
 
@@ -1020,16 +1023,20 @@ where
     pub(super) fn unpack_dyn_trait(
         &self,
         mplace: &MPlaceTy<'tcx, M::Provenance>,
+        expected_trait: &'tcx ty::List<ty::PolyExistentialPredicate<'tcx>>,
     ) -> InterpResult<'tcx, (MPlaceTy<'tcx, M::Provenance>, Pointer<Option<M::Provenance>>)> {
         assert!(
             matches!(mplace.layout.ty.kind(), ty::Dynamic(_, _, ty::Dyn)),
             "`unpack_dyn_trait` only makes sense on `dyn` types"
         );
         let vtable = mplace.meta().unwrap_meta().to_pointer(self)?;
-        let (ty, _) = self.get_ptr_vtable(vtable)?;
-        let layout = self.layout_of(ty)?;
+        let (ty, vtable_trait) = self.get_ptr_vtable(vtable)?;
+        if expected_trait.principal() != vtable_trait {
+            throw_ub!(InvalidVTableTrait { expected_trait, vtable_trait });
+        }
         // This is a kind of transmute, from a place with unsized type and metadata to
         // a place with sized type and no metadata.
+        let layout = self.layout_of(ty)?;
         let mplace =
             MPlaceTy { mplace: MemPlace { meta: MemPlaceMeta::None, ..mplace.mplace }, layout };
         Ok((mplace, vtable))
@@ -1040,6 +1047,7 @@ where
     pub(super) fn unpack_dyn_star<P: Projectable<'tcx, M::Provenance>>(
         &self,
         val: &P,
+        expected_trait: &'tcx ty::List<ty::PolyExistentialPredicate<'tcx>>,
     ) -> InterpResult<'tcx, (P, Pointer<Option<M::Provenance>>)> {
         assert!(
             matches!(val.layout().ty.kind(), ty::Dynamic(_, _, ty::DynStar)),
@@ -1048,17 +1056,19 @@ where
         let data = self.project_field(val, 0)?;
         let vtable = self.project_field(val, 1)?;
         let vtable = self.read_pointer(&vtable.to_op(self)?)?;
-        let (ty, _) = self.get_ptr_vtable(vtable)?;
+        let (ty, vtable_trait) = self.get_ptr_vtable(vtable)?;
+        if expected_trait.principal() != vtable_trait {
+            throw_ub!(InvalidVTableTrait { expected_trait, vtable_trait });
+        }
+        // `data` is already the right thing but has the wrong type. So we transmute it.
         let layout = self.layout_of(ty)?;
-        // `data` is already the right thing but has the wrong type. So we transmute it, by
-        // projecting with offset 0.
         let data = data.transmute(layout, self)?;
         Ok((data, vtable))
     }
 }
 
 // Some nodes are used a lot. Make sure they don't unintentionally get bigger.
-#[cfg(all(any(target_arch = "x86_64", target_arch = "aarch64"), target_pointer_width = "64"))]
+#[cfg(target_pointer_width = "64")]
 mod size_asserts {
     use super::*;
     use rustc_data_structures::static_assert_size;
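
A reading aid for the place.rs hunks above: the new `expected_trait` parameter lets
`unpack_dyn_trait` and `unpack_dyn_star` verify that a vtable really belongs to the
expected trait before transmuting to the concrete type. A minimal standalone sketch
of that check; `TraitId`, `Vtable`, and `unpack` are hypothetical stand-ins, not
rustc types:

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct TraitId(u32);

struct Vtable {
    trait_id: TraitId, // which trait this vtable was built for
}

fn unpack(vtable: &Vtable, expected_trait: TraitId) -> Result<(), String> {
    if vtable.trait_id != expected_trait {
        // Corresponds to `throw_ub!(InvalidVTableTrait { .. })` in the diff:
        // using a vtable for the wrong trait is undefined behavior.
        return Err(format!("vtable is for {:?}, expected {:?}", vtable.trait_id, expected_trait));
    }
    Ok(()) // now safe to treat the data part as the vtable's concrete type
}
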
diff --git a/compiler/rustc_const_eval/src/interpret/projection.rs b/compiler/rustc_const_eval/src/interpret/projection.rs
index 5ff78f7b8c9..0e594914c3a 100644
--- a/compiler/rustc_const_eval/src/interpret/projection.rs
+++ b/compiler/rustc_const_eval/src/interpret/projection.rs
@@ -14,10 +14,16 @@ use rustc_middle::mir;
 use rustc_middle::ty;
 use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
 use rustc_middle::ty::Ty;
+use rustc_middle::{bug, span_bug};
 use rustc_target::abi::Size;
 use rustc_target::abi::{self, VariantIdx};
 
-use super::{InterpCx, InterpResult, MPlaceTy, Machine, MemPlaceMeta, OpTy, Provenance, Scalar};
+use tracing::{debug, instrument};
+
+use super::{
+    throw_ub, throw_unsup_format, InterpCx, InterpResult, MPlaceTy, Machine, MemPlaceMeta, OpTy,
+    Provenance, Scalar,
+};
 
 /// Describes the constraints placed on offset-projections.
 #[derive(Copy, Clone, Debug)]
@@ -37,9 +43,9 @@ pub trait Projectable<'tcx, Prov: Provenance>: Sized + std::fmt::Debug {
     fn meta(&self) -> MemPlaceMeta<Prov>;
 
     /// Get the length of a slice/string/array stored here.
-    fn len<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
+    fn len<M: Machine<'tcx, Provenance = Prov>>(
         &self,
-        ecx: &InterpCx<'mir, 'tcx, M>,
+        ecx: &InterpCx<'tcx, M>,
     ) -> InterpResult<'tcx, u64> {
         let layout = self.layout();
         if layout.is_unsized() {
@@ -59,29 +65,29 @@ pub trait Projectable<'tcx, Prov: Provenance>: Sized + std::fmt::Debug {
     }
 
     /// Offset the value by the given amount, replacing the layout and metadata.
-    fn offset_with_meta<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
+    fn offset_with_meta<M: Machine<'tcx, Provenance = Prov>>(
         &self,
         offset: Size,
         mode: OffsetMode,
         meta: MemPlaceMeta<Prov>,
         layout: TyAndLayout<'tcx>,
-        ecx: &InterpCx<'mir, 'tcx, M>,
+        ecx: &InterpCx<'tcx, M>,
     ) -> InterpResult<'tcx, Self>;
 
-    fn offset<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
+    fn offset<M: Machine<'tcx, Provenance = Prov>>(
         &self,
         offset: Size,
         layout: TyAndLayout<'tcx>,
-        ecx: &InterpCx<'mir, 'tcx, M>,
+        ecx: &InterpCx<'tcx, M>,
     ) -> InterpResult<'tcx, Self> {
         assert!(layout.is_sized());
         self.offset_with_meta(offset, OffsetMode::Inbounds, MemPlaceMeta::None, layout, ecx)
     }
 
-    fn transmute<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
+    fn transmute<M: Machine<'tcx, Provenance = Prov>>(
         &self,
         layout: TyAndLayout<'tcx>,
-        ecx: &InterpCx<'mir, 'tcx, M>,
+        ecx: &InterpCx<'tcx, M>,
     ) -> InterpResult<'tcx, Self> {
         assert!(self.layout().is_sized() && layout.is_sized());
         assert_eq!(self.layout().size, layout.size);
@@ -90,9 +96,9 @@ pub trait Projectable<'tcx, Prov: Provenance>: Sized + std::fmt::Debug {
 
     /// Convert this to an `OpTy`. This might be an irreversible transformation, but is useful for
     /// reading from this thing.
-    fn to_op<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
+    fn to_op<M: Machine<'tcx, Provenance = Prov>>(
         &self,
-        ecx: &InterpCx<'mir, 'tcx, M>,
+        ecx: &InterpCx<'tcx, M>,
     ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>>;
 }
 
@@ -107,9 +113,9 @@ pub struct ArrayIterator<'tcx, 'a, Prov: Provenance, P: Projectable<'tcx, Prov>>
 
 impl<'tcx, 'a, Prov: Provenance, P: Projectable<'tcx, Prov>> ArrayIterator<'tcx, 'a, Prov, P> {
     /// Should be the same `ecx` on each call, and match the one used to create the iterator.
-    pub fn next<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
+    pub fn next<M: Machine<'tcx, Provenance = Prov>>(
         &mut self,
-        ecx: &InterpCx<'mir, 'tcx, M>,
+        ecx: &InterpCx<'tcx, M>,
     ) -> InterpResult<'tcx, Option<(u64, P)>> {
         let Some(idx) = self.range.next() else { return Ok(None) };
         // We use `Wrapping` here since the offset has already been checked when the iterator was created.
@@ -127,10 +133,10 @@ impl<'tcx, 'a, Prov: Provenance, P: Projectable<'tcx, Prov>> ArrayIterator<'tcx,
 }
 
 // FIXME: Working around https://github.com/rust-lang/rust/issues/54385
-impl<'mir, 'tcx: 'mir, Prov, M> InterpCx<'mir, 'tcx, M>
+impl<'tcx, Prov, M> InterpCx<'tcx, M>
 where
     Prov: Provenance,
-    M: Machine<'mir, 'tcx, Provenance = Prov>,
+    M: Machine<'tcx, Provenance = Prov>,
 {
     /// Offset a pointer to project to a field of a struct/union. Unlike `place_field`, this is
     /// always possible without allocating, so it can take `&self`. Also return the field's layout.
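
For orientation on the `Projectable` trait in the hunks above: `offset_with_meta`
is the single required primitive, and `offset` and `transmute` are thin wrappers
over it. A simplified, self-contained model of that layering (toy `Layout` and
`Project` types, not rustc's):

#[derive(Clone, Copy, Debug)]
struct Layout {
    size: u64,
    sized: bool,
}

trait Project: Sized {
    fn layout(&self) -> Layout;

    // The one required primitive; everything below derives from it.
    fn offset_with_meta(&self, offset: u64, meta: Option<u64>, layout: Layout) -> Self;

    // Like the diff's `offset`: sized layouts only, never any metadata.
    fn offset(&self, offset: u64, layout: Layout) -> Self {
        assert!(layout.sized);
        self.offset_with_meta(offset, None, layout)
    }

    // Like the diff's `transmute`: a size-preserving projection at offset 0.
    fn transmute(&self, layout: Layout) -> Self {
        assert!(self.layout().sized && layout.sized);
        assert_eq!(self.layout().size, layout.size);
        self.offset_with_meta(0, None, layout)
    }
}
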
diff --git a/compiler/rustc_const_eval/src/interpret/step.rs b/compiler/rustc_const_eval/src/interpret/step.rs
index c3f26da8a79..d0bb821862a 100644
--- a/compiler/rustc_const_eval/src/interpret/step.rs
+++ b/compiler/rustc_const_eval/src/interpret/step.rs
@@ -3,16 +3,20 @@
 //! The main entry point is the `step` method.
 
 use either::Either;
+use tracing::{info, instrument, trace};
 
 use rustc_index::IndexSlice;
 use rustc_middle::mir;
 use rustc_middle::ty::layout::LayoutOf;
+use rustc_middle::{bug, span_bug};
 use rustc_target::abi::{FieldIdx, FIRST_VARIANT};
 
-use super::{ImmTy, InterpCx, InterpResult, Machine, PlaceTy, Projectable, Scalar};
+use super::{
+    ImmTy, Immediate, InterpCx, InterpResult, Machine, MemPlaceMeta, PlaceTy, Projectable, Scalar,
+};
 use crate::util;
 
-impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
     /// Returns `true` as long as there are more things to do.
     ///
     /// This is used by [priroda](https://github.com/oli-obk/priroda)
@@ -164,23 +168,17 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 let left = self.read_immediate(&self.eval_operand(left, layout)?)?;
                 let layout = util::binop_right_homogeneous(bin_op).then_some(left.layout);
                 let right = self.read_immediate(&self.eval_operand(right, layout)?)?;
-                self.binop_ignore_overflow(bin_op, &left, &right, &dest)?;
-            }
-
-            CheckedBinaryOp(bin_op, box (ref left, ref right)) => {
-                // Due to the extra boolean in the result, we can never reuse the `dest.layout`.
-                let left = self.read_immediate(&self.eval_operand(left, None)?)?;
-                let layout = util::binop_right_homogeneous(bin_op).then_some(left.layout);
-                let right = self.read_immediate(&self.eval_operand(right, layout)?)?;
-                self.binop_with_overflow(bin_op, &left, &right, &dest)?;
+                let result = self.binary_op(bin_op, &left, &right)?;
+                assert_eq!(result.layout, dest.layout, "layout mismatch for result of {bin_op:?}");
+                self.write_immediate(*result, &dest)?;
             }
 
             UnaryOp(un_op, ref operand) => {
                 // The operand always has the same type as the result.
                 let val = self.read_immediate(&self.eval_operand(operand, Some(dest.layout))?)?;
-                let val = self.wrapping_unary_op(un_op, &val)?;
-                assert_eq!(val.layout, dest.layout, "layout mismatch for result of {un_op:?}");
-                self.write_immediate(*val, &dest)?;
+                let result = self.unary_op(un_op, &val)?;
+                assert_eq!(result.layout, dest.layout, "layout mismatch for result of {un_op:?}");
+                self.write_immediate(*result, &dest)?;
             }
 
             Aggregate(box ref kind, ref operands) => {
@@ -303,6 +301,27 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 let variant_dest = self.project_downcast(dest, variant_index)?;
                 (variant_index, variant_dest, active_field_index)
             }
+            mir::AggregateKind::RawPtr(..) => {
+                // Pointers don't have "fields" in the normal sense, so the
+                // projection-based code below would fail either during projection
+                // or on type mismatches. Instead, build an `Immediate` from
+                // the parts and write that to the destination.
+                let [data, meta] = &operands.raw else {
+                    bug!("{kind:?} should have 2 operands, had {operands:?}");
+                };
+                let data = self.eval_operand(data, None)?;
+                let data = self.read_pointer(&data)?;
+                let meta = self.eval_operand(meta, None)?;
+                let meta = if meta.layout.is_zst() {
+                    MemPlaceMeta::None
+                } else {
+                    MemPlaceMeta::Meta(self.read_scalar(&meta)?)
+                };
+                let ptr_imm = Immediate::new_pointer_with_meta(data, meta, self);
+                let ptr = ImmTy::from_immediate(ptr_imm, dest.layout);
+                self.copy_op(&ptr, dest)?;
+                return Ok(());
+            }
             _ => (FIRST_VARIANT, dest.clone(), None),
         };
         if active_field_index.is_some() {
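
On the new `AggregateKind::RawPtr` arm above: a wide raw pointer is a
(data, metadata) pair at the value level, so the interpreter assembles one
immediate instead of projecting per-field. A toy model of that assembly step
(simplified types, not the rustc API):

#[derive(Debug, PartialEq)]
enum Meta {
    None,       // thin pointer: the metadata operand was a ZST
    Len(usize), // e.g. a slice length
}

#[derive(Debug, PartialEq)]
struct WidePtr {
    data: usize,
    meta: Meta,
}

fn build_raw_ptr(data: usize, meta_is_zst: bool, len: usize) -> WidePtr {
    // Mirrors the diff: a ZST metadata operand means a thin pointer
    // (`MemPlaceMeta::None`); otherwise the metadata scalar is kept.
    let meta = if meta_is_zst { Meta::None } else { Meta::Len(len) };
    WidePtr { data, meta }
}
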
diff --git a/compiler/rustc_const_eval/src/interpret/terminator.rs b/compiler/rustc_const_eval/src/interpret/terminator.rs
index c0e27e86d50..0649bb5617c 100644
--- a/compiler/rustc_const_eval/src/interpret/terminator.rs
+++ b/compiler/rustc_const_eval/src/interpret/terminator.rs
@@ -1,7 +1,9 @@
 use std::borrow::Cow;
 
 use either::Either;
+use tracing::trace;
 
+use rustc_middle::span_bug;
 use rustc_middle::{
     mir,
     ty::{
@@ -19,8 +21,9 @@ use rustc_target::abi::{
 use rustc_target::spec::abi::Abi;
 
 use super::{
-    CtfeProvenance, FnVal, ImmTy, InterpCx, InterpResult, MPlaceTy, Machine, OpTy, PlaceTy,
-    Projectable, Provenance, Scalar, StackPopCleanup,
+    throw_ub, throw_ub_custom, throw_unsup_format, CtfeProvenance, FnVal, ImmTy, InterpCx,
+    InterpResult, MPlaceTy, Machine, OpTy, PlaceTy, Projectable, Provenance, Scalar,
+    StackPopCleanup,
 };
 use crate::fluent_generated as fluent;
 
@@ -43,7 +46,7 @@ impl<'tcx, Prov: Provenance> FnArg<'tcx, Prov> {
     }
 }
 
-impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
     /// Make a copy of the given fn_arg. Any `InPlace` arguments degenerate to copies; no
     /// protection of the original memory occurs.
     pub fn copy_fn_arg(&self, arg: &FnArg<'tcx, M::Provenance>) -> OpTy<'tcx, M::Provenance> {
@@ -95,7 +98,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 for (const_int, target) in targets.iter() {
                     // Compare using MIR BinOp::Eq, to also support pointer values.
-                    let res = self.wrapping_binary_op(
+                    let res = self.binary_op(
                         mir::BinOp::Eq,
                         &discr,
                         &ImmTy::from_uint(const_int, discr.layout),
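
On this hunk: `SwitchInt` compares the discriminant against each listed value with
`BinOp::Eq` (now through the unified `binary_op`), falling through to the otherwise
target when nothing matches. A simplified standalone model of that dispatch:

fn switch_int(discr: u128, targets: &[(u128, usize)], otherwise: usize) -> usize {
    for &(value, block) in targets {
        // The interpreter performs this comparison via `binary_op(mir::BinOp::Eq, ..)`
        // so that pointer-valued discriminants compare correctly too.
        if discr == value {
            return block;
        }
    }
    otherwise
}
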
@@ -169,10 +172,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             }
 
             Drop { place, target, unwind, replace: _ } => {
-                let frame = self.frame();
-                let ty = place.ty(&frame.body.local_decls, *self.tcx).ty;
-                let ty = self.instantiate_from_frame_and_normalize_erasing_regions(frame, ty)?;
-                let instance = Instance::resolve_drop_in_place(*self.tcx, ty);
+                let place = self.eval_place(place)?;
+                let instance = Instance::resolve_drop_in_place(*self.tcx, place.layout.ty);
                 if let ty::InstanceDef::DropGlue(_, None) = instance.def {
                     // This is the branch we enter if and only if the dropped type has no drop glue
                     // whatsoever. This can happen as a result of monomorphizing a drop of a
@@ -181,8 +182,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                     self.go_to_block(target);
                     return Ok(());
                 }
-                let place = self.eval_place(place)?;
-                trace!("TerminatorKind::drop: {:?}, type {}", place, ty);
+                trace!("TerminatorKind::drop: {:?}, type {}", place, place.layout.ty);
                 self.drop_in_place(&place, instance, target, unwind)?;
             }
 
@@ -539,14 +539,28 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             ty::InstanceDef::Intrinsic(def_id) => {
                 assert!(self.tcx.intrinsic(def_id).is_some());
                 // FIXME: Should `InPlace` arguments be reset to uninit?
-                M::call_intrinsic(
+                if let Some(fallback) = M::call_intrinsic(
                     self,
                     instance,
                     &self.copy_fn_args(args),
                     destination,
                     target,
                     unwind,
-                )
+                )? {
+                    assert!(!self.tcx.intrinsic(fallback.def_id()).unwrap().must_be_overridden);
+                    assert!(matches!(fallback.def, ty::InstanceDef::Item(_)));
+                    return self.eval_fn_call(
+                        FnVal::Instance(fallback),
+                        (caller_abi, caller_fn_abi),
+                        args,
+                        with_caller_location,
+                        destination,
+                        target,
+                        unwind,
+                    );
+                } else {
+                    Ok(())
+                }
             }
             ty::InstanceDef::VTableShim(..)
             | ty::InstanceDef::ReifyShim(..)
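
On the intrinsic hunk above: `M::call_intrinsic` may now decline to implement an
intrinsic and instead hand back a fallback instance whose MIR body is evaluated
through the ordinary call path. A much-simplified model of that contract, with
illustrative names only (the real hook returns an optional `ty::Instance`):

enum IntrinsicOutcome {
    Handled(u64),             // the machine computed the result itself
    Fallback(fn(u64) -> u64), // evaluate this body via the normal call path
}

fn call_intrinsic(name: &str, arg: u64) -> Result<IntrinsicOutcome, String> {
    match name {
        "ctpop" => Ok(IntrinsicOutcome::Handled(arg.count_ones() as u64)),
        "identity" => Ok(IntrinsicOutcome::Fallback(|x| x)), // hypothetical fallback body
        _ => Err(format!("unsupported intrinsic `{name}`")),
    }
}

fn eval_intrinsic_call(name: &str, arg: u64) -> Result<u64, String> {
    match call_intrinsic(name, arg)? {
        IntrinsicOutcome::Handled(result) => Ok(result),
        // Like the diff's `return self.eval_fn_call(FnVal::Instance(fallback), ..)`.
        IntrinsicOutcome::Fallback(body) => Ok(body(arg)),
    }
}
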
@@ -558,6 +572,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             | ty::InstanceDef::CloneShim(..)
             | ty::InstanceDef::FnPtrAddrShim(..)
             | ty::InstanceDef::ThreadLocalShim(..)
+            | ty::InstanceDef::AsyncDropGlueCtorShim(..)
             | ty::InstanceDef::Item(_) => {
                 // We need MIR for this fn
                 let Some((body, instance)) = M::find_mir_or_eval_fn(
@@ -802,11 +817,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 let (vptr, dyn_ty, adjusted_receiver) = if let ty::Dynamic(data, _, ty::DynStar) =
                     receiver_place.layout.ty.kind()
                 {
-                    let (recv, vptr) = self.unpack_dyn_star(&receiver_place)?;
-                    let (dyn_ty, dyn_trait) = self.get_ptr_vtable(vptr)?;
-                    if dyn_trait != data.principal() {
-                        throw_ub_custom!(fluent::const_eval_dyn_star_call_vtable_mismatch);
-                    }
+                    let (recv, vptr) = self.unpack_dyn_star(&receiver_place, data)?;
+                    let (dyn_ty, _dyn_trait) = self.get_ptr_vtable(vptr)?;
 
                     (vptr, dyn_ty, recv.ptr())
                 } else {
@@ -828,7 +840,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                     let vptr = receiver_place.meta().unwrap_meta().to_pointer(self)?;
                     let (dyn_ty, dyn_trait) = self.get_ptr_vtable(vptr)?;
                     if dyn_trait != data.principal() {
-                        throw_ub_custom!(fluent::const_eval_dyn_call_vtable_mismatch);
+                        throw_ub!(InvalidVTableTrait {
+                            expected_trait: data,
+                            vtable_trait: dyn_trait,
+                        });
                     }
 
                     // It might be surprising that we use a pointer as the receiver even if this
@@ -937,14 +952,20 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         // implementation fail -- a problem shared by rustc.
         let place = self.force_allocation(place)?;
 
+        // We behave a bit differently from codegen here.
+        // Codegen creates an `InstanceDef::Virtual` with index 0 (the slot of the drop method) and
+        // then dispatches that to the normal call machinery. However, our call machinery currently
+        // only supports calling `VtblEntry::Method`; it would choke on a `MetadataDropInPlace`. So
+        // instead we do the virtual call stuff ourselves. It's easier here than in `eval_fn_call`
+        // since we can just get a place of the underlying type and use `mplace_to_ref`.
         let place = match place.layout.ty.kind() {
-            ty::Dynamic(_, _, ty::Dyn) => {
+            ty::Dynamic(data, _, ty::Dyn) => {
                 // Dropping a trait object. Need to find actual drop fn.
-                self.unpack_dyn_trait(&place)?.0
+                self.unpack_dyn_trait(&place, data)?.0
             }
-            ty::Dynamic(_, _, ty::DynStar) => {
+            ty::Dynamic(data, _, ty::DynStar) => {
                 // Dropping a `dyn*`. Need to find actual drop fn.
-                self.unpack_dyn_star(&place)?.0
+                self.unpack_dyn_star(&place, data)?.0
             }
             _ => {
                 debug_assert_eq!(
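
On the drop-dispatch comment in the last hunk: codegen calls through the vtable's
drop slot, while the interpreter first recovers the concrete type via
`unpack_dyn_trait`/`unpack_dyn_star` and then drops it directly. A toy contrast of
the two strategies (simplified types, not the rustc API):

struct Vtable {
    drop_in_place: fn(*mut ()),
    // method pointers would follow in later slots
}

fn codegen_style(vtable: &Vtable, data: *mut ()) {
    // Virtual dispatch through the drop slot (`InstanceDef::Virtual`, index 0).
    (vtable.drop_in_place)(data);
}

fn interpreter_style<T>(place: *mut T) {
    // After unpacking, the concrete `T` is known; no vtable lookup is needed.
    unsafe { std::ptr::drop_in_place(place) }
}
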
diff --git a/compiler/rustc_const_eval/src/interpret/traits.rs b/compiler/rustc_const_eval/src/interpret/traits.rs
index b603ef0d27a..244a6ba48a4 100644
--- a/compiler/rustc_const_eval/src/interpret/traits.rs
+++ b/compiler/rustc_const_eval/src/interpret/traits.rs
@@ -2,11 +2,12 @@ use rustc_middle::mir::interpret::{InterpResult, Pointer};
 use rustc_middle::ty::layout::LayoutOf;
 use rustc_middle::ty::{self, Ty, TyCtxt};
 use rustc_target::abi::{Align, Size};
+use tracing::trace;
 
 use super::util::ensure_monomorphic_enough;
 use super::{InterpCx, Machine};
 
-impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
     /// Creates a dynamic vtable for the given type and vtable origin. This is used only for
     /// objects.
     ///
diff --git a/compiler/rustc_const_eval/src/interpret/util.rs b/compiler/rustc_const_eval/src/interpret/util.rs
index c83ef14c03f..10fd6399b9a 100644
--- a/compiler/rustc_const_eval/src/interpret/util.rs
+++ b/compiler/rustc_const_eval/src/interpret/util.rs
@@ -1,5 +1,4 @@
 use crate::const_eval::{CompileTimeEvalContext, CompileTimeInterpreter, InterpretationResult};
-use crate::interpret::{MemPlaceMeta, MemoryKind};
 use rustc_hir::def_id::LocalDefId;
 use rustc_middle::mir;
 use rustc_middle::mir::interpret::{Allocation, InterpResult, Pointer};
@@ -8,8 +7,9 @@ use rustc_middle::ty::{
     self, Ty, TyCtxt, TypeSuperVisitable, TypeVisitable, TypeVisitableExt, TypeVisitor,
 };
 use std::ops::ControlFlow;
+use tracing::debug;
 
-use super::{InterpCx, MPlaceTy};
+use super::{throw_inval, InterpCx, MPlaceTy, MemPlaceMeta, MemoryKind};
 
 /// Checks whether a type contains generic parameters which must be instantiated.
 ///
@@ -82,9 +82,9 @@ where
 }
 
 impl<'tcx> InterpretationResult<'tcx> for mir::interpret::ConstAllocation<'tcx> {
-    fn make_result<'mir>(
+    fn make_result(
         mplace: MPlaceTy<'tcx>,
-        ecx: &mut InterpCx<'mir, 'tcx, CompileTimeInterpreter<'mir, 'tcx>>,
+        ecx: &mut InterpCx<'tcx, CompileTimeInterpreter<'tcx>>,
     ) -> Self {
         let alloc_id = mplace.ptr().provenance.unwrap().alloc_id();
         let alloc = ecx.memory.alloc_map.swap_remove(&alloc_id).unwrap().1;
@@ -92,8 +92,8 @@ impl<'tcx> InterpretationResult<'tcx> for mir::interpret::ConstAllocation<'tcx>
     }
 }
 
-pub(crate) fn create_static_alloc<'mir, 'tcx: 'mir>(
-    ecx: &mut CompileTimeEvalContext<'mir, 'tcx>,
+pub(crate) fn create_static_alloc<'tcx>(
+    ecx: &mut CompileTimeEvalContext<'tcx>,
     static_def_id: LocalDefId,
     layout: TyAndLayout<'tcx>,
 ) -> InterpResult<'tcx, MPlaceTy<'tcx>> {
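
On `make_result` above: the final value's allocation is moved out of the
interpreter's live allocation map via `alloc_map.swap_remove(..)`, so the result
owns the bytes and the evaluation context no longer tracks them. A toy equivalent
with standard-library types:

use std::collections::HashMap;

fn take_result_alloc(alloc_map: &mut HashMap<u64, Vec<u8>>, alloc_id: u64) -> Vec<u8> {
    // Like the diff's `ecx.memory.alloc_map.swap_remove(&alloc_id).unwrap().1`:
    // the allocation must exist, and removing it hands ownership to the result.
    alloc_map.remove(&alloc_id).expect("result allocation must exist")
}
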
diff --git a/compiler/rustc_const_eval/src/interpret/validity.rs b/compiler/rustc_const_eval/src/interpret/validity.rs
index 9911c59d4b8..e35ce9ef28d 100644
--- a/compiler/rustc_const_eval/src/interpret/validity.rs
+++ b/compiler/rustc_const_eval/src/interpret/validity.rs
@@ -8,11 +8,13 @@ use std::fmt::Write;
 use std::num::NonZero;
 
 use either::{Left, Right};
+use tracing::trace;
 
 use hir::def::DefKind;
 use rustc_ast::Mutability;
 use rustc_data_structures::fx::FxHashSet;
 use rustc_hir as hir;
+use rustc_middle::bug;
 use rustc_middle::mir::interpret::{
     ExpectedKind, InterpError, InvalidMetaKind, Misalignment, PointerKind, Provenance,
     ValidationErrorInfo, ValidationErrorKind, ValidationErrorKind::*,
@@ -27,9 +29,9 @@ use rustc_target::abi::{
 use std::hash::Hash;
 
 use super::{
-    format_interp_error, machine::AllocMap, AllocId, CheckInAllocMsg, GlobalAlloc, ImmTy,
-    Immediate, InterpCx, InterpResult, MPlaceTy, Machine, MemPlaceMeta, OpTy, Pointer, Projectable,
-    Scalar, ValueVisitor,
+    err_ub, format_interp_error, machine::AllocMap, throw_ub, AllocId, CheckInAllocMsg,
+    GlobalAlloc, ImmTy, Immediate, InterpCx, InterpResult, MPlaceTy, Machine, MemPlaceMeta, OpTy,
+    Pointer, Projectable, Scalar, ValueVisitor,
 };
 
 // for the validation errors
@@ -203,7 +205,7 @@ fn write_path(out: &mut String, path: &[PathElem]) {
     }
 }
 
-struct ValidityVisitor<'rt, 'mir, 'tcx, M: Machine<'mir, 'tcx>> {
+struct ValidityVisitor<'rt, 'tcx, M: Machine<'tcx>> {
     /// The `path` may be pushed to, but the part that is present when a function
     /// starts must not be changed!  `visit_fields` and `visit_array` rely on
     /// this stack discipline.
@@ -211,10 +213,10 @@ struct ValidityVisitor<'rt, 'mir, 'tcx, M: Machine<'mir, 'tcx>> {
     ref_tracking: Option<&'rt mut RefTracking<MPlaceTy<'tcx, M::Provenance>, Vec<PathElem>>>,
     /// `None` indicates this is not validating for CTFE (but for runtime).
     ctfe_mode: Option<CtfeValidationMode>,
-    ecx: &'rt InterpCx<'mir, 'tcx, M>,
+    ecx: &'rt InterpCx<'tcx, M>,
 }
 
-impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, 'tcx, M> {
+impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
     fn aggregate_field_path_elem(&mut self, layout: TyAndLayout<'tcx>, field: usize) -> PathElem {
         // First, check if we are projecting to a variant.
         match layout.variants {
@@ -339,16 +341,22 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
     ) -> InterpResult<'tcx> {
         let tail = self.ecx.tcx.struct_tail_erasing_lifetimes(pointee.ty, self.ecx.param_env);
         match tail.kind() {
-            ty::Dynamic(_, _, ty::Dyn) => {
+            ty::Dynamic(data, _, ty::Dyn) => {
                 let vtable = meta.unwrap_meta().to_pointer(self.ecx)?;
                 // Make sure it is a genuine vtable pointer.
-                let (_ty, _trait) = try_validation!(
+                let (_dyn_ty, dyn_trait) = try_validation!(
                     self.ecx.get_ptr_vtable(vtable),
                     self.path,
                     Ub(DanglingIntPointer(..) | InvalidVTablePointer(..)) =>
                         InvalidVTablePtr { value: format!("{vtable}") }
                 );
-                // FIXME: check if the type/trait match what ty::Dynamic says?
+                // Make sure it is for the right trait.
+                if dyn_trait != data.principal() {
+                    throw_validation_failure!(
+                        self.path,
+                        InvalidMetaWrongTrait { expected_trait: data, vtable_trait: dyn_trait }
+                    );
+                }
             }
             ty::Slice(..) | ty::Str => {
                 let _len = meta.unwrap_meta().to_target_usize(self.ecx)?;
@@ -427,6 +435,11 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
                 found_bytes: has.bytes()
             },
         );
+        // Make sure this is non-null. We checked dereferenceability above, but if `size` is zero
+        // that does not imply non-null.
+        if self.ecx.scalar_may_be_null(Scalar::from_maybe_pointer(place.ptr(), self.ecx))? {
+            throw_validation_failure!(self.path, NullPtr { ptr_kind })
+        }
         // Do not allow pointers to uninhabited types.
         if place.layout.abi.is_uninhabited() {
             let ty = place.layout.ty;
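
On the added null check: dereferenceability was verified just above, but for a
zero-sized access that check is vacuous and does not rule out the null pointer, so
validity now tests it explicitly. A toy illustration, assuming a model where
address 0 is never part of any allocation:

fn validate_ref(
    addr: usize,
    size: usize,
    alloc_start: usize,
    alloc_len: usize,
) -> Result<(), &'static str> {
    // Bounds check; trivially satisfied when `size == 0`.
    let end = addr.checked_add(size).ok_or("overflowing reference")?;
    if size > 0 && (addr < alloc_start || end > alloc_start.saturating_add(alloc_len)) {
        return Err("dangling reference");
    }
    // The newly added test: even a zero-sized reference must be non-null.
    if addr == 0 {
        return Err("null reference");
    }
    Ok(())
}
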
@@ -449,71 +462,46 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
             // `!` is a ZST and we want to validate it.
             if let Ok((alloc_id, _offset, _prov)) = self.ecx.ptr_try_get_alloc_id(place.ptr()) {
                 let mut skip_recursive_check = false;
-                // Let's see what kind of memory this points to.
-                // `unwrap` since dangling pointers have already been handled.
-                let alloc_kind = self.ecx.tcx.try_get_global_alloc(alloc_id).unwrap();
-                let alloc_actual_mutbl = match alloc_kind {
-                    GlobalAlloc::Static(did) => {
-                        // Special handling for pointers to statics (irrespective of their type).
-                        assert!(!self.ecx.tcx.is_thread_local_static(did));
-                        assert!(self.ecx.tcx.is_static(did));
-                        // Mode-specific checks
-                        match self.ctfe_mode {
-                            Some(
-                                CtfeValidationMode::Static { .. }
-                                | CtfeValidationMode::Promoted { .. },
-                            ) => {
-                                // We skip recursively checking other statics. These statics must be sound by
-                                // themselves, and the only way to get broken statics here is by using
-                                // unsafe code.
-                                // The reasons we don't check other statics is twofold. For one, in all
-                                // sound cases, the static was already validated on its own, and second, we
-                                // trigger cycle errors if we try to compute the value of the other static
-                                // and that static refers back to us (potentially through a promoted).
-                                // This could miss some UB, but that's fine.
-                                skip_recursive_check = true;
-                            }
-                            Some(CtfeValidationMode::Const { .. }) => {
-                                // We can't recursively validate `extern static`, so we better reject them.
-                                if self.ecx.tcx.is_foreign_item(did) {
-                                    throw_validation_failure!(self.path, ConstRefToExtern);
-                                }
-                            }
-                            None => {}
+                if let Some(GlobalAlloc::Static(did)) = self.ecx.tcx.try_get_global_alloc(alloc_id)
+                {
+                    let DefKind::Static { nested, .. } = self.ecx.tcx.def_kind(did) else { bug!() };
+                    // Special handling for pointers to statics (irrespective of their type).
+                    assert!(!self.ecx.tcx.is_thread_local_static(did));
+                    assert!(self.ecx.tcx.is_static(did));
+                    // Mode-specific checks
+                    match self.ctfe_mode {
+                        Some(
+                            CtfeValidationMode::Static { .. } | CtfeValidationMode::Promoted { .. },
+                        ) => {
+                            // We skip recursively checking other statics. These statics must be sound by
+                            // themselves, and the only way to get broken statics here is by using
+                            // unsafe code.
+                            // The reasons we don't check other statics is twofold. For one, in all
+                            // sound cases, the static was already validated on its own, and second, we
+                            // trigger cycle errors if we try to compute the value of the other static
+                            // and that static refers back to us (potentially through a promoted).
+                            // This could miss some UB, but that's fine.
+                            // We still walk nested allocations, as they are fundamentally part of this validation run.
+                            // This means we will also recurse into nested statics of *other*
+                            // statics, even though we do not recurse into other statics directly.
+                            // That's somewhat inconsistent but harmless.
+                            skip_recursive_check = !nested;
                         }
-                        // Return alloc mutability. For "root" statics we look at the type to account for interior
-                        // mutability; for nested statics we have no type and directly use the annotated mutability.
-                        let DefKind::Static { mutability, nested } = self.ecx.tcx.def_kind(did)
-                        else {
-                            bug!()
-                        };
-                        match (mutability, nested) {
-                            (Mutability::Mut, _) => Mutability::Mut,
-                            (Mutability::Not, true) => Mutability::Not,
-                            (Mutability::Not, false)
-                                if !self
-                                    .ecx
-                                    .tcx
-                                    .type_of(did)
-                                    .no_bound_vars()
-                                    .expect("statics should not have generic parameters")
-                                    .is_freeze(*self.ecx.tcx, ty::ParamEnv::reveal_all()) =>
-                            {
-                                Mutability::Mut
+                        Some(CtfeValidationMode::Const { .. }) => {
+                            // We can't recursively validate `extern static` items, so we'd better reject them.
+                            if self.ecx.tcx.is_foreign_item(did) {
+                                throw_validation_failure!(self.path, ConstRefToExtern);
                             }
-                            (Mutability::Not, false) => Mutability::Not,
                         }
+                        None => {}
                     }
-                    GlobalAlloc::Memory(alloc) => alloc.inner().mutability,
-                    GlobalAlloc::Function(..) | GlobalAlloc::VTable(..) => {
-                        // These are immutable, we better don't allow mutable pointers here.
-                        Mutability::Not
-                    }
-                };
+                }
+
                 // Mutability check.
                 // If this allocation has size zero, there is no actual mutability here.
                 let (size, _align, _alloc_kind) = self.ecx.get_alloc_info(alloc_id);
                 if size != Size::ZERO {
+                    let alloc_actual_mutbl = mutability(self.ecx, alloc_id);
                     // Mutable pointer to immutable memory is no good.
                     if ptr_expected_mutbl == Mutability::Mut
                         && alloc_actual_mutbl == Mutability::Not
@@ -708,27 +696,63 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
     fn in_mutable_memory(&self, op: &OpTy<'tcx, M::Provenance>) -> bool {
         if let Some(mplace) = op.as_mplace_or_imm().left() {
             if let Some(alloc_id) = mplace.ptr().provenance.and_then(|p| p.get_alloc_id()) {
-                let mutability = match self.ecx.tcx.global_alloc(alloc_id) {
-                    GlobalAlloc::Static(_) => {
-                        self.ecx.memory.alloc_map.get(alloc_id).unwrap().1.mutability
+                return mutability(self.ecx, alloc_id).is_mut();
+            }
+        }
+        false
+    }
+}
+
+/// Returns the mutability of the given allocation.
+/// For "root" statics we look at the type to account for interior mutability;
+/// for nested statics we have no type and directly use the annotated mutability.
+fn mutability<'tcx>(ecx: &InterpCx<'tcx, impl Machine<'tcx>>, alloc_id: AllocId) -> Mutability {
+    // Let's see what kind of memory this points to.
+    // We're not using `try_get_global_alloc` since dangling pointers have already been handled.
+    match ecx.tcx.global_alloc(alloc_id) {
+        GlobalAlloc::Static(did) => {
+            let DefKind::Static { mutability, nested } = ecx.tcx.def_kind(did) else { bug!() };
+            if nested {
+                assert!(
+                    ecx.memory.alloc_map.get(alloc_id).is_none(),
+                    "allocations of nested statics are already interned: {alloc_id:?}, {did:?}"
+                );
+                // Nested statics in a `static` are never interior mutable,
+                // so just use the declared mutability.
+                mutability
+            } else {
+                let mutability = match mutability {
+                    Mutability::Not
+                        if !ecx
+                            .tcx
+                            .type_of(did)
+                            .no_bound_vars()
+                            .expect("statics should not have generic parameters")
+                            .is_freeze(*ecx.tcx, ty::ParamEnv::reveal_all()) =>
+                    {
+                        Mutability::Mut
                     }
-                    GlobalAlloc::Memory(alloc) => alloc.inner().mutability,
-                    _ => span_bug!(self.ecx.tcx.span, "not a memory allocation"),
+                    _ => mutability,
                 };
-                return mutability == Mutability::Mut;
+                if let Some((_, alloc)) = ecx.memory.alloc_map.get(alloc_id) {
+                    assert_eq!(alloc.mutability, mutability);
+                }
+                mutability
             }
         }
-        false
+        GlobalAlloc::Memory(alloc) => alloc.inner().mutability,
+        GlobalAlloc::Function(..) | GlobalAlloc::VTable(..) => {
+            // These are immutable; we'd better not allow mutable pointers here.
+            Mutability::Not
+        }
     }
 }
 
-impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
-    for ValidityVisitor<'rt, 'mir, 'tcx, M>
-{
+impl<'rt, 'tcx, M: Machine<'tcx>> ValueVisitor<'tcx, M> for ValidityVisitor<'rt, 'tcx, M> {
     type V = OpTy<'tcx, M::Provenance>;
 
     #[inline(always)]
-    fn ecx(&self) -> &InterpCx<'mir, 'tcx, M> {
+    fn ecx(&self) -> &InterpCx<'tcx, M> {
         self.ecx
     }
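
On the extracted `mutability` helper above: root statics consult their type for
interior mutability (a non-`Freeze` type makes a declared-immutable static
effectively mutable), while nested statics carry no type and use their annotated
mutability as-is. A condensed standalone model of that decision table:

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum Mut {
    Not,
    Mut,
}

fn static_mutability(declared: Mut, nested: bool, type_is_freeze: bool) -> Mut {
    match (declared, nested) {
        (Mut::Mut, _) => Mut::Mut,
        (Mut::Not, true) => Mut::Not,                     // nested: trust the annotation
        (Mut::Not, false) if !type_is_freeze => Mut::Mut, // interior mutability
        (Mut::Not, false) => Mut::Not,
    }
}
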
 
@@ -809,6 +833,8 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
         trace!("visit_value: {:?}, {:?}", *op, op.layout);
 
         // Check primitive types -- the leaves of our recursive descent.
+        // We assume that the Scalar validity range does not restrict these values
+        // any further than `try_visit_primitive` does!
         if self.try_visit_primitive(op)? {
             return Ok(());
         }
@@ -918,7 +944,16 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
                 }
             }
             _ => {
-                self.walk_value(op)?; // default handler
+                // default handler
+                try_validation!(
+                    self.walk_value(op),
+                    self.path,
+                    // It's not great to catch errors here, since we can't give a very good path,
+                    // but it's better than ICEing.
+                    Ub(InvalidVTableTrait { expected_trait, vtable_trait }) => {
+                        InvalidMetaWrongTrait { expected_trait, vtable_trait: *vtable_trait }
+                    },
+                );
             }
         }
 
@@ -969,7 +1004,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
     }
 }
 
-impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
     fn validate_operand_internal(
         &self,
         op: &OpTy<'tcx, M::Provenance>,
diff --git a/compiler/rustc_const_eval/src/interpret/visitor.rs b/compiler/rustc_const_eval/src/interpret/visitor.rs
index 0e824f3f592..b812e89854b 100644
--- a/compiler/rustc_const_eval/src/interpret/visitor.rs
+++ b/compiler/rustc_const_eval/src/interpret/visitor.rs
@@ -6,17 +6,18 @@ use rustc_middle::mir::interpret::InterpResult;
 use rustc_middle::ty::{self, Ty};
 use rustc_target::abi::FieldIdx;
 use rustc_target::abi::{FieldsShape, VariantIdx, Variants};
+use tracing::trace;
 
 use std::num::NonZero;
 
-use super::{InterpCx, MPlaceTy, Machine, Projectable};
+use super::{throw_inval, InterpCx, MPlaceTy, Machine, Projectable};
 
 /// How to traverse a value and what to do when we are at the leaves.
-pub trait ValueVisitor<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>>: Sized {
+pub trait ValueVisitor<'tcx, M: Machine<'tcx>>: Sized {
     type V: Projectable<'tcx, M::Provenance> + From<MPlaceTy<'tcx, M::Provenance>>;
 
     /// The visitor must have an `InterpCx` in it.
-    fn ecx(&self) -> &InterpCx<'mir, 'tcx, M>;
+    fn ecx(&self) -> &InterpCx<'tcx, M>;
 
     /// `read_discriminant` can be hooked for better error messages.
     #[inline(always)]
@@ -88,22 +89,22 @@ pub trait ValueVisitor<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>>: Sized {
         // Special treatment for special types, where the (static) layout is not sufficient.
         match *ty.kind() {
             // If it is a trait object, switch to the real type that was used to create it.
-            ty::Dynamic(_, _, ty::Dyn) => {
+            ty::Dynamic(data, _, ty::Dyn) => {
                 // Dyn types. This is unsized, and the actual dynamic type of the data is given by the
                 // vtable stored in the place metadata.
                // Unsized values are never immediate, so we can `assert_mem_place`.
                 let op = v.to_op(self.ecx())?;
                 let dest = op.assert_mem_place();
-                let inner_mplace = self.ecx().unpack_dyn_trait(&dest)?.0;
+                let inner_mplace = self.ecx().unpack_dyn_trait(&dest, data)?.0;
                 trace!("walk_value: dyn object layout: {:#?}", inner_mplace.layout);
                 // recurse with the inner type
                 return self.visit_field(v, 0, &inner_mplace.into());
             }
-            ty::Dynamic(_, _, ty::DynStar) => {
+            ty::Dynamic(data, _, ty::DynStar) => {
                 // DynStar types. Very different from a dyn type (but strangely part of the
                 // same variant in `TyKind`): These are pairs where the 2nd component is the
                 // vtable, and the first component is the data (which must be ptr-sized).
-                let data = self.ecx().unpack_dyn_star(v)?.0;
+                let data = self.ecx().unpack_dyn_star(v, data)?.0;
                 return self.visit_field(v, 0, &data);
             }
             // Slices do not need special handling here: they have `Array` field
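
On the `walk_value` special cases above: a `dyn Trait` or `dyn*` value is visited
by first swapping in the concrete type behind the vtable, then recursing as if the
value had that type all along. A toy model of that recursion:

enum Value {
    Leaf(i64),
    // Stands in for a `dyn` value whose vtable has already been resolved to
    // the concrete inner value (what `unpack_dyn_trait` produces).
    Dyn(Box<Value>),
}

fn visit(v: &Value) -> i64 {
    match v {
        Value::Leaf(n) => *n,
        Value::Dyn(inner) => visit(inner), // recurse with the concrete inner type
    }
}
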
diff --git a/compiler/rustc_const_eval/src/lib.rs b/compiler/rustc_const_eval/src/lib.rs
index 50420aaec04..3a7c87c1cad 100644
--- a/compiler/rustc_const_eval/src/lib.rs
+++ b/compiler/rustc_const_eval/src/lib.rs
@@ -1,9 +1,3 @@
-/*!
-
-Rust MIR: a lowered representation of Rust.
-
-*/
-
 #![allow(internal_features)]
 #![allow(rustc::diagnostic_outside_of_impl)]
 #![feature(rustdoc_internals)]
@@ -11,7 +5,6 @@ Rust MIR: a lowered representation of Rust.
 #![feature(assert_matches)]
 #![feature(box_patterns)]
 #![feature(decl_macro)]
-#![feature(generic_nonzero)]
 #![feature(let_chains)]
 #![feature(slice_ptr_get)]
 #![feature(strict_provenance)]
@@ -21,15 +14,10 @@ Rust MIR: a lowered representation of Rust.
 #![feature(yeet_expr)]
 #![feature(if_let_guard)]
 
-#[macro_use]
-extern crate tracing;
-#[macro_use]
-extern crate rustc_middle;
-
+pub mod check_consts;
 pub mod const_eval;
 mod errors;
 pub mod interpret;
-pub mod transform;
 pub mod util;
 
 use std::sync::atomic::AtomicBool;
diff --git a/compiler/rustc_const_eval/src/transform/mod.rs b/compiler/rustc_const_eval/src/transform/mod.rs
deleted file mode 100644
index e3582c7d317..00000000000
--- a/compiler/rustc_const_eval/src/transform/mod.rs
+++ /dev/null
@@ -1,2 +0,0 @@
-pub mod check_consts;
-pub mod validate;
diff --git a/compiler/rustc_const_eval/src/transform/validate.rs b/compiler/rustc_const_eval/src/transform/validate.rs
deleted file mode 100644
index a499e4b980f..00000000000
--- a/compiler/rustc_const_eval/src/transform/validate.rs
+++ /dev/null
@@ -1,1380 +0,0 @@
-//! Validates the MIR to ensure that invariants are upheld.
-
-use rustc_data_structures::fx::{FxHashMap, FxHashSet};
-use rustc_index::bit_set::BitSet;
-use rustc_index::IndexVec;
-use rustc_infer::traits::Reveal;
-use rustc_middle::mir::coverage::CoverageKind;
-use rustc_middle::mir::interpret::Scalar;
-use rustc_middle::mir::visit::{NonUseContext, PlaceContext, Visitor};
-use rustc_middle::mir::*;
-use rustc_middle::ty::{self, InstanceDef, ParamEnv, Ty, TyCtxt, TypeVisitableExt, Variance};
-use rustc_target::abi::{Size, FIRST_VARIANT};
-use rustc_target::spec::abi::Abi;
-
-use crate::util::is_within_packed;
-
-use crate::util::relate_types;
-
-#[derive(Copy, Clone, Debug, PartialEq, Eq)]
-enum EdgeKind {
-    Unwind,
-    Normal,
-}
-
-pub struct Validator {
-    /// Describes at which point in the pipeline this validation is happening.
-    pub when: String,
-    /// The phase for which we are upholding the dialect. If the given phase forbids a specific
-    /// element, this validator will now emit errors if that specific element is encountered.
-    /// Note that phases that change the dialect cause all *following* phases to check the
-    /// invariants of the new dialect. A phase that changes dialects never checks the new invariants
-    /// itself.
-    pub mir_phase: MirPhase,
-}
-
-impl<'tcx> MirPass<'tcx> for Validator {
-    fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
-        // FIXME(JakobDegen): These bodies never instantiated in codegend anyway, so it's not
-        // terribly important that they pass the validator. However, I think other passes might
-        // still see them, in which case they might be surprised. It would probably be better if we
-        // didn't put this through the MIR pipeline at all.
-        if matches!(body.source.instance, InstanceDef::Intrinsic(..) | InstanceDef::Virtual(..)) {
-            return;
-        }
-        let def_id = body.source.def_id();
-        let mir_phase = self.mir_phase;
-        let param_env = match mir_phase.reveal() {
-            Reveal::UserFacing => tcx.param_env(def_id),
-            Reveal::All => tcx.param_env_reveal_all_normalized(def_id),
-        };
-
-        let can_unwind = if mir_phase <= MirPhase::Runtime(RuntimePhase::Initial) {
-            // In this case `AbortUnwindingCalls` haven't yet been executed.
-            true
-        } else if !tcx.def_kind(def_id).is_fn_like() {
-            true
-        } else {
-            let body_ty = tcx.type_of(def_id).skip_binder();
-            let body_abi = match body_ty.kind() {
-                ty::FnDef(..) => body_ty.fn_sig(tcx).abi(),
-                ty::Closure(..) => Abi::RustCall,
-                ty::CoroutineClosure(..) => Abi::RustCall,
-                ty::Coroutine(..) => Abi::Rust,
-                // No need to do MIR validation on error bodies
-                ty::Error(_) => return,
-                _ => {
-                    span_bug!(body.span, "unexpected body ty: {:?} phase {:?}", body_ty, mir_phase)
-                }
-            };
-
-            ty::layout::fn_can_unwind(tcx, Some(def_id), body_abi)
-        };
-
-        let mut cfg_checker = CfgChecker {
-            when: &self.when,
-            body,
-            tcx,
-            mir_phase,
-            unwind_edge_count: 0,
-            reachable_blocks: traversal::reachable_as_bitset(body),
-            value_cache: FxHashSet::default(),
-            can_unwind,
-        };
-        cfg_checker.visit_body(body);
-        cfg_checker.check_cleanup_control_flow();
-
-        // Also run the TypeChecker.
-        for (location, msg) in validate_types(tcx, self.mir_phase, param_env, body, body) {
-            cfg_checker.fail(location, msg);
-        }
-
-        if let MirPhase::Runtime(_) = body.phase {
-            if let ty::InstanceDef::Item(_) = body.source.instance {
-                if body.has_free_regions() {
-                    cfg_checker.fail(
-                        Location::START,
-                        format!("Free regions in optimized {} MIR", body.phase.name()),
-                    );
-                }
-            }
-        }
-
-        // Enforce that coroutine-closure layouts are identical.
-        if let Some(layout) = body.coroutine_layout_raw()
-            && let Some(by_move_body) = body.coroutine_by_move_body()
-            && let Some(by_move_layout) = by_move_body.coroutine_layout_raw()
-        {
-            // FIXME(async_closures): We could do other validation here?
-            if layout.variant_fields.len() != by_move_layout.variant_fields.len() {
-                cfg_checker.fail(
-                    Location::START,
-                    format!(
-                        "Coroutine layout has different number of variant fields from \
-                        by-move coroutine layout:\n\
-                        layout: {layout:#?}\n\
-                        by_move_layout: {by_move_layout:#?}",
-                    ),
-                );
-            }
-        }
-    }
-}
-
-struct CfgChecker<'a, 'tcx> {
-    when: &'a str,
-    body: &'a Body<'tcx>,
-    tcx: TyCtxt<'tcx>,
-    mir_phase: MirPhase,
-    unwind_edge_count: usize,
-    reachable_blocks: BitSet<BasicBlock>,
-    value_cache: FxHashSet<u128>,
-    // If `false`, then the MIR must not contain `UnwindAction::Continue` or
-    // `TerminatorKind::Resume`.
-    can_unwind: bool,
-}
-
-impl<'a, 'tcx> CfgChecker<'a, 'tcx> {
-    #[track_caller]
-    fn fail(&self, location: Location, msg: impl AsRef<str>) {
-        // We might see broken MIR when other errors have already occurred.
-        assert!(
-            self.tcx.dcx().has_errors().is_some(),
-            "broken MIR in {:?} ({}) at {:?}:\n{}",
-            self.body.source.instance,
-            self.when,
-            location,
-            msg.as_ref(),
-        );
-    }
-
-    fn check_edge(&mut self, location: Location, bb: BasicBlock, edge_kind: EdgeKind) {
-        if bb == START_BLOCK {
-            self.fail(location, "start block must not have predecessors")
-        }
-        if let Some(bb) = self.body.basic_blocks.get(bb) {
-            let src = self.body.basic_blocks.get(location.block).unwrap();
-            match (src.is_cleanup, bb.is_cleanup, edge_kind) {
-                // Non-cleanup blocks can jump to non-cleanup blocks along non-unwind edges
-                (false, false, EdgeKind::Normal)
-                // Cleanup blocks can jump to cleanup blocks along non-unwind edges
-                | (true, true, EdgeKind::Normal) => {}
-                // Non-cleanup blocks can jump to cleanup blocks along unwind edges
-                (false, true, EdgeKind::Unwind) => {
-                    self.unwind_edge_count += 1;
-                }
-                // All other jumps are invalid
-                _ => {
-                    self.fail(
-                        location,
-                        format!(
-                            "{:?} edge to {:?} violates unwind invariants (cleanup {:?} -> {:?})",
-                            edge_kind,
-                            bb,
-                            src.is_cleanup,
-                            bb.is_cleanup,
-                        )
-                    )
-                }
-            }
-        } else {
-            self.fail(location, format!("encountered jump to invalid basic block {bb:?}"))
-        }
-    }
-
-    fn check_cleanup_control_flow(&self) {
-        if self.unwind_edge_count <= 1 {
-            return;
-        }
-        let doms = self.body.basic_blocks.dominators();
-        let mut post_contract_node = FxHashMap::default();
-        // Reusing the allocation across invocations of the closure
-        let mut dom_path = vec![];
-        let mut get_post_contract_node = |mut bb| {
-            let root = loop {
-                if let Some(root) = post_contract_node.get(&bb) {
-                    break *root;
-                }
-                let parent = doms.immediate_dominator(bb).unwrap();
-                dom_path.push(bb);
-                if !self.body.basic_blocks[parent].is_cleanup {
-                    break bb;
-                }
-                bb = parent;
-            };
-            for bb in dom_path.drain(..) {
-                post_contract_node.insert(bb, root);
-            }
-            root
-        };
-
-        let mut parent = IndexVec::from_elem(None, &self.body.basic_blocks);
-        for (bb, bb_data) in self.body.basic_blocks.iter_enumerated() {
-            if !bb_data.is_cleanup || !self.reachable_blocks.contains(bb) {
-                continue;
-            }
-            let bb = get_post_contract_node(bb);
-            for s in bb_data.terminator().successors() {
-                let s = get_post_contract_node(s);
-                if s == bb {
-                    continue;
-                }
-                let parent = &mut parent[bb];
-                match parent {
-                    None => {
-                        *parent = Some(s);
-                    }
-                    Some(e) if *e == s => (),
-                    Some(e) => self.fail(
-                        Location { block: bb, statement_index: 0 },
-                        format!(
-                            "Cleanup control flow violation: The blocks dominated by {:?} have edges to both {:?} and {:?}",
-                            bb,
-                            s,
-                            *e
-                        )
-                    ),
-                }
-            }
-        }
-
-        // Check for cycles
-        let mut stack = FxHashSet::default();
-        for i in 0..parent.len() {
-            let mut bb = BasicBlock::from_usize(i);
-            stack.clear();
-            stack.insert(bb);
-            loop {
-                let Some(parent) = parent[bb].take() else { break };
-                let no_cycle = stack.insert(parent);
-                if !no_cycle {
-                    self.fail(
-                        Location { block: bb, statement_index: 0 },
-                        format!(
-                            "Cleanup control flow violation: Cycle involving edge {bb:?} -> {parent:?}",
-                        ),
-                    );
-                    break;
-                }
-                bb = parent;
-            }
-        }
-    }
-
-    fn check_unwind_edge(&mut self, location: Location, unwind: UnwindAction) {
-        let is_cleanup = self.body.basic_blocks[location.block].is_cleanup;
-        match unwind {
-            UnwindAction::Cleanup(unwind) => {
-                if is_cleanup {
-                    self.fail(location, "`UnwindAction::Cleanup` in cleanup block");
-                }
-                self.check_edge(location, unwind, EdgeKind::Unwind);
-            }
-            UnwindAction::Continue => {
-                if is_cleanup {
-                    self.fail(location, "`UnwindAction::Continue` in cleanup block");
-                }
-
-                if !self.can_unwind {
-                    self.fail(location, "`UnwindAction::Continue` in no-unwind function");
-                }
-            }
-            UnwindAction::Terminate(UnwindTerminateReason::InCleanup) => {
-                if !is_cleanup {
-                    self.fail(
-                        location,
-                        "`UnwindAction::Terminate(InCleanup)` in a non-cleanup block",
-                    );
-                }
-            }
-            // These are allowed everywhere.
-            UnwindAction::Unreachable | UnwindAction::Terminate(UnwindTerminateReason::Abi) => (),
-        }
-    }
-
-    fn is_critical_call_edge(&self, target: Option<BasicBlock>, unwind: UnwindAction) -> bool {
-        let Some(target) = target else { return false };
-        matches!(unwind, UnwindAction::Cleanup(_) | UnwindAction::Terminate(_))
-            && self.body.basic_blocks.predecessors()[target].len() > 1
-    }
-}
-
-impl<'a, 'tcx> Visitor<'tcx> for CfgChecker<'a, 'tcx> {
-    fn visit_local(&mut self, local: Local, _context: PlaceContext, location: Location) {
-        if self.body.local_decls.get(local).is_none() {
-            self.fail(
-                location,
-                format!("local {local:?} has no corresponding declaration in `body.local_decls`"),
-            );
-        }
-    }
-
-    fn visit_statement(&mut self, statement: &Statement<'tcx>, location: Location) {
-        match &statement.kind {
-            StatementKind::AscribeUserType(..) => {
-                if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
-                    self.fail(
-                        location,
-                        "`AscribeUserType` should have been removed after drop lowering phase",
-                    );
-                }
-            }
-            StatementKind::FakeRead(..) => {
-                if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
-                    self.fail(
-                        location,
-                        "`FakeRead` should have been removed after drop lowering phase",
-                    );
-                }
-            }
-            StatementKind::SetDiscriminant { .. } => {
-                if self.mir_phase < MirPhase::Runtime(RuntimePhase::Initial) {
-                    self.fail(location, "`SetDiscriminant` is not allowed until deaggregation");
-                }
-            }
-            StatementKind::Deinit(..) => {
-                if self.mir_phase < MirPhase::Runtime(RuntimePhase::Initial) {
-                    self.fail(location, "`Deinit` is not allowed until deaggregation");
-                }
-            }
-            StatementKind::Retag(kind, _) => {
-                // FIXME(JakobDegen) The validator should check that `self.mir_phase <
-                // DropsLowered`. However, this causes ICEs with generation of drop shims, which
-                // seem to fail to set their `MirPhase` correctly.
-                if matches!(kind, RetagKind::TwoPhase) {
-                    self.fail(location, format!("explicit `{kind:?}` is forbidden"));
-                }
-            }
-            StatementKind::Coverage(kind) => {
-                if self.mir_phase >= MirPhase::Analysis(AnalysisPhase::PostCleanup)
-                    && let CoverageKind::BlockMarker { .. } | CoverageKind::SpanMarker { .. } = kind
-                {
-                    self.fail(
-                        location,
-                        format!("{kind:?} should have been removed after analysis"),
-                    );
-                }
-            }
-            StatementKind::Assign(..)
-            | StatementKind::StorageLive(_)
-            | StatementKind::StorageDead(_)
-            | StatementKind::Intrinsic(_)
-            | StatementKind::ConstEvalCounter
-            | StatementKind::PlaceMention(..)
-            | StatementKind::Nop => {}
-        }
-
-        self.super_statement(statement, location);
-    }
-
-    fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, location: Location) {
-        match &terminator.kind {
-            TerminatorKind::Goto { target } => {
-                self.check_edge(location, *target, EdgeKind::Normal);
-            }
-            TerminatorKind::SwitchInt { targets, discr: _ } => {
-                for (_, target) in targets.iter() {
-                    self.check_edge(location, target, EdgeKind::Normal);
-                }
-                self.check_edge(location, targets.otherwise(), EdgeKind::Normal);
-
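-                // Detect duplicate switch values by collecting them into the
-                // cache: fewer cached values than targets means a repeat.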
-                self.value_cache.clear();
-                self.value_cache.extend(targets.iter().map(|(value, _)| value));
-                let has_duplicates = targets.iter().len() != self.value_cache.len();
-                if has_duplicates {
-                    self.fail(
-                        location,
-                        format!(
-                            "duplicated values in `SwitchInt` terminator: {:?}",
-                            terminator.kind,
-                        ),
-                    );
-                }
-            }
-            TerminatorKind::Drop { target, unwind, .. } => {
-                self.check_edge(location, *target, EdgeKind::Normal);
-                self.check_unwind_edge(location, *unwind);
-            }
-            TerminatorKind::Call { args, destination, target, unwind, .. } => {
-                if let Some(target) = target {
-                    self.check_edge(location, *target, EdgeKind::Normal);
-                }
-                self.check_unwind_edge(location, *unwind);
-
-                // The code generation assumes that there are no critical call edges. The assumption
-                // is used to simplify inserting code that should be executed along the return edge
-                // from the call. FIXME(tmiasko): Since this is a strictly code generation concern,
-                // the code generation should be responsible for handling it.
-                if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Optimized)
-                    && self.is_critical_call_edge(*target, *unwind)
-                {
-                    self.fail(
-                        location,
-                        format!(
-                            "encountered critical edge in `Call` terminator {:?}",
-                            terminator.kind,
-                        ),
-                    );
-                }
-
-                // The call destination place and Operand::Move place used as an argument might be
-                // passed by a reference to the callee. Consequently they cannot be packed.
-                if is_within_packed(self.tcx, &self.body.local_decls, *destination).is_some() {
-                    // This is bad! The callee will expect the memory to be aligned.
-                    self.fail(
-                        location,
-                        format!(
-                            "encountered packed place in `Call` terminator destination: {:?}",
-                            terminator.kind,
-                        ),
-                    );
-                }
-                for arg in args {
-                    if let Operand::Move(place) = &arg.node {
-                        if is_within_packed(self.tcx, &self.body.local_decls, *place).is_some() {
-                            // This is bad! The callee will expect the memory to be aligned.
-                            self.fail(
-                                location,
-                                format!(
-                                    "encountered `Move` of a packed place in `Call` terminator: {:?}",
-                                    terminator.kind,
-                                ),
-                            );
-                        }
-                    }
-                }
-            }
-            TerminatorKind::Assert { target, unwind, .. } => {
-                self.check_edge(location, *target, EdgeKind::Normal);
-                self.check_unwind_edge(location, *unwind);
-            }
-            TerminatorKind::Yield { resume, drop, .. } => {
-                if self.body.coroutine.is_none() {
-                    self.fail(location, "`Yield` cannot appear outside coroutine bodies");
-                }
-                if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
-                    self.fail(location, "`Yield` should have been replaced by coroutine lowering");
-                }
-                self.check_edge(location, *resume, EdgeKind::Normal);
-                if let Some(drop) = drop {
-                    self.check_edge(location, *drop, EdgeKind::Normal);
-                }
-            }
-            TerminatorKind::FalseEdge { real_target, imaginary_target } => {
-                if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
-                    self.fail(
-                        location,
-                        "`FalseEdge` should have been removed after drop elaboration",
-                    );
-                }
-                self.check_edge(location, *real_target, EdgeKind::Normal);
-                self.check_edge(location, *imaginary_target, EdgeKind::Normal);
-            }
-            TerminatorKind::FalseUnwind { real_target, unwind } => {
-                if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
-                    self.fail(
-                        location,
-                        "`FalseUnwind` should have been removed after drop elaboration",
-                    );
-                }
-                self.check_edge(location, *real_target, EdgeKind::Normal);
-                self.check_unwind_edge(location, *unwind);
-            }
-            TerminatorKind::InlineAsm { targets, unwind, .. } => {
-                for &target in targets {
-                    self.check_edge(location, target, EdgeKind::Normal);
-                }
-                self.check_unwind_edge(location, *unwind);
-            }
-            TerminatorKind::CoroutineDrop => {
-                if self.body.coroutine.is_none() {
-                    self.fail(location, "`CoroutineDrop` cannot appear outside coroutine bodies");
-                }
-                if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
-                    self.fail(
-                        location,
-                        "`CoroutineDrop` should have been replaced by coroutine lowering",
-                    );
-                }
-            }
-            TerminatorKind::UnwindResume => {
-                let bb = location.block;
-                if !self.body.basic_blocks[bb].is_cleanup {
-                    self.fail(location, "Cannot `UnwindResume` from non-cleanup basic block")
-                }
-                if !self.can_unwind {
-                    self.fail(location, "Cannot `UnwindResume` in a function that cannot unwind")
-                }
-            }
-            TerminatorKind::UnwindTerminate(_) => {
-                let bb = location.block;
-                if !self.body.basic_blocks[bb].is_cleanup {
-                    self.fail(location, "Cannot `UnwindTerminate` from non-cleanup basic block")
-                }
-            }
-            TerminatorKind::Return => {
-                let bb = location.block;
-                if self.body.basic_blocks[bb].is_cleanup {
-                    self.fail(location, "Cannot `Return` from cleanup basic block")
-                }
-            }
-            TerminatorKind::Unreachable => {}
-        }
-
-        self.super_terminator(terminator, location);
-    }
-
-    fn visit_source_scope(&mut self, scope: SourceScope) {
-        if self.body.source_scopes.get(scope).is_none() {
-            self.tcx.dcx().span_bug(
-                self.body.span,
-                format!(
-                    "broken MIR in {:?} ({}):\ninvalid source scope {:?}",
-                    self.body.source.instance, self.when, scope,
-                ),
-            );
-        }
-    }
-}
-
-/// A faster version of the validation pass that only checks those things which may break when
-/// instantiating any generic parameters.
-///
-/// `caller_body` is used to detect cycles in MIR inlining and MIR validation before
-/// `optimized_mir` is available.
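-///
-/// Returns every detected failure together with the `Location` at which it occurred.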
-pub fn validate_types<'tcx>(
-    tcx: TyCtxt<'tcx>,
-    mir_phase: MirPhase,
-    param_env: ty::ParamEnv<'tcx>,
-    body: &Body<'tcx>,
-    caller_body: &Body<'tcx>,
-) -> Vec<(Location, String)> {
-    let mut type_checker =
-        TypeChecker { body, caller_body, tcx, param_env, mir_phase, failures: Vec::new() };
-    type_checker.visit_body(body);
-    type_checker.failures
-}
-
-struct TypeChecker<'a, 'tcx> {
-    body: &'a Body<'tcx>,
-    caller_body: &'a Body<'tcx>,
-    tcx: TyCtxt<'tcx>,
-    param_env: ParamEnv<'tcx>,
-    mir_phase: MirPhase,
-    failures: Vec<(Location, String)>,
-}
-
-impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
-    fn fail(&mut self, location: Location, msg: impl Into<String>) {
-        self.failures.push((location, msg.into()));
-    }
-
-    /// Check if `src` can be assigned into `dest`.
-    /// This is not precise; it will accept some incorrect assignments.
-    fn mir_assign_valid_types(&self, src: Ty<'tcx>, dest: Ty<'tcx>) -> bool {
-        // Fast path before we normalize.
-        if src == dest {
-            // Equal types, all is good.
-            return true;
-        }
-
-        // We sometimes have to use `defining_opaque_types` for subtyping
-        // to succeed here and figuring out how exactly that should work
-        // is annoying. It is harmless enough to just not validate anything
-        // in that case. We still check this after analysis as all opaque
-        // types have been revealed at this point.
-        if (src, dest).has_opaque_types() {
-            return true;
-        }
-
-        // After borrowck subtyping should be fully explicit via
-        // `Subtype` projections.
-        let variance = if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
-            Variance::Invariant
-        } else {
-            Variance::Covariant
-        };
-
-        crate::util::relate_types(self.tcx, self.param_env, variance, src, dest)
-    }
-}
-
-impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
-    fn visit_operand(&mut self, operand: &Operand<'tcx>, location: Location) {
-        // This check is somewhat expensive, so only run it when -Zvalidate-mir is passed.
-        if self.tcx.sess.opts.unstable_opts.validate_mir
-            && self.mir_phase < MirPhase::Runtime(RuntimePhase::Initial)
-        {
-            // `Operand::Copy` is only supposed to be used with `Copy` types.
-            if let Operand::Copy(place) = operand {
-                let ty = place.ty(&self.body.local_decls, self.tcx).ty;
-
-                if !ty.is_copy_modulo_regions(self.tcx, self.param_env) {
-                    self.fail(location, format!("`Operand::Copy` with non-`Copy` type {ty}"));
-                }
-            }
-        }
-
-        self.super_operand(operand, location);
-    }
-
-    fn visit_projection_elem(
-        &mut self,
-        place_ref: PlaceRef<'tcx>,
-        elem: PlaceElem<'tcx>,
-        context: PlaceContext,
-        location: Location,
-    ) {
-        match elem {
-            ProjectionElem::OpaqueCast(ty)
-                if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) =>
-            {
-                self.fail(
-                    location,
-                    format!("explicit opaque type cast to `{ty}` after `RevealAll`"),
-                )
-            }
-            ProjectionElem::Index(index) => {
-                let index_ty = self.body.local_decls[index].ty;
-                if index_ty != self.tcx.types.usize {
-                    self.fail(location, format!("bad index ({index_ty:?} != usize)"))
-                }
-            }
-            ProjectionElem::Deref
-                if self.mir_phase >= MirPhase::Runtime(RuntimePhase::PostCleanup) =>
-            {
-                let base_ty = place_ref.ty(&self.body.local_decls, self.tcx).ty;
-
-                if base_ty.is_box() {
-                    self.fail(
-                        location,
-                        format!("{base_ty:?} dereferenced after ElaborateBoxDerefs"),
-                    )
-                }
-            }
-            ProjectionElem::Field(f, ty) => {
-                let parent_ty = place_ref.ty(&self.body.local_decls, self.tcx);
-                let fail_out_of_bounds = |this: &mut Self, location| {
-                    this.fail(location, format!("Out of bounds field {f:?} for {parent_ty:?}"));
-                };
-                let check_equal = |this: &mut Self, location, f_ty| {
-                    if !this.mir_assign_valid_types(ty, f_ty) {
-                        this.fail(
-                            location,
-                            format!(
-                                "Field projection `{place_ref:?}.{f:?}` specified type `{ty:?}`, but actual type is `{f_ty:?}`"
-                            )
-                        )
-                    }
-                };
-
-                let kind = match parent_ty.ty.kind() {
-                    &ty::Alias(ty::Opaque, ty::AliasTy { def_id, args, .. }) => {
-                        self.tcx.type_of(def_id).instantiate(self.tcx, args).kind()
-                    }
-                    kind => kind,
-                };
-
-                match kind {
-                    ty::Tuple(fields) => {
-                        let Some(f_ty) = fields.get(f.as_usize()) else {
-                            fail_out_of_bounds(self, location);
-                            return;
-                        };
-                        check_equal(self, location, *f_ty);
-                    }
-                    ty::Adt(adt_def, args) => {
-                        let var = parent_ty.variant_index.unwrap_or(FIRST_VARIANT);
-                        let Some(field) = adt_def.variant(var).fields.get(f) else {
-                            fail_out_of_bounds(self, location);
-                            return;
-                        };
-                        check_equal(self, location, field.ty(self.tcx, args));
-                    }
-                    ty::Closure(_, args) => {
-                        let args = args.as_closure();
-                        let Some(&f_ty) = args.upvar_tys().get(f.as_usize()) else {
-                            fail_out_of_bounds(self, location);
-                            return;
-                        };
-                        check_equal(self, location, f_ty);
-                    }
-                    ty::CoroutineClosure(_, args) => {
-                        let args = args.as_coroutine_closure();
-                        let Some(&f_ty) = args.upvar_tys().get(f.as_usize()) else {
-                            fail_out_of_bounds(self, location);
-                            return;
-                        };
-                        check_equal(self, location, f_ty);
-                    }
-                    &ty::Coroutine(def_id, args) => {
-                        let f_ty = if let Some(var) = parent_ty.variant_index {
-                            // If we're currently validating an inlined copy of this body,
-                            // then it will no longer be parameterized over the original
-                            // args of the coroutine. Otherwise, we prefer to use this body
-                            // since we may be in the process of computing this MIR in the
-                            // first place.
-                            let layout = if def_id == self.caller_body.source.def_id() {
-                                // FIXME: This is not right for async closures.
-                                self.caller_body.coroutine_layout_raw()
-                            } else {
-                                self.tcx.coroutine_layout(def_id, args.as_coroutine().kind_ty())
-                            };
-
-                            let Some(layout) = layout else {
-                                self.fail(
-                                    location,
-                                    format!("No coroutine layout for {parent_ty:?}"),
-                                );
-                                return;
-                            };
-
-                            let Some(&local) = layout.variant_fields[var].get(f) else {
-                                fail_out_of_bounds(self, location);
-                                return;
-                            };
-
-                            let Some(f_ty) = layout.field_tys.get(local) else {
-                                self.fail(
-                                    location,
-                                    format!("Out of bounds local {local:?} for {parent_ty:?}"),
-                                );
-                                return;
-                            };
-
-                            ty::EarlyBinder::bind(f_ty.ty).instantiate(self.tcx, args)
-                        } else {
-                            let Some(&f_ty) = args.as_coroutine().prefix_tys().get(f.index())
-                            else {
-                                fail_out_of_bounds(self, location);
-                                return;
-                            };
-
-                            f_ty
-                        };
-
-                        check_equal(self, location, f_ty);
-                    }
-                    _ => {
-                        self.fail(location, format!("{:?} does not have fields", parent_ty.ty));
-                    }
-                }
-            }
-            ProjectionElem::Subtype(ty) => {
-                if !relate_types(
-                    self.tcx,
-                    self.param_env,
-                    Variance::Covariant,
-                    ty,
-                    place_ref.ty(&self.body.local_decls, self.tcx).ty,
-                ) {
-                    self.fail(
-                        location,
-                        format!(
-                            "Failed subtyping {ty:#?} and {:#?}",
-                            place_ref.ty(&self.body.local_decls, self.tcx).ty
-                        ),
-                    )
-                }
-            }
-            _ => {}
-        }
-        self.super_projection_elem(place_ref, elem, context, location);
-    }
-
-    fn visit_var_debug_info(&mut self, debuginfo: &VarDebugInfo<'tcx>) {
-        if let Some(box VarDebugInfoFragment { ty, ref projection }) = debuginfo.composite {
-            if ty.is_union() || ty.is_enum() {
-                self.fail(
-                    START_BLOCK.start_location(),
-                    format!("invalid type {ty:?} in debuginfo for {:?}", debuginfo.name),
-                );
-            }
-            if projection.is_empty() {
-                self.fail(
-                    START_BLOCK.start_location(),
-                    format!("invalid empty projection in debuginfo for {:?}", debuginfo.name),
-                );
-            }
-            if projection.iter().any(|p| !matches!(p, PlaceElem::Field(..))) {
-                self.fail(
-                    START_BLOCK.start_location(),
-                    format!(
-                        "illegal projection {:?} in debuginfo for {:?}",
-                        projection, debuginfo.name
-                    ),
-                );
-            }
-        }
-        match debuginfo.value {
-            VarDebugInfoContents::Const(_) => {}
-            VarDebugInfoContents::Place(place) => {
-                if place.projection.iter().any(|p| !p.can_use_in_debuginfo()) {
-                    self.fail(
-                        START_BLOCK.start_location(),
-                        format!("illegal place {:?} in debuginfo for {:?}", place, debuginfo.name),
-                    );
-                }
-            }
-        }
-        self.super_var_debug_info(debuginfo);
-    }
-
-    fn visit_place(&mut self, place: &Place<'tcx>, cntxt: PlaceContext, location: Location) {
-        // Set off any `bug!`s in the type computation code
-        let _ = place.ty(&self.body.local_decls, self.tcx);
-
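-        // In runtime MIR, `Deref` may only appear as the base (first) projection
-        // element; later derefs should have been split off into `CopyForDeref`
-        // temporaries. Debug-info places are exempt from this rule.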
-        if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial)
-            && place.projection.len() > 1
-            && cntxt != PlaceContext::NonUse(NonUseContext::VarDebugInfo)
-            && place.projection[1..].contains(&ProjectionElem::Deref)
-        {
-            self.fail(location, format!("{place:?} has deref at the wrong place"));
-        }
-
-        self.super_place(place, cntxt, location);
-    }
-
-    fn visit_rvalue(&mut self, rvalue: &Rvalue<'tcx>, location: Location) {
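-        // Helper: report a validation failure unless `$t`'s kind matches the
-        // pattern `$typat`, formatting `$text` with the offending type.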
-        macro_rules! check_kinds {
-            ($t:expr, $text:literal, $typat:pat) => {
-                if !matches!(($t).kind(), $typat) {
-                    self.fail(location, format!($text, $t));
-                }
-            };
-        }
-        match rvalue {
-            Rvalue::Use(_) | Rvalue::CopyForDeref(_) => {}
-            Rvalue::Aggregate(kind, fields) => match **kind {
-                AggregateKind::Tuple => {}
-                AggregateKind::Array(dest) => {
-                    for src in fields {
-                        if !self.mir_assign_valid_types(src.ty(self.body, self.tcx), dest) {
-                            self.fail(location, "array field has the wrong type");
-                        }
-                    }
-                }
-                AggregateKind::Adt(def_id, idx, args, _, Some(field)) => {
-                    let adt_def = self.tcx.adt_def(def_id);
-                    assert!(adt_def.is_union());
-                    assert_eq!(idx, FIRST_VARIANT);
-                    let dest_ty = self.tcx.normalize_erasing_regions(
-                        self.param_env,
-                        adt_def.non_enum_variant().fields[field].ty(self.tcx, args),
-                    );
-                    if fields.len() == 1 {
-                        let src_ty = fields.raw[0].ty(self.body, self.tcx);
-                        if !self.mir_assign_valid_types(src_ty, dest_ty) {
-                            self.fail(location, "union field has the wrong type");
-                        }
-                    } else {
-                        self.fail(location, "unions should have one initialized field");
-                    }
-                }
-                AggregateKind::Adt(def_id, idx, args, _, None) => {
-                    let adt_def = self.tcx.adt_def(def_id);
-                    assert!(!adt_def.is_union());
-                    let variant = &adt_def.variants()[idx];
-                    if variant.fields.len() != fields.len() {
-                        self.fail(location, "adt has the wrong number of initialized fields");
-                    }
-                    for (src, dest) in std::iter::zip(fields, &variant.fields) {
-                        let dest_ty = self
-                            .tcx
-                            .normalize_erasing_regions(self.param_env, dest.ty(self.tcx, args));
-                        if !self.mir_assign_valid_types(src.ty(self.body, self.tcx), dest_ty) {
-                            self.fail(location, "adt field has the wrong type");
-                        }
-                    }
-                }
-                AggregateKind::Closure(_, args) => {
-                    let upvars = args.as_closure().upvar_tys();
-                    if upvars.len() != fields.len() {
-                        self.fail(location, "closure has the wrong number of initialized fields");
-                    }
-                    for (src, dest) in std::iter::zip(fields, upvars) {
-                        if !self.mir_assign_valid_types(src.ty(self.body, self.tcx), dest) {
-                            self.fail(location, "closure field has the wrong type");
-                        }
-                    }
-                }
-                AggregateKind::Coroutine(_, args) => {
-                    let upvars = args.as_coroutine().upvar_tys();
-                    if upvars.len() != fields.len() {
-                        self.fail(location, "coroutine has the wrong number of initialized fields");
-                    }
-                    for (src, dest) in std::iter::zip(fields, upvars) {
-                        if !self.mir_assign_valid_types(src.ty(self.body, self.tcx), dest) {
-                            self.fail(location, "coroutine field has the wrong type");
-                        }
-                    }
-                }
-                AggregateKind::CoroutineClosure(_, args) => {
-                    let upvars = args.as_coroutine_closure().upvar_tys();
-                    if upvars.len() != fields.len() {
-                        self.fail(
-                            location,
-                            "coroutine-closure has the wrong number of initialized fields",
-                        );
-                    }
-                    for (src, dest) in std::iter::zip(fields, upvars) {
-                        if !self.mir_assign_valid_types(src.ty(self.body, self.tcx), dest) {
-                            self.fail(location, "coroutine-closure field has the wrong type");
-                        }
-                    }
-                }
-            },
-            Rvalue::Ref(_, BorrowKind::Fake, _) => {
-                if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
-                    self.fail(
-                        location,
-                        "`Assign` statement with a `Fake` borrow should have been removed in runtime MIR",
-                    );
-                }
-            }
-            Rvalue::Ref(..) => {}
-            Rvalue::Len(p) => {
-                let pty = p.ty(&self.body.local_decls, self.tcx).ty;
-                check_kinds!(
-                    pty,
-                    "Cannot compute length of non-array type {:?}",
-                    ty::Array(..) | ty::Slice(..)
-                );
-            }
-            Rvalue::BinaryOp(op, vals) => {
-                use BinOp::*;
-                let a = vals.0.ty(&self.body.local_decls, self.tcx);
-                let b = vals.1.ty(&self.body.local_decls, self.tcx);
-                if crate::util::binop_right_homogeneous(*op) {
-                    if let Eq | Lt | Le | Ne | Ge | Gt = op {
-                        // The function pointer types can have lifetimes
-                        if !self.mir_assign_valid_types(a, b) {
-                            self.fail(
-                                location,
-                                format!("Cannot {op:?} compare incompatible types {a:?} and {b:?}"),
-                            );
-                        }
-                    } else if a != b {
-                        self.fail(
-                            location,
-                            format!(
-                                "Cannot perform binary op {op:?} on unequal types {a:?} and {b:?}"
-                            ),
-                        );
-                    }
-                }
-
-                match op {
-                    Offset => {
-                        check_kinds!(a, "Cannot offset non-pointer type {:?}", ty::RawPtr(..));
-                        if b != self.tcx.types.isize && b != self.tcx.types.usize {
-                            self.fail(location, format!("Cannot offset by non-isize type {b:?}"));
-                        }
-                    }
-                    Eq | Lt | Le | Ne | Ge | Gt => {
-                        for x in [a, b] {
-                            check_kinds!(
-                                x,
-                                "Cannot {op:?} compare type {:?}",
-                                ty::Bool
-                                    | ty::Char
-                                    | ty::Int(..)
-                                    | ty::Uint(..)
-                                    | ty::Float(..)
-                                    | ty::RawPtr(..)
-                                    | ty::FnPtr(..)
-                            )
-                        }
-                    }
-                    Cmp => {
-                        for x in [a, b] {
-                            check_kinds!(
-                                x,
-                                "Cannot three-way compare non-integer type {:?}",
-                                ty::Char | ty::Uint(..) | ty::Int(..)
-                            )
-                        }
-                    }
-                    AddUnchecked | SubUnchecked | MulUnchecked | Shl | ShlUnchecked | Shr
-                    | ShrUnchecked => {
-                        for x in [a, b] {
-                            check_kinds!(
-                                x,
-                                "Cannot {op:?} non-integer type {:?}",
-                                ty::Uint(..) | ty::Int(..)
-                            )
-                        }
-                    }
-                    BitAnd | BitOr | BitXor => {
-                        for x in [a, b] {
-                            check_kinds!(
-                                x,
-                                "Cannot perform bitwise op {op:?} on type {:?}",
-                                ty::Uint(..) | ty::Int(..) | ty::Bool
-                            )
-                        }
-                    }
-                    Add | Sub | Mul | Div | Rem => {
-                        for x in [a, b] {
-                            check_kinds!(
-                                x,
-                                "Cannot perform arithmetic {op:?} on type {:?}",
-                                ty::Uint(..) | ty::Int(..) | ty::Float(..)
-                            )
-                        }
-                    }
-                }
-            }
-            Rvalue::CheckedBinaryOp(op, vals) => {
-                use BinOp::*;
-                let a = vals.0.ty(&self.body.local_decls, self.tcx);
-                let b = vals.1.ty(&self.body.local_decls, self.tcx);
-                match op {
-                    Add | Sub | Mul => {
-                        for x in [a, b] {
-                            check_kinds!(
-                                x,
-                                "Cannot perform checked arithmetic on type {:?}",
-                                ty::Uint(..) | ty::Int(..)
-                            )
-                        }
-                        if a != b {
-                            self.fail(
-                                location,
-                                format!(
-                                    "Cannot perform checked arithmetic on unequal types {a:?} and {b:?}"
-                                ),
-                            );
-                        }
-                    }
-                    _ => self.fail(location, format!("There is no checked version of {op:?}")),
-                }
-            }
-            Rvalue::UnaryOp(op, operand) => {
-                let a = operand.ty(&self.body.local_decls, self.tcx);
-                match op {
-                    UnOp::Neg => {
-                        check_kinds!(a, "Cannot negate type {:?}", ty::Int(..) | ty::Float(..))
-                    }
-                    UnOp::Not => {
-                        check_kinds!(
-                            a,
-                            "Cannot binary not type {:?}",
-                            ty::Int(..) | ty::Uint(..) | ty::Bool
-                        );
-                    }
-                }
-            }
-            Rvalue::ShallowInitBox(operand, _) => {
-                let a = operand.ty(&self.body.local_decls, self.tcx);
-                check_kinds!(a, "Cannot shallow init type {:?}", ty::RawPtr(..));
-            }
-            Rvalue::Cast(kind, operand, target_type) => {
-                let op_ty = operand.ty(self.body, self.tcx);
-                match kind {
-                    CastKind::DynStar => {
-                        // FIXME(dyn-star): make sure nothing needs to be done here.
-                    }
-                    // FIXME: Add Checks for these
-                    CastKind::PointerWithExposedProvenance
-                    | CastKind::PointerExposeProvenance
-                    | CastKind::PointerCoercion(_) => {}
-                    CastKind::IntToInt | CastKind::IntToFloat => {
-                        let input_valid = op_ty.is_integral() || op_ty.is_char() || op_ty.is_bool();
-                        let target_valid = target_type.is_numeric() || target_type.is_char();
-                        if !input_valid || !target_valid {
-                            self.fail(
-                                location,
-                                format!("Wrong cast kind {kind:?} for the type {op_ty}",),
-                            );
-                        }
-                    }
-                    CastKind::FnPtrToPtr | CastKind::PtrToPtr => {
-                        if !(op_ty.is_any_ptr() && target_type.is_unsafe_ptr()) {
-                            self.fail(location, "Can't cast {op_ty} into 'Ptr'");
-                        }
-                    }
-                    CastKind::FloatToFloat | CastKind::FloatToInt => {
-                        if !op_ty.is_floating_point() || !target_type.is_numeric() {
-                            self.fail(
-                                location,
-                                format!(
-                                    "Trying to cast non 'Float' as {kind:?} into {target_type:?}"
-                                ),
-                            );
-                        }
-                    }
-                    CastKind::Transmute => {
-                        if let MirPhase::Runtime(..) = self.mir_phase {
-                            // Unlike `mem::transmute`, a MIR `Transmute` is well-formed
-                            // for any two `Sized` types, just potentially UB to run.
-
-                            if !self
-                                .tcx
-                                .normalize_erasing_regions(self.param_env, op_ty)
-                                .is_sized(self.tcx, self.param_env)
-                            {
-                                self.fail(
-                                    location,
-                                    format!("Cannot transmute from non-`Sized` type {op_ty:?}"),
-                                );
-                            }
-                            if !self
-                                .tcx
-                                .normalize_erasing_regions(self.param_env, *target_type)
-                                .is_sized(self.tcx, self.param_env)
-                            {
-                                self.fail(
-                                    location,
-                                    format!("Cannot transmute to non-`Sized` type {target_type:?}"),
-                                );
-                            }
-                        } else {
-                            self.fail(
-                                location,
-                                format!(
-                                    "Transmute is not supported in non-runtime phase {:?}.",
-                                    self.mir_phase
-                                ),
-                            );
-                        }
-                    }
-                }
-            }
-            Rvalue::NullaryOp(NullOp::OffsetOf(indices), container) => {
-                let fail_out_of_bounds = |this: &mut Self, location, field, ty| {
-                    this.fail(location, format!("Out of bounds field {field:?} for {ty:?}"));
-                };
-
-                let mut current_ty = *container;
-
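-                // Walk the (variant, field) index pairs, descending through each
-                // (normalized) field type to validate the whole `OffsetOf` path.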
-                for (variant, field) in indices.iter() {
-                    match current_ty.kind() {
-                        ty::Tuple(fields) => {
-                            if variant != FIRST_VARIANT {
-                                self.fail(
-                                    location,
-                                    format!("tried to get variant {variant:?} of tuple"),
-                                );
-                                return;
-                            }
-                            let Some(&f_ty) = fields.get(field.as_usize()) else {
-                                fail_out_of_bounds(self, location, field, current_ty);
-                                return;
-                            };
-
-                            current_ty = self.tcx.normalize_erasing_regions(self.param_env, f_ty);
-                        }
-                        ty::Adt(adt_def, args) => {
-                            let Some(field) = adt_def.variant(variant).fields.get(field) else {
-                                fail_out_of_bounds(self, location, field, current_ty);
-                                return;
-                            };
-
-                            let f_ty = field.ty(self.tcx, args);
-                            current_ty = self.tcx.normalize_erasing_regions(self.param_env, f_ty);
-                        }
-                        _ => {
-                            self.fail(
-                                location,
-                                format!("Cannot get offset ({variant:?}, {field:?}) from type {current_ty:?}"),
-                            );
-                            return;
-                        }
-                    }
-                }
-            }
-            Rvalue::Repeat(_, _)
-            | Rvalue::ThreadLocalRef(_)
-            | Rvalue::AddressOf(_, _)
-            | Rvalue::NullaryOp(NullOp::SizeOf | NullOp::AlignOf | NullOp::UbChecks, _)
-            | Rvalue::Discriminant(_) => {}
-        }
-        self.super_rvalue(rvalue, location);
-    }
-
-    fn visit_statement(&mut self, statement: &Statement<'tcx>, location: Location) {
-        match &statement.kind {
-            StatementKind::Assign(box (dest, rvalue)) => {
-                // LHS and RHS of the assignment must have the same type.
-                let left_ty = dest.ty(&self.body.local_decls, self.tcx).ty;
-                let right_ty = rvalue.ty(&self.body.local_decls, self.tcx);
-
-                if !self.mir_assign_valid_types(right_ty, left_ty) {
-                    self.fail(
-                        location,
-                        format!(
-                            "encountered `{:?}` with incompatible types:\n\
-                            left-hand side has type: {}\n\
-                            right-hand side has type: {}",
-                            statement.kind, left_ty, right_ty,
-                        ),
-                    );
-                }
-                if let Rvalue::CopyForDeref(place) = rvalue {
-                    if place.ty(&self.body.local_decls, self.tcx).ty.builtin_deref(true).is_none() {
-                        self.fail(
-                            location,
-                            "`CopyForDeref` should only be used for dereferenceable types",
-                        )
-                    }
-                }
-            }
-            StatementKind::AscribeUserType(..) => {
-                if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
-                    self.fail(
-                        location,
-                        "`AscribeUserType` should have been removed after drop lowering phase",
-                    );
-                }
-            }
-            StatementKind::FakeRead(..) => {
-                if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
-                    self.fail(
-                        location,
-                        "`FakeRead` should have been removed after drop lowering phase",
-                    );
-                }
-            }
-            StatementKind::Intrinsic(box NonDivergingIntrinsic::Assume(op)) => {
-                let ty = op.ty(&self.body.local_decls, self.tcx);
-                if !ty.is_bool() {
-                    self.fail(
-                        location,
-                        format!("`assume` argument must be `bool`, but got: `{ty}`"),
-                    );
-                }
-            }
-            StatementKind::Intrinsic(box NonDivergingIntrinsic::CopyNonOverlapping(
-                CopyNonOverlapping { src, dst, count },
-            )) => {
-                let src_ty = src.ty(&self.body.local_decls, self.tcx);
-                let op_src_ty = if let Some(src_deref) = src_ty.builtin_deref(true) {
-                    src_deref.ty
-                } else {
-                    self.fail(
-                        location,
-                        format!("Expected src to be ptr in copy_nonoverlapping, got: {src_ty}"),
-                    );
-                    return;
-                };
-                let dst_ty = dst.ty(&self.body.local_decls, self.tcx);
-                let op_dst_ty = if let Some(dst_deref) = dst_ty.builtin_deref(true) {
-                    dst_deref.ty
-                } else {
-                    self.fail(
-                        location,
-                        format!("Expected dst to be ptr in copy_nonoverlapping, got: {dst_ty}"),
-                    );
-                    return;
-                };
-                // Since `CopyNonOverlapping` is parametrized by a single type, we only
-                // need to check that the source and destination types agree rather than
-                // keeping an extra type parameter.
-                if !self.mir_assign_valid_types(op_src_ty, op_dst_ty) {
-                    self.fail(location, format!("bad arg ({op_src_ty:?} != {op_dst_ty:?})"));
-                }
-
-                let op_cnt_ty = count.ty(&self.body.local_decls, self.tcx);
-                if op_cnt_ty != self.tcx.types.usize {
-                    self.fail(location, format!("bad arg ({op_cnt_ty:?} != usize)"))
-                }
-            }
-            StatementKind::SetDiscriminant { place, .. } => {
-                if self.mir_phase < MirPhase::Runtime(RuntimePhase::Initial) {
-                    self.fail(location, "`SetDiscriminant` is not allowed until deaggregation");
-                }
-                let pty = place.ty(&self.body.local_decls, self.tcx).ty.kind();
-                if !matches!(pty, ty::Adt(..) | ty::Coroutine(..) | ty::Alias(ty::Opaque, ..)) {
-                    self.fail(
-                        location,
-                        format!(
-                            "`SetDiscriminant` is only allowed on ADTs and coroutines, not {pty:?}"
-                        ),
-                    );
-                }
-            }
-            StatementKind::Deinit(..) => {
-                if self.mir_phase < MirPhase::Runtime(RuntimePhase::Initial) {
-                    self.fail(location, "`Deinit` is not allowed until deaggregation");
-                }
-            }
-            StatementKind::Retag(kind, _) => {
-                // FIXME(JakobDegen) The validator should check that `self.mir_phase <
-                // DropsLowered`. However, this causes ICEs with generation of drop shims, which
-                // seem to fail to set their `MirPhase` correctly.
-                if matches!(kind, RetagKind::TwoPhase) {
-                    self.fail(location, format!("explicit `{kind:?}` is forbidden"));
-                }
-            }
-            StatementKind::StorageLive(_)
-            | StatementKind::StorageDead(_)
-            | StatementKind::Coverage(_)
-            | StatementKind::ConstEvalCounter
-            | StatementKind::PlaceMention(..)
-            | StatementKind::Nop => {}
-        }
-
-        self.super_statement(statement, location);
-    }
-
-    fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, location: Location) {
-        match &terminator.kind {
-            TerminatorKind::SwitchInt { targets, discr } => {
-                let switch_ty = discr.ty(&self.body.local_decls, self.tcx);
-
-                let target_width = self.tcx.sess.target.pointer_width;
-
-                let size = Size::from_bits(match switch_ty.kind() {
-                    ty::Uint(uint) => uint.normalize(target_width).bit_width().unwrap(),
-                    ty::Int(int) => int.normalize(target_width).bit_width().unwrap(),
-                    ty::Char => 32,
-                    ty::Bool => 1,
-                    other => bug!("unhandled type: {:?}", other),
-                });
-
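-                // Every switch value must be representable as an unsigned integer
-                // of the discriminant's width.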
-                for (value, _) in targets.iter() {
-                    if Scalar::<()>::try_from_uint(value, size).is_none() {
-                        self.fail(
-                            location,
-                            format!("the value {value:#x} is not a proper {switch_ty:?}"),
-                        )
-                    }
-                }
-            }
-            TerminatorKind::Call { func, .. } => {
-                let func_ty = func.ty(&self.body.local_decls, self.tcx);
-                match func_ty.kind() {
-                    ty::FnPtr(..) | ty::FnDef(..) => {}
-                    _ => self.fail(
-                        location,
-                        format!("encountered non-callable type {func_ty} in `Call` terminator"),
-                    ),
-                }
-            }
-            TerminatorKind::Assert { cond, .. } => {
-                let cond_ty = cond.ty(&self.body.local_decls, self.tcx);
-                if cond_ty != self.tcx.types.bool {
-                    self.fail(
-                        location,
-                        format!(
-                            "encountered non-boolean condition of type {cond_ty} in `Assert` terminator"
-                        ),
-                    );
-                }
-            }
-            TerminatorKind::Goto { .. }
-            | TerminatorKind::Drop { .. }
-            | TerminatorKind::Yield { .. }
-            | TerminatorKind::FalseEdge { .. }
-            | TerminatorKind::FalseUnwind { .. }
-            | TerminatorKind::InlineAsm { .. }
-            | TerminatorKind::CoroutineDrop
-            | TerminatorKind::UnwindResume
-            | TerminatorKind::UnwindTerminate(_)
-            | TerminatorKind::Return
-            | TerminatorKind::Unreachable => {}
-        }
-
-        self.super_terminator(terminator, location);
-    }
-}
diff --git a/compiler/rustc_const_eval/src/util/alignment.rs b/compiler/rustc_const_eval/src/util/alignment.rs
index 8642dfccd78..528274e6aba 100644
--- a/compiler/rustc_const_eval/src/util/alignment.rs
+++ b/compiler/rustc_const_eval/src/util/alignment.rs
@@ -1,6 +1,7 @@
 use rustc_middle::mir::*;
 use rustc_middle::ty::{self, TyCtxt};
 use rustc_target::abi::Align;
+use tracing::debug;
 
 /// Returns `true` if this place is allowed to be less aligned
 /// than its containing struct (because it is within a packed
diff --git a/compiler/rustc_const_eval/src/util/caller_location.rs b/compiler/rustc_const_eval/src/util/caller_location.rs
index af9a4a4271d..62c5f8734a2 100644
--- a/compiler/rustc_const_eval/src/util/caller_location.rs
+++ b/compiler/rustc_const_eval/src/util/caller_location.rs
@@ -1,16 +1,18 @@
 use rustc_hir::LangItem;
+use rustc_middle::bug;
 use rustc_middle::mir;
 use rustc_middle::query::TyCtxtAt;
 use rustc_middle::ty::layout::LayoutOf;
 use rustc_middle::ty::{self, Mutability};
 use rustc_span::symbol::Symbol;
+use tracing::trace;
 
 use crate::const_eval::{mk_eval_cx_to_read_const_val, CanAccessMutGlobal, CompileTimeEvalContext};
 use crate::interpret::*;
 
 /// Allocate a `const core::panic::Location` with the provided filename and line/column numbers.
-fn alloc_caller_location<'mir, 'tcx>(
-    ecx: &mut CompileTimeEvalContext<'mir, 'tcx>,
+fn alloc_caller_location<'tcx>(
+    ecx: &mut CompileTimeEvalContext<'tcx>,
     filename: Symbol,
     line: u32,
     col: u32,
diff --git a/compiler/rustc_const_eval/src/util/check_validity_requirement.rs b/compiler/rustc_const_eval/src/util/check_validity_requirement.rs
index 8c4af5e5132..68fb122a765 100644
--- a/compiler/rustc_const_eval/src/util/check_validity_requirement.rs
+++ b/compiler/rustc_const_eval/src/util/check_validity_requirement.rs
@@ -1,3 +1,4 @@
+use rustc_middle::bug;
 use rustc_middle::ty::layout::{LayoutCx, LayoutError, LayoutOf, TyAndLayout, ValidityRequirement};
 use rustc_middle::ty::{ParamEnv, ParamEnvAnd, Ty, TyCtxt};
 use rustc_target::abi::{Abi, FieldsShape, Scalar, Variants};
@@ -115,7 +116,7 @@ fn might_permit_raw_init_lax<'tcx>(
 
     // Special magic check for references and boxes (i.e., special pointer types).
     if let Some(pointee) = this.ty.builtin_deref(false) {
-        let pointee = cx.layout_of(pointee.ty)?;
+        let pointee = cx.layout_of(pointee)?;
         // We need to ensure that the LLVM attributes `aligned` and `dereferenceable(size)` are satisfied.
         if pointee.align.abi.bytes() > 1 {
             // 0x01-filling is not aligned.
diff --git a/compiler/rustc_const_eval/src/util/mod.rs b/compiler/rustc_const_eval/src/util/mod.rs
index 0c3b59a0e78..66a1addfb52 100644
--- a/compiler/rustc_const_eval/src/util/mod.rs
+++ b/compiler/rustc_const_eval/src/util/mod.rs
@@ -19,7 +19,9 @@ pub fn binop_left_homogeneous(op: mir::BinOp) -> bool {
     match op {
         Add | AddUnchecked | Sub | SubUnchecked | Mul | MulUnchecked | Div | Rem | BitXor
         | BitAnd | BitOr | Offset | Shl | ShlUnchecked | Shr | ShrUnchecked => true,
-        Eq | Ne | Lt | Le | Gt | Ge | Cmp => false,
+        AddWithOverflow | SubWithOverflow | MulWithOverflow | Eq | Ne | Lt | Le | Gt | Ge | Cmp => {
+            false
+        }
     }
 }
 
@@ -29,8 +31,9 @@ pub fn binop_left_homogeneous(op: mir::BinOp) -> bool {
 pub fn binop_right_homogeneous(op: mir::BinOp) -> bool {
     use rustc_middle::mir::BinOp::*;
     match op {
-        Add | AddUnchecked | Sub | SubUnchecked | Mul | MulUnchecked | Div | Rem | BitXor
-        | BitAnd | BitOr | Eq | Ne | Lt | Le | Gt | Ge | Cmp => true,
+        Add | AddUnchecked | AddWithOverflow | Sub | SubUnchecked | SubWithOverflow | Mul
+        | MulUnchecked | MulWithOverflow | Div | Rem | BitXor | BitAnd | BitOr | Eq | Ne | Lt
+        | Le | Gt | Ge | Cmp => true,
         Offset | Shl | ShlUnchecked | Shr | ShrUnchecked => false,
     }
 }
diff --git a/compiler/rustc_const_eval/src/util/type_name.rs b/compiler/rustc_const_eval/src/util/type_name.rs
index e474b952938..01e517250f7 100644
--- a/compiler/rustc_const_eval/src/util/type_name.rs
+++ b/compiler/rustc_const_eval/src/util/type_name.rs
@@ -1,6 +1,7 @@
 use rustc_data_structures::intern::Interned;
 use rustc_hir::def_id::CrateNum;
 use rustc_hir::definitions::DisambiguatedDefPathData;
+use rustc_middle::bug;
 use rustc_middle::ty::{
     self,
     print::{PrettyPrinter, Print, PrintError, Printer},