Diffstat (limited to 'compiler/rustc_const_eval/src')
-rw-r--r--  compiler/rustc_const_eval/src/const_eval/error.rs                              181
-rw-r--r--  compiler/rustc_const_eval/src/const_eval/eval_queries.rs                       436
-rw-r--r--  compiler/rustc_const_eval/src/const_eval/fn_queries.rs                          98
-rw-r--r--  compiler/rustc_const_eval/src/const_eval/machine.rs                            765
-rw-r--r--  compiler/rustc_const_eval/src/const_eval/mod.rs                                 77
-rw-r--r--  compiler/rustc_const_eval/src/const_eval/valtrees.rs                           448
-rw-r--r--  compiler/rustc_const_eval/src/errors.rs                                        901
-rw-r--r--  compiler/rustc_const_eval/src/interpret/cast.rs                                481
-rw-r--r--  compiler/rustc_const_eval/src/interpret/discriminant.rs                        280
-rw-r--r--  compiler/rustc_const_eval/src/interpret/eval_context.rs                       1270
-rw-r--r--  compiler/rustc_const_eval/src/interpret/intern.rs                              331
-rw-r--r--  compiler/rustc_const_eval/src/interpret/intrinsics.rs                          678
-rw-r--r--  compiler/rustc_const_eval/src/interpret/machine.rs                             632
-rw-r--r--  compiler/rustc_const_eval/src/interpret/memory.rs                             1380
-rw-r--r--  compiler/rustc_const_eval/src/interpret/mod.rs                                  43
-rw-r--r--  compiler/rustc_const_eval/src/interpret/operand.rs                             798
-rw-r--r--  compiler/rustc_const_eval/src/interpret/operator.rs                            510
-rw-r--r--  compiler/rustc_const_eval/src/interpret/place.rs                              1072
-rw-r--r--  compiler/rustc_const_eval/src/interpret/projection.rs                          370
-rw-r--r--  compiler/rustc_const_eval/src/interpret/step.rs                                381
-rw-r--r--  compiler/rustc_const_eval/src/interpret/terminator.rs                          973
-rw-r--r--  compiler/rustc_const_eval/src/interpret/traits.rs                               59
-rw-r--r--  compiler/rustc_const_eval/src/interpret/util.rs                                106
-rw-r--r--  compiler/rustc_const_eval/src/interpret/validity.rs                           1039
-rw-r--r--  compiler/rustc_const_eval/src/interpret/visitor.rs                             213
-rw-r--r--  compiler/rustc_const_eval/src/lib.rs                                            59
-rw-r--r--  compiler/rustc_const_eval/src/transform/check_consts/check.rs                 1006
-rw-r--r--  compiler/rustc_const_eval/src/transform/check_consts/mod.rs                    140
-rw-r--r--  compiler/rustc_const_eval/src/transform/check_consts/ops.rs                    648
-rw-r--r--  compiler/rustc_const_eval/src/transform/check_consts/post_drop_elaboration.rs  123
-rw-r--r--  compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs                395
-rw-r--r--  compiler/rustc_const_eval/src/transform/check_consts/resolver.rs               366
-rw-r--r--  compiler/rustc_const_eval/src/transform/mod.rs                                   2
-rw-r--r--  compiler/rustc_const_eval/src/transform/validate.rs                           1350
-rw-r--r--  compiler/rustc_const_eval/src/util/alignment.rs                                 72
-rw-r--r--  compiler/rustc_const_eval/src/util/caller_location.rs                           71
-rw-r--r--  compiler/rustc_const_eval/src/util/check_validity_requirement.rs               159
-rw-r--r--  compiler/rustc_const_eval/src/util/compare_types.rs                             58
-rw-r--r--  compiler/rustc_const_eval/src/util/mod.rs                                       36
-rw-r--r--  compiler/rustc_const_eval/src/util/type_name.rs                                184
40 files changed, 18191 insertions, 0 deletions
diff --git a/compiler/rustc_const_eval/src/const_eval/error.rs b/compiler/rustc_const_eval/src/const_eval/error.rs
new file mode 100644
index 00000000000..763344207c4
--- /dev/null
+++ b/compiler/rustc_const_eval/src/const_eval/error.rs
@@ -0,0 +1,181 @@
+use std::mem;
+
+use rustc_errors::{DiagArgName, DiagArgValue, DiagMessage, Diagnostic, IntoDiagArg};
+use rustc_hir::CRATE_HIR_ID;
+use rustc_middle::mir::interpret::Provenance;
+use rustc_middle::mir::AssertKind;
+use rustc_middle::query::TyCtxtAt;
+use rustc_middle::ty::TyCtxt;
+use rustc_middle::ty::{layout::LayoutError, ConstInt};
+use rustc_span::{Span, Symbol, DUMMY_SP};
+
+use super::CompileTimeInterpreter;
+use crate::errors::{self, FrameNote, ReportErrorExt};
+use crate::interpret::{ErrorHandled, Frame, InterpError, InterpErrorInfo, MachineStopType};
+
+/// The CTFE machine has some custom error kinds.
+#[derive(Clone, Debug)]
+pub enum ConstEvalErrKind {
+    ConstAccessesMutGlobal,
+    ModifiedGlobal,
+    RecursiveStatic,
+    AssertFailure(AssertKind<ConstInt>),
+    Panic { msg: Symbol, line: u32, col: u32, file: Symbol },
+}
+
+impl MachineStopType for ConstEvalErrKind {
+    fn diagnostic_message(&self) -> DiagMessage {
+        use crate::fluent_generated::*;
+        use ConstEvalErrKind::*;
+        match self {
+            ConstAccessesMutGlobal => const_eval_const_accesses_mut_global,
+            ModifiedGlobal => const_eval_modified_global,
+            Panic { .. } => const_eval_panic,
+            RecursiveStatic => const_eval_recursive_static,
+            AssertFailure(x) => x.diagnostic_message(),
+        }
+    }
+    fn add_args(self: Box<Self>, adder: &mut dyn FnMut(DiagArgName, DiagArgValue)) {
+        use ConstEvalErrKind::*;
+        match *self {
+            RecursiveStatic | ConstAccessesMutGlobal | ModifiedGlobal => {}
+            AssertFailure(kind) => kind.add_args(adder),
+            Panic { msg, line, col, file } => {
+                adder("msg".into(), msg.into_diag_arg());
+                adder("file".into(), file.into_diag_arg());
+                adder("line".into(), line.into_diag_arg());
+                adder("col".into(), col.into_diag_arg());
+            }
+        }
+    }
+}
+
+/// The errors become [`InterpError::MachineStop`] when being raised.
+impl<'tcx> Into<InterpErrorInfo<'tcx>> for ConstEvalErrKind {
+    fn into(self) -> InterpErrorInfo<'tcx> {
+        err_machine_stop!(self).into()
+    }
+}
+
+pub fn get_span_and_frames<'tcx, 'mir>(
+    tcx: TyCtxtAt<'tcx>,
+    stack: &[Frame<'mir, 'tcx, impl Provenance, impl Sized>],
+) -> (Span, Vec<errors::FrameNote>)
+where
+    'tcx: 'mir,
+{
+    let mut stacktrace = Frame::generate_stacktrace_from_stack(stack);
+    // Filter out `requires_caller_location` frames.
+    stacktrace.retain(|frame| !frame.instance.def.requires_caller_location(*tcx));
+    let span = stacktrace.first().map(|f| f.span).unwrap_or(tcx.span);
+
+    let mut frames = Vec::new();
+
+    // Add notes to the backtrace. Don't print a single-line backtrace though.
+    if stacktrace.len() > 1 {
+        // Helper closure to print duplicated lines.
+        let mut add_frame = |mut frame: errors::FrameNote| {
+            frames.push(errors::FrameNote { times: 0, ..frame.clone() });
+            // Don't print [... additional calls ...] if the number of lines is small
+            if frame.times < 3 {
+                let times = frame.times;
+                frame.times = 0;
+                frames.extend(std::iter::repeat(frame).take(times as usize));
+            } else {
+                frames.push(frame);
+            }
+        };
+
+        let mut last_frame: Option<errors::FrameNote> = None;
+        for frame_info in &stacktrace {
+            let frame = frame_info.as_note(*tcx);
+            match last_frame.as_mut() {
+                Some(last_frame)
+                    if last_frame.span == frame.span
+                        && last_frame.where_ == frame.where_
+                        && last_frame.instance == frame.instance =>
+                {
+                    last_frame.times += 1;
+                }
+                Some(last_frame) => {
+                    add_frame(mem::replace(last_frame, frame));
+                }
+                None => {
+                    last_frame = Some(frame);
+                }
+            }
+        }
+        if let Some(frame) = last_frame {
+            add_frame(frame);
+        }
+    }
+
+    (span, frames)
+}
+
+/// Create a diagnostic for a const eval error.
+///
+/// This uses the `mk` function to create the diagnostic, passing it the span and a stacktrace of
+/// the current execution as produced by `get_span_and_frames`; the message derived from the
+/// `InterpError` is attached as a label on that span.
+pub(super) fn report<'tcx, C, F, E>(
+    tcx: TyCtxt<'tcx>,
+    error: InterpError<'tcx>,
+    span: Option<Span>,
+    get_span_and_frames: C,
+    mk: F,
+) -> ErrorHandled
+where
+    C: FnOnce() -> (Span, Vec<FrameNote>),
+    F: FnOnce(Span, Vec<FrameNote>) -> E,
+    E: Diagnostic<'tcx>,
+{
+    // Special handling for certain errors
+    match error {
+        // Don't emit a new diagnostic for these errors, they are already reported elsewhere or
+        // should remain silent.
+        err_inval!(Layout(LayoutError::Unknown(_))) | err_inval!(TooGeneric) => {
+            ErrorHandled::TooGeneric(span.unwrap_or(DUMMY_SP))
+        }
+        err_inval!(AlreadyReported(guar)) => ErrorHandled::Reported(guar, span.unwrap_or(DUMMY_SP)),
+        err_inval!(Layout(LayoutError::ReferencesError(guar))) => {
+            ErrorHandled::Reported(guar.into(), span.unwrap_or(DUMMY_SP))
+        }
+        // Report remaining errors.
+        _ => {
+            let (our_span, frames) = get_span_and_frames();
+            let span = span.unwrap_or(our_span);
+            let err = mk(span, frames);
+            let mut err = tcx.dcx().create_err(err);
+
+            let msg = error.diagnostic_message();
+            error.add_args(&mut err);
+
+            // Use *our* span to label the interp error
+            err.span_label(our_span, msg);
+            ErrorHandled::Reported(err.emit().into(), span)
+        }
+    }
+}
+
+/// Emit a lint from a const-eval situation.
+// Even if this is unused, please don't remove it -- chances are we will need to emit a lint during const-eval again in the future!
+pub(super) fn lint<'tcx, 'mir, L>(
+    tcx: TyCtxtAt<'tcx>,
+    machine: &CompileTimeInterpreter<'mir, 'tcx>,
+    lint: &'static rustc_session::lint::Lint,
+    decorator: impl FnOnce(Vec<errors::FrameNote>) -> L,
+) where
+    L: for<'a> rustc_errors::LintDiagnostic<'a, ()>,
+{
+    let (span, frames) = get_span_and_frames(tcx, &machine.stack);
+
+    tcx.emit_node_span_lint(
+        lint,
+        // We use the root frame for this so the crate that defines the const defines whether the
+        // lint is emitted.
+        machine.stack.first().and_then(|frame| frame.lint_root()).unwrap_or(CRATE_HIR_ID),
+        span,
+        decorator(frames),
+    );
+}
diff --git a/compiler/rustc_const_eval/src/const_eval/eval_queries.rs b/compiler/rustc_const_eval/src/const_eval/eval_queries.rs
new file mode 100644
index 00000000000..5a1c7cc4209
--- /dev/null
+++ b/compiler/rustc_const_eval/src/const_eval/eval_queries.rs
@@ -0,0 +1,436 @@
+use either::{Left, Right};
+
+use rustc_hir::def::DefKind;
+use rustc_middle::mir::interpret::{AllocId, ErrorHandled, InterpErrorInfo};
+use rustc_middle::mir::{self, ConstAlloc, ConstValue};
+use rustc_middle::query::TyCtxtAt;
+use rustc_middle::traits::Reveal;
+use rustc_middle::ty::layout::LayoutOf;
+use rustc_middle::ty::print::with_no_trimmed_paths;
+use rustc_middle::ty::{self, Ty, TyCtxt};
+use rustc_span::def_id::LocalDefId;
+use rustc_span::Span;
+use rustc_target::abi::{self, Abi};
+
+use super::{CanAccessMutGlobal, CompileTimeEvalContext, CompileTimeInterpreter};
+use crate::const_eval::CheckAlignment;
+use crate::errors;
+use crate::errors::ConstEvalError;
+use crate::interpret::eval_nullary_intrinsic;
+use crate::interpret::{
+    create_static_alloc, intern_const_alloc_recursive, CtfeValidationMode, GlobalId, Immediate,
+    InternKind, InterpCx, InterpError, InterpResult, MPlaceTy, MemoryKind, OpTy, RefTracking,
+    StackPopCleanup,
+};
+
+// Returns a pointer to where the result lives
+#[instrument(level = "trace", skip(ecx, body))]
+fn eval_body_using_ecx<'mir, 'tcx, R: InterpretationResult<'tcx>>(
+    ecx: &mut CompileTimeEvalContext<'mir, 'tcx>,
+    cid: GlobalId<'tcx>,
+    body: &'mir mir::Body<'tcx>,
+) -> InterpResult<'tcx, R> {
+    trace!(?ecx.param_env);
+    let tcx = *ecx.tcx;
+    assert!(
+        cid.promoted.is_some()
+            || matches!(
+                ecx.tcx.def_kind(cid.instance.def_id()),
+                DefKind::Const
+                    | DefKind::Static { .. }
+                    | DefKind::ConstParam
+                    | DefKind::AnonConst
+                    | DefKind::InlineConst
+                    | DefKind::AssocConst
+            ),
+        "Unexpected DefKind: {:?}",
+        ecx.tcx.def_kind(cid.instance.def_id())
+    );
+    let layout = ecx.layout_of(body.bound_return_ty().instantiate(tcx, cid.instance.args))?;
+    assert!(layout.is_sized());
+
+    let intern_kind = if cid.promoted.is_some() {
+        InternKind::Promoted
+    } else {
+        match tcx.static_mutability(cid.instance.def_id()) {
+            Some(m) => InternKind::Static(m),
+            None => InternKind::Constant,
+        }
+    };
+
+    let ret = if let InternKind::Static(_) = intern_kind {
+        create_static_alloc(ecx, cid.instance.def_id().expect_local(), layout)?
+    } else {
+        ecx.allocate(layout, MemoryKind::Stack)?
+    };
+
+    trace!(
+        "eval_body_using_ecx: pushing stack frame for global: {}{}",
+        with_no_trimmed_paths!(ecx.tcx.def_path_str(cid.instance.def_id())),
+        cid.promoted.map_or_else(String::new, |p| format!("::{p:?}"))
+    );
+
+    ecx.push_stack_frame(
+        cid.instance,
+        body,
+        &ret.clone().into(),
+        StackPopCleanup::Root { cleanup: false },
+    )?;
+    ecx.storage_live_for_always_live_locals()?;
+
+    // The main interpreter loop.
+    while ecx.step()? {}
+
+    // Intern the result
+    intern_const_alloc_recursive(ecx, intern_kind, &ret)?;
+
+    // Since evaluation had no errors, validate the resulting constant.
+    const_validate_mplace(&ecx, &ret, cid)?;
+
+    Ok(R::make_result(ret, ecx))
+}
+
+/// The `InterpCx` is only meant to be used to do field and index projections into constants for
+/// `simd_shuffle` and const patterns in match arms.
+///
+/// This should *not* be used to do any actual interpretation. In particular, alignment checks are
+/// turned off!
+///
+/// The function containing the `match` that is currently being analyzed may have generic bounds
+/// that inform us about the generic bounds of the constant. E.g., using an associated constant
+/// of a function's generic parameter will require knowledge about the bounds on the generic
+/// parameter. These bounds are passed to `mk_eval_cx` via the `ParamEnv` argument.
+pub(crate) fn mk_eval_cx_to_read_const_val<'mir, 'tcx>(
+    tcx: TyCtxt<'tcx>,
+    root_span: Span,
+    param_env: ty::ParamEnv<'tcx>,
+    can_access_mut_global: CanAccessMutGlobal,
+) -> CompileTimeEvalContext<'mir, 'tcx> {
+    debug!("mk_eval_cx: {:?}", param_env);
+    InterpCx::new(
+        tcx,
+        root_span,
+        param_env,
+        CompileTimeInterpreter::new(can_access_mut_global, CheckAlignment::No),
+    )
+}
+
+/// Create an interpreter context to inspect the given `ConstValue`.
+/// Returns both the context and an `OpTy` that represents the constant.
+pub fn mk_eval_cx_for_const_val<'mir, 'tcx>(
+    tcx: TyCtxtAt<'tcx>,
+    param_env: ty::ParamEnv<'tcx>,
+    val: mir::ConstValue<'tcx>,
+    ty: Ty<'tcx>,
+) -> Option<(CompileTimeEvalContext<'mir, 'tcx>, OpTy<'tcx>)> {
+    let ecx = mk_eval_cx_to_read_const_val(tcx.tcx, tcx.span, param_env, CanAccessMutGlobal::No);
+    let op = ecx.const_val_to_op(val, ty, None).ok()?;
+    Some((ecx, op))
+}
+
+/// This function converts an interpreter value into a MIR constant.
+///
+/// The `for_diagnostics` flag turns the usual rules for returning `ConstValue::Scalar` into a
+/// best-effort attempt. This is not okay for use in const-eval since it breaks invariants rustc
+/// relies on, but it is okay for diagnostics which will just give up gracefully when they
+/// encounter an `Indirect` they cannot handle.
+#[instrument(skip(ecx), level = "debug")]
+pub(super) fn op_to_const<'tcx>(
+    ecx: &CompileTimeEvalContext<'_, 'tcx>,
+    op: &OpTy<'tcx>,
+    for_diagnostics: bool,
+) -> ConstValue<'tcx> {
+    // Handle ZST consistently and early.
+    if op.layout.is_zst() {
+        return ConstValue::ZeroSized;
+    }
+
+    // All scalar types should be stored as `ConstValue::Scalar`. This is needed to make
+    // `ConstValue::try_to_scalar` efficient; we want that to work for *all* constants of scalar
+    // type (it's used throughout the compiler and having it work just on literals is not enough)
+    // and we want it to be fast (i.e., don't go to an `Allocation` and reconstruct the `Scalar`
+    // from its byte-serialized form).
+    let force_as_immediate = match op.layout.abi {
+        Abi::Scalar(abi::Scalar::Initialized { .. }) => true,
+        // We don't *force* `ConstValue::Slice` for `ScalarPair`. This has the advantage that if the
+        // input `op` is a place, then turning it into a `ConstValue` and back into a `OpTy` will
+        // not have to generate any duplicate allocations (we preserve the original `AllocId` in
+        // `ConstValue::Indirect`). It means accessing the contents of a slice can be slow (since
+        // they can be stored as `ConstValue::Indirect`), but that's not relevant since we barely
+        // ever have to do this. (`try_get_slice_bytes_for_diagnostics` exists to provide this
+        // functionality.)
+        _ => false,
+    };
+    let immediate = if force_as_immediate {
+        match ecx.read_immediate(op) {
+            Ok(imm) => Right(imm),
+            Err(err) if !for_diagnostics => {
+                panic!("normalization works on validated constants: {err:?}")
+            }
+            _ => op.as_mplace_or_imm(),
+        }
+    } else {
+        op.as_mplace_or_imm()
+    };
+
+    debug!(?immediate);
+
+    match immediate {
+        Left(ref mplace) => {
+            // We know `offset` is relative to the allocation, so we can use `into_parts`.
+            let (prov, offset) = mplace.ptr().into_parts();
+            let alloc_id = prov.expect("cannot have `fake` place for non-ZST type").alloc_id();
+            ConstValue::Indirect { alloc_id, offset }
+        }
+        // see comment on `let force_as_immediate` above
+        Right(imm) => match *imm {
+            Immediate::Scalar(x) => ConstValue::Scalar(x),
+            Immediate::ScalarPair(a, b) => {
+                debug!("ScalarPair(a: {:?}, b: {:?})", a, b);
+                // This codepath solely exists for `valtree_to_const_value` to not need to generate
+                // a `ConstValue::Indirect` for wide references, so it is tightly restricted to just
+                // that case.
+                let pointee_ty = imm.layout.ty.builtin_deref(false).unwrap().ty; // `false` = no raw ptrs
+                debug_assert!(
+                    matches!(
+                        ecx.tcx.struct_tail_without_normalization(pointee_ty).kind(),
+                        ty::Str | ty::Slice(..),
+                    ),
+                    "`ConstValue::Slice` is for slice-tailed types only, but got {}",
+                    imm.layout.ty,
+                );
+                let msg = "`op_to_const` on an immediate scalar pair must only be used on slice references to the beginning of an actual allocation";
+                // We know `offset` is relative to the allocation, so we can use `into_parts`.
+                let (prov, offset) = a.to_pointer(ecx).expect(msg).into_parts();
+                let alloc_id = prov.expect(msg).alloc_id();
+                let data = ecx.tcx.global_alloc(alloc_id).unwrap_memory();
+                assert!(offset == abi::Size::ZERO, "{}", msg);
+                let meta = b.to_target_usize(ecx).expect(msg);
+                ConstValue::Slice { data, meta }
+            }
+            Immediate::Uninit => bug!("`Uninit` is not a valid value for {}", op.layout.ty),
+        },
+    }
+}
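As a rough reading of the rules above (illustrative only, not output taken from the compiler), here is which `ConstValue` variant typical constants end up as:

    const A: u32 = 7;            // scalar ABI: always ConstValue::Scalar
    const B: () = ();            // zero-sized: always ConstValue::ZeroSized
    const C: [u8; 4] = *b"abcd"; // aggregate: ConstValue::Indirect { alloc_id, offset }
    const D: &str = "hi";        // ScalarPair: not forced into an immediate, so it may stay
                                 // ConstValue::Indirect, or become ConstValue::Slice when it
                                 // reaches op_to_const as an immediate (e.g. coming from
                                 // valtree_to_const_value).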
+
+#[instrument(skip(tcx), level = "debug", ret)]
+pub(crate) fn turn_into_const_value<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    constant: ConstAlloc<'tcx>,
+    key: ty::ParamEnvAnd<'tcx, GlobalId<'tcx>>,
+) -> ConstValue<'tcx> {
+    let cid = key.value;
+    let def_id = cid.instance.def.def_id();
+    let is_static = tcx.is_static(def_id);
+    // This is just accessing an already computed constant, so no need to check alignment here.
+    let ecx = mk_eval_cx_to_read_const_val(
+        tcx,
+        tcx.def_span(key.value.instance.def_id()),
+        key.param_env,
+        CanAccessMutGlobal::from(is_static),
+    );
+
+    let mplace = ecx.raw_const_to_mplace(constant).expect(
+        "can only fail if layout computation failed, \
+        which should have given a good error before ever invoking this function",
+    );
+    assert!(
+        !is_static || cid.promoted.is_some(),
+        "the `eval_to_const_value_raw` query should not be used for statics, use `eval_to_allocation` instead"
+    );
+
+    // Turn this into a proper constant.
+    op_to_const(&ecx, &mplace.into(), /* for diagnostics */ false)
+}
+
+#[instrument(skip(tcx), level = "debug")]
+pub fn eval_to_const_value_raw_provider<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    key: ty::ParamEnvAnd<'tcx, GlobalId<'tcx>>,
+) -> ::rustc_middle::mir::interpret::EvalToConstValueResult<'tcx> {
+    // Const eval always happens in Reveal::All mode in order to be able to use the hidden types of
+    // opaque types. This is needed for trivial things like `size_of`, but also for using associated
+    // types that are not specified in the opaque type.
+    assert_eq!(key.param_env.reveal(), Reveal::All);
+
+    // We call `const_eval` for zero arg intrinsics, too, in order to cache their value.
+    // Catch such calls and evaluate them instead of trying to load a constant's MIR.
+    if let ty::InstanceDef::Intrinsic(def_id) = key.value.instance.def {
+        let ty = key.value.instance.ty(tcx, key.param_env);
+        let ty::FnDef(_, args) = ty.kind() else {
+            bug!("intrinsic with type {:?}", ty);
+        };
+        return eval_nullary_intrinsic(tcx, key.param_env, def_id, args).map_err(|error| {
+            let span = tcx.def_span(def_id);
+
+            super::report(
+                tcx,
+                error.into_kind(),
+                Some(span),
+                || (span, vec![]),
+                |span, _| errors::NullaryIntrinsicError { span },
+            )
+        });
+    }
+
+    tcx.eval_to_allocation_raw(key).map(|val| turn_into_const_value(tcx, val, key))
+}
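For orientation, a hedged example of the zero-argument intrinsics this shortcut serves: `size_of` needs no MIR body, so the query can evaluate it directly via `eval_nullary_intrinsic` instead of loading and interpreting MIR.

    use std::mem::size_of;

    // `size_of::<usize>()` bottoms out in a nullary intrinsic; evaluating this
    // constant takes the early-return path above rather than the MIR interpreter.
    const WORD: usize = size_of::<usize>();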
+
+#[instrument(skip(tcx), level = "debug")]
+pub fn eval_static_initializer_provider<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    def_id: LocalDefId,
+) -> ::rustc_middle::mir::interpret::EvalStaticInitializerRawResult<'tcx> {
+    assert!(tcx.is_static(def_id.to_def_id()));
+
+    let instance = ty::Instance::mono(tcx, def_id.to_def_id());
+    let cid = rustc_middle::mir::interpret::GlobalId { instance, promoted: None };
+    eval_in_interpreter(tcx, cid, ty::ParamEnv::reveal_all())
+}
+
+pub trait InterpretationResult<'tcx> {
+    /// This function takes the place where the result of the evaluation is stored
+    /// and prepares it for returning it in the appropriate format needed by the specific
+    /// evaluation query.
+    fn make_result<'mir>(
+        mplace: MPlaceTy<'tcx>,
+        ecx: &mut InterpCx<'mir, 'tcx, CompileTimeInterpreter<'mir, 'tcx>>,
+    ) -> Self;
+}
+
+impl<'tcx> InterpretationResult<'tcx> for ConstAlloc<'tcx> {
+    fn make_result<'mir>(
+        mplace: MPlaceTy<'tcx>,
+        _ecx: &mut InterpCx<'mir, 'tcx, CompileTimeInterpreter<'mir, 'tcx>>,
+    ) -> Self {
+        ConstAlloc { alloc_id: mplace.ptr().provenance.unwrap().alloc_id(), ty: mplace.layout.ty }
+    }
+}
+
+#[instrument(skip(tcx), level = "debug")]
+pub fn eval_to_allocation_raw_provider<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    key: ty::ParamEnvAnd<'tcx, GlobalId<'tcx>>,
+) -> ::rustc_middle::mir::interpret::EvalToAllocationRawResult<'tcx> {
+    // This shouldn't be used for statics, since statics are conceptually places,
+    // not values -- so what we do here could break pointer identity.
+    assert!(key.value.promoted.is_some() || !tcx.is_static(key.value.instance.def_id()));
+    // Const eval always happens in Reveal::All mode in order to be able to use the hidden types of
+    // opaque types. This is needed for trivial things like `size_of`, but also for using associated
+    // types that are not specified in the opaque type.
+
+    assert_eq!(key.param_env.reveal(), Reveal::All);
+    if cfg!(debug_assertions) {
+        // Make sure we format the instance even if we do not print it.
+        // This serves as a regression test against an ICE on printing.
+        // The next two lines concatenated contain some discussion:
+        // https://rust-lang.zulipchat.com/#narrow/stream/146212-t-compiler.2Fconst-eval/
+        // subject/anon_const_instance_printing/near/135980032
+        let instance = with_no_trimmed_paths!(key.value.instance.to_string());
+        trace!("const eval: {:?} ({})", key, instance);
+    }
+
+    eval_in_interpreter(tcx, key.value, key.param_env)
+}
+
+fn eval_in_interpreter<'tcx, R: InterpretationResult<'tcx>>(
+    tcx: TyCtxt<'tcx>,
+    cid: GlobalId<'tcx>,
+    param_env: ty::ParamEnv<'tcx>,
+) -> Result<R, ErrorHandled> {
+    let def = cid.instance.def.def_id();
+    let is_static = tcx.is_static(def);
+
+    let mut ecx = InterpCx::new(
+        tcx,
+        tcx.def_span(def),
+        param_env,
+        // Statics (and promoteds inside statics) may access mutable global memory, because unlike consts
+        // they do not have to behave "as if" they were evaluated at runtime.
+        // For consts however we want to ensure they behave "as if" they were evaluated at runtime,
+        // so we have to reject reading mutable global memory.
+        CompileTimeInterpreter::new(CanAccessMutGlobal::from(is_static), CheckAlignment::Error),
+    );
+    let res = ecx.load_mir(cid.instance.def, cid.promoted);
+    res.and_then(|body| eval_body_using_ecx(&mut ecx, cid, body)).map_err(|error| {
+        let (error, backtrace) = error.into_parts();
+        backtrace.print_backtrace();
+
+        let (kind, instance) = if ecx.tcx.is_static(cid.instance.def_id()) {
+            ("static", String::new())
+        } else {
+            // If the current item has generics, we'd like to enrich the message with the
+            // instance and its args, so that the actual compile-time values are shown in
+            // addition to the expression that led to the const eval error.
+            let instance = &cid.instance;
+            if !instance.args.is_empty() {
+                let instance = with_no_trimmed_paths!(instance.to_string());
+                ("const_with_path", instance)
+            } else {
+                ("const", String::new())
+            }
+        };
+
+        super::report(
+            *ecx.tcx,
+            error,
+            None,
+            || super::get_span_and_frames(ecx.tcx, ecx.stack()),
+            |span, frames| ConstEvalError { span, error_kind: kind, instance, frame_notes: frames },
+        )
+    })
+}
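A hedged illustration of the distinction drawn in the comment inside `eval_in_interpreter`: a `const` must behave as if it were evaluated at runtime, so it may not depend on mutable global memory, while a `static` initializer may refer to other statics.

    static mut COUNTER: u32 = 0;

    // Rejected: a constant may not read mutable global state (and, more generally,
    // constants cannot refer to statics).
    // const BAD: u32 = unsafe { COUNTER };

    static BASE: u32 = 10;
    // Fine: statics are places evaluated once, so one static may refer to another.
    static DERIVED: &u32 = &BASE;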
+
+#[inline(always)]
+fn const_validate_mplace<'mir, 'tcx>(
+    ecx: &InterpCx<'mir, 'tcx, CompileTimeInterpreter<'mir, 'tcx>>,
+    mplace: &MPlaceTy<'tcx>,
+    cid: GlobalId<'tcx>,
+) -> Result<(), ErrorHandled> {
+    let alloc_id = mplace.ptr().provenance.unwrap().alloc_id();
+    let mut ref_tracking = RefTracking::new(mplace.clone());
+    let mut inner = false;
+    while let Some((mplace, path)) = ref_tracking.todo.pop() {
+        let mode = match ecx.tcx.static_mutability(cid.instance.def_id()) {
+            _ if cid.promoted.is_some() => CtfeValidationMode::Promoted,
+            Some(mutbl) => CtfeValidationMode::Static { mutbl }, // a `static`
+            None => {
+                // This is a normal `const` (not promoted).
+                // The outermost allocation is always only copied, so having `UnsafeCell` in there
+                // is okay despite them being in immutable memory.
+                CtfeValidationMode::Const { allow_immutable_unsafe_cell: !inner }
+            }
+        };
+        ecx.const_validate_operand(&mplace.into(), path, &mut ref_tracking, mode)
+            // Instead of just reporting the `InterpError` via the usual machinery, we give a more targeted
+            // error about the validation failure.
+            .map_err(|error| report_validation_error(&ecx, error, alloc_id))?;
+        inner = true;
+    }
+
+    Ok(())
+}
+
+#[inline(always)]
+fn report_validation_error<'mir, 'tcx>(
+    ecx: &InterpCx<'mir, 'tcx, CompileTimeInterpreter<'mir, 'tcx>>,
+    error: InterpErrorInfo<'tcx>,
+    alloc_id: AllocId,
+) -> ErrorHandled {
+    let (error, backtrace) = error.into_parts();
+    backtrace.print_backtrace();
+
+    let ub_note = matches!(error, InterpError::UndefinedBehavior(_)).then(|| {});
+
+    let bytes = ecx.print_alloc_bytes_for_diagnostics(alloc_id);
+    let (size, align, _) = ecx.get_alloc_info(alloc_id);
+    let raw_bytes = errors::RawBytesNote { size: size.bytes(), align: align.bytes(), bytes };
+
+    crate::const_eval::report(
+        *ecx.tcx,
+        error,
+        None,
+        || crate::const_eval::get_span_and_frames(ecx.tcx, ecx.stack()),
+        move |span, frames| errors::ValidationFailure { span, ub_note, frames, raw_bytes },
+    )
+}
diff --git a/compiler/rustc_const_eval/src/const_eval/fn_queries.rs b/compiler/rustc_const_eval/src/const_eval/fn_queries.rs
new file mode 100644
index 00000000000..ddad6683afb
--- /dev/null
+++ b/compiler/rustc_const_eval/src/const_eval/fn_queries.rs
@@ -0,0 +1,98 @@
+use rustc_attr as attr;
+use rustc_hir as hir;
+use rustc_hir::def::DefKind;
+use rustc_hir::def_id::{DefId, LocalDefId};
+use rustc_middle::query::Providers;
+use rustc_middle::ty::TyCtxt;
+use rustc_span::symbol::Symbol;
+
+/// Whether the `def_id` is an unstable const fn and what feature gate(s) are necessary to enable
+/// it.
+pub fn is_unstable_const_fn(tcx: TyCtxt<'_>, def_id: DefId) -> Option<(Symbol, Option<Symbol>)> {
+    if tcx.is_const_fn_raw(def_id) {
+        let const_stab = tcx.lookup_const_stability(def_id)?;
+        match const_stab.level {
+            attr::StabilityLevel::Unstable { implied_by, .. } => {
+                Some((const_stab.feature, implied_by))
+            }
+            attr::StabilityLevel::Stable { .. } => None,
+        }
+    } else {
+        None
+    }
+}
+
+pub fn is_parent_const_impl_raw(tcx: TyCtxt<'_>, def_id: LocalDefId) -> bool {
+    let parent_id = tcx.local_parent(def_id);
+    matches!(tcx.def_kind(parent_id), DefKind::Impl { .. })
+        && tcx.constness(parent_id) == hir::Constness::Const
+}
+
+/// Checks whether an item is considered to be `const`. If it is a constructor, anonymous const,
+/// const block, const item or associated const, it is const. If it is a trait impl/function,
+/// return whether it has a `const` modifier. If it is an intrinsic, report whether said intrinsic
+/// has a `rustc_const_{un,}stable` attribute. Otherwise, return `Constness::NotConst`.
+fn constness(tcx: TyCtxt<'_>, def_id: LocalDefId) -> hir::Constness {
+    let node = tcx.hir_node_by_def_id(def_id);
+
+    match node {
+        hir::Node::Ctor(_)
+        | hir::Node::AnonConst(_)
+        | hir::Node::ConstBlock(_)
+        | hir::Node::ImplItem(hir::ImplItem { kind: hir::ImplItemKind::Const(..), .. }) => {
+            hir::Constness::Const
+        }
+        hir::Node::Item(hir::Item { kind: hir::ItemKind::Impl(_), .. }) => tcx
+            .generics_of(def_id)
+            .host_effect_index
+            .map_or(hir::Constness::NotConst, |_| hir::Constness::Const),
+        hir::Node::ForeignItem(hir::ForeignItem { kind: hir::ForeignItemKind::Fn(..), .. }) => {
+            // Intrinsics use `rustc_const_{un,}stable` attributes to indicate constness. All other
+            // foreign items cannot be evaluated at compile-time.
+            let is_const = if tcx.intrinsic(def_id).is_some() {
+                tcx.lookup_const_stability(def_id).is_some()
+            } else {
+                false
+            };
+            if is_const { hir::Constness::Const } else { hir::Constness::NotConst }
+        }
+        hir::Node::Expr(e) if let hir::ExprKind::Closure(c) = e.kind => c.constness,
+        _ => {
+            if let Some(fn_kind) = node.fn_kind() {
+                if fn_kind.constness() == hir::Constness::Const {
+                    return hir::Constness::Const;
+                }
+
+                // If the function itself is not annotated with `const`, it may still be a `const fn`
+                // if it resides in a const trait impl.
+                let is_const = is_parent_const_impl_raw(tcx, def_id);
+                if is_const { hir::Constness::Const } else { hir::Constness::NotConst }
+            } else {
+                hir::Constness::NotConst
+            }
+        }
+    }
+}
+
+fn is_promotable_const_fn(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
+    tcx.is_const_fn(def_id)
+        && match tcx.lookup_const_stability(def_id) {
+            Some(stab) => {
+                if cfg!(debug_assertions) && stab.promotable {
+                    let sig = tcx.fn_sig(def_id);
+                    assert_eq!(
+                        sig.skip_binder().unsafety(),
+                        hir::Unsafety::Normal,
+                        "don't mark const unsafe fns as promotable",
+                        // https://github.com/rust-lang/rust/pull/53851#issuecomment-418760682
+                    );
+                }
+                stab.promotable
+            }
+            None => false,
+        }
+}
+
+pub fn provide(providers: &mut Providers) {
+    *providers = Providers { constness, is_promotable_const_fn, ..*providers };
+}
diff --git a/compiler/rustc_const_eval/src/const_eval/machine.rs b/compiler/rustc_const_eval/src/const_eval/machine.rs
new file mode 100644
index 00000000000..dd835279df3
--- /dev/null
+++ b/compiler/rustc_const_eval/src/const_eval/machine.rs
@@ -0,0 +1,765 @@
+use std::borrow::Borrow;
+use std::fmt;
+use std::hash::Hash;
+use std::ops::ControlFlow;
+
+use rustc_ast::Mutability;
+use rustc_data_structures::fx::FxIndexMap;
+use rustc_data_structures::fx::IndexEntry;
+use rustc_hir::def::DefKind;
+use rustc_hir::def_id::DefId;
+use rustc_hir::def_id::LocalDefId;
+use rustc_hir::LangItem;
+use rustc_middle::mir;
+use rustc_middle::mir::AssertMessage;
+use rustc_middle::query::TyCtxtAt;
+use rustc_middle::ty;
+use rustc_middle::ty::layout::{FnAbiOf, TyAndLayout};
+use rustc_session::lint::builtin::WRITES_THROUGH_IMMUTABLE_POINTER;
+use rustc_span::symbol::{sym, Symbol};
+use rustc_span::Span;
+use rustc_target::abi::{Align, Size};
+use rustc_target::spec::abi::Abi as CallAbi;
+
+use crate::errors::{LongRunning, LongRunningWarn};
+use crate::fluent_generated as fluent;
+use crate::interpret::{
+    self, compile_time_machine, AllocId, AllocRange, ConstAllocation, CtfeProvenance, FnArg, FnVal,
+    Frame, ImmTy, InterpCx, InterpResult, MPlaceTy, OpTy, Pointer, PointerArithmetic, Scalar,
+};
+
+use super::error::*;
+
+/// When hitting this many interpreted terminators we emit a deny-by-default lint
+/// that notifies the user that their constant takes a long time to evaluate. If that's
+/// what they intended, they can just allow the lint.
+const LINT_TERMINATOR_LIMIT: usize = 2_000_000;
+/// The limit used by `-Z tiny-const-eval-limit`. This smaller limit is useful for internal
+/// tests that should not need to run for 30s or more to exhibit this behaviour.
+const TINY_LINT_TERMINATOR_LIMIT: usize = 20;
+/// After this many interpreted terminators, we start emitting progress indicators at every
+/// power of two of interpreted terminators.
+const PROGRESS_INDICATOR_START: usize = 4_000_000;
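A hedged example of a constant that runs into this machinery, together with the opt-out the comment above mentions (allowing the deny-by-default `long_running_const_eval` lint). The exact number of terminators per loop iteration depends on the generated MIR, so the iteration count here is only meant to comfortably exceed `LINT_TERMINATOR_LIMIT`.

    // Without the `allow`, the deny-by-default lint fires once roughly 2_000_000
    // terminators have been interpreted.
    #[allow(long_running_const_eval)]
    const SLOW: u64 = {
        let mut n = 0u64;
        while n < 5_000_000 {
            n += 1;
        }
        n
    };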
+
+/// Extra machine state for CTFE, and the Machine instance
+pub struct CompileTimeInterpreter<'mir, 'tcx> {
+    /// The number of terminators that have been evaluated.
+    ///
+    /// This is used to produce lints informing the user that the compiler is not stuck.
+    /// Set to `usize::MAX` to never report anything.
+    pub(super) num_evaluated_steps: usize,
+
+    /// The virtual call stack.
+    pub(super) stack: Vec<Frame<'mir, 'tcx>>,
+
+    /// Pattern matching on consts with references would be unsound if those references
+    /// could point to anything mutable. Therefore, when evaluating consts and when constructing valtrees,
+    /// we ensure that only immutable global memory can be accessed.
+    pub(super) can_access_mut_global: CanAccessMutGlobal,
+
+    /// Whether to check alignment during evaluation.
+    pub(super) check_alignment: CheckAlignment,
+
+    /// If `Some`, we are evaluating the initializer of the static with the given `LocalDefId`,
+    /// storing the result in the given `AllocId`.
+    /// Used to prevent reads from a static's base allocation, as that may allow for self-initialization loops.
+    pub(crate) static_root_ids: Option<(AllocId, LocalDefId)>,
+}
+
+#[derive(Copy, Clone)]
+pub enum CheckAlignment {
+    /// Ignore all alignment requirements.
+    /// This is mainly used in interning.
+    No,
+    /// Hard error when dereferencing a misaligned pointer.
+    Error,
+}
+
+#[derive(Copy, Clone, PartialEq)]
+pub(crate) enum CanAccessMutGlobal {
+    No,
+    Yes,
+}
+
+impl From<bool> for CanAccessMutGlobal {
+    fn from(value: bool) -> Self {
+        if value { Self::Yes } else { Self::No }
+    }
+}
+
+impl<'mir, 'tcx> CompileTimeInterpreter<'mir, 'tcx> {
+    pub(crate) fn new(
+        can_access_mut_global: CanAccessMutGlobal,
+        check_alignment: CheckAlignment,
+    ) -> Self {
+        CompileTimeInterpreter {
+            num_evaluated_steps: 0,
+            stack: Vec::new(),
+            can_access_mut_global,
+            check_alignment,
+            static_root_ids: None,
+        }
+    }
+}
+
+impl<K: Hash + Eq, V> interpret::AllocMap<K, V> for FxIndexMap<K, V> {
+    #[inline(always)]
+    fn contains_key<Q: ?Sized + Hash + Eq>(&mut self, k: &Q) -> bool
+    where
+        K: Borrow<Q>,
+    {
+        FxIndexMap::contains_key(self, k)
+    }
+
+    #[inline(always)]
+    fn contains_key_ref<Q: ?Sized + Hash + Eq>(&self, k: &Q) -> bool
+    where
+        K: Borrow<Q>,
+    {
+        FxIndexMap::contains_key(self, k)
+    }
+
+    #[inline(always)]
+    fn insert(&mut self, k: K, v: V) -> Option<V> {
+        FxIndexMap::insert(self, k, v)
+    }
+
+    #[inline(always)]
+    fn remove<Q: ?Sized + Hash + Eq>(&mut self, k: &Q) -> Option<V>
+    where
+        K: Borrow<Q>,
+    {
+        // FIXME(#120456) - is `swap_remove` correct?
+        FxIndexMap::swap_remove(self, k)
+    }
+
+    #[inline(always)]
+    fn filter_map_collect<T>(&self, mut f: impl FnMut(&K, &V) -> Option<T>) -> Vec<T> {
+        self.iter().filter_map(move |(k, v)| f(k, &*v)).collect()
+    }
+
+    #[inline(always)]
+    fn get_or<E>(&self, k: K, vacant: impl FnOnce() -> Result<V, E>) -> Result<&V, E> {
+        match self.get(&k) {
+            Some(v) => Ok(v),
+            None => {
+                vacant()?;
+                bug!("The CTFE machine shouldn't ever need to extend the alloc_map when reading")
+            }
+        }
+    }
+
+    #[inline(always)]
+    fn get_mut_or<E>(&mut self, k: K, vacant: impl FnOnce() -> Result<V, E>) -> Result<&mut V, E> {
+        match self.entry(k) {
+            IndexEntry::Occupied(e) => Ok(e.into_mut()),
+            IndexEntry::Vacant(e) => {
+                let v = vacant()?;
+                Ok(e.insert(v))
+            }
+        }
+    }
+}
+
+pub(crate) type CompileTimeEvalContext<'mir, 'tcx> =
+    InterpCx<'mir, 'tcx, CompileTimeInterpreter<'mir, 'tcx>>;
+
+#[derive(Debug, PartialEq, Eq, Copy, Clone)]
+pub enum MemoryKind {
+    Heap,
+}
+
+impl fmt::Display for MemoryKind {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            MemoryKind::Heap => write!(f, "heap allocation"),
+        }
+    }
+}
+
+impl interpret::MayLeak for MemoryKind {
+    #[inline(always)]
+    fn may_leak(self) -> bool {
+        match self {
+            MemoryKind::Heap => false,
+        }
+    }
+}
+
+impl interpret::MayLeak for ! {
+    #[inline(always)]
+    fn may_leak(self) -> bool {
+        // `self` is uninhabited
+        self
+    }
+}
+
+impl<'mir, 'tcx: 'mir> CompileTimeEvalContext<'mir, 'tcx> {
+    fn location_triple_for_span(&self, span: Span) -> (Symbol, u32, u32) {
+        let topmost = span.ctxt().outer_expn().expansion_cause().unwrap_or(span);
+        let caller = self.tcx.sess.source_map().lookup_char_pos(topmost.lo());
+
+        use rustc_session::{config::RemapPathScopeComponents, RemapFileNameExt};
+        (
+            Symbol::intern(
+                &caller
+                    .file
+                    .name
+                    .for_scope(self.tcx.sess, RemapPathScopeComponents::DIAGNOSTICS)
+                    .to_string_lossy(),
+            ),
+            u32::try_from(caller.line).unwrap(),
+            u32::try_from(caller.col_display).unwrap().checked_add(1).unwrap(),
+        )
+    }
+
+    /// "Intercept" a function call, because we have something special to do for it.
+    /// All `#[rustc_do_not_const_check]` functions should be hooked here.
+    /// If this returns `Some` function, which may be `instance` or a different function with
+    /// compatible arguments, then evaluation should continue with that function.
+    /// If this returns `None`, the function call has been handled and the function has returned.
+    fn hook_special_const_fn(
+        &mut self,
+        instance: ty::Instance<'tcx>,
+        args: &[FnArg<'tcx>],
+        dest: &MPlaceTy<'tcx>,
+        ret: Option<mir::BasicBlock>,
+    ) -> InterpResult<'tcx, Option<ty::Instance<'tcx>>> {
+        let def_id = instance.def_id();
+
+        if self.tcx.has_attr(def_id, sym::rustc_const_panic_str)
+            || Some(def_id) == self.tcx.lang_items().begin_panic_fn()
+        {
+            let args = self.copy_fn_args(args);
+            // &str or &&str
+            assert!(args.len() == 1);
+
+            let mut msg_place = self.deref_pointer(&args[0])?;
+            while msg_place.layout.ty.is_ref() {
+                msg_place = self.deref_pointer(&msg_place)?;
+            }
+
+            let msg = Symbol::intern(self.read_str(&msg_place)?);
+            let span = self.find_closest_untracked_caller_location();
+            let (file, line, col) = self.location_triple_for_span(span);
+            return Err(ConstEvalErrKind::Panic { msg, file, line, col }.into());
+        } else if Some(def_id) == self.tcx.lang_items().panic_fmt() {
+            // For panic_fmt, call const_panic_fmt instead.
+            let const_def_id = self.tcx.require_lang_item(LangItem::ConstPanicFmt, None);
+            let new_instance = ty::Instance::expect_resolve(
+                *self.tcx,
+                ty::ParamEnv::reveal_all(),
+                const_def_id,
+                instance.args,
+            );
+
+            return Ok(Some(new_instance));
+        } else if Some(def_id) == self.tcx.lang_items().align_offset_fn() {
+            let args = self.copy_fn_args(args);
+            // For align_offset, we replace the function call if the pointer has no address.
+            match self.align_offset(instance, &args, dest, ret)? {
+                ControlFlow::Continue(()) => return Ok(Some(instance)),
+                ControlFlow::Break(()) => return Ok(None),
+            }
+        }
+        Ok(Some(instance))
+    }
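For context, a minimal example of the path this hook handles: a panic during const evaluation is turned into the `ConstEvalErrKind::Panic { msg, file, line, col }` error above instead of unwinding (the precise diagnostic wording is up to the compiler).

    const BAD: u32 = {
        let x = 3;
        if x > 2 {
            // Routed through the begin_panic / panic_fmt interception above;
            // compilation fails with a const-eval error carrying this message
            // and the file/line/column of the call.
            panic!("x is too large");
        }
        x
    };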
+
+    /// `align_offset(ptr, target_align)` needs special handling in const eval, because the pointer
+    /// may not have an address.
+    ///
+    /// If `ptr` does have a known address, then we return `Continue(())` and the function call should
+    /// proceed as normal.
+    ///
+    /// If `ptr` doesn't have an address, but its underlying allocation's alignment is at most
+    /// `target_align`, then we call the function again with a dummy address relative to the
+    /// allocation.
+    ///
+    /// If `ptr` doesn't have an address and `target_align` is stricter than the underlying
+    /// allocation's alignment, then we return `usize::MAX` immediately.
+    fn align_offset(
+        &mut self,
+        instance: ty::Instance<'tcx>,
+        args: &[OpTy<'tcx>],
+        dest: &MPlaceTy<'tcx>,
+        ret: Option<mir::BasicBlock>,
+    ) -> InterpResult<'tcx, ControlFlow<()>> {
+        assert_eq!(args.len(), 2);
+
+        let ptr = self.read_pointer(&args[0])?;
+        let target_align = self.read_scalar(&args[1])?.to_target_usize(self)?;
+
+        if !target_align.is_power_of_two() {
+            throw_ub_custom!(
+                fluent::const_eval_align_offset_invalid_align,
+                target_align = target_align,
+            );
+        }
+
+        match self.ptr_try_get_alloc_id(ptr) {
+            Ok((alloc_id, offset, _extra)) => {
+                let (_size, alloc_align, _kind) = self.get_alloc_info(alloc_id);
+
+                if target_align <= alloc_align.bytes() {
+                    // Extract the address relative to the allocation base that is definitely
+                    // sufficiently aligned and call `align_offset` again.
+                    let addr = ImmTy::from_uint(offset.bytes(), args[0].layout).into();
+                    let align = ImmTy::from_uint(target_align, args[1].layout).into();
+                    let fn_abi = self.fn_abi_of_instance(instance, ty::List::empty())?;
+
+                    // We replace the entire function call with a "tail call".
+                    // Note that this happens before the frame of the original function
+                    // is pushed on the stack.
+                    self.eval_fn_call(
+                        FnVal::Instance(instance),
+                        (CallAbi::Rust, fn_abi),
+                        &[FnArg::Copy(addr), FnArg::Copy(align)],
+                        /* with_caller_location = */ false,
+                        dest,
+                        ret,
+                        mir::UnwindAction::Unreachable,
+                    )?;
+                    Ok(ControlFlow::Break(()))
+                } else {
+                    // Not alignable in const, return `usize::MAX`.
+                    let usize_max = Scalar::from_target_usize(self.target_usize_max(), self);
+                    self.write_scalar(usize_max, dest)?;
+                    self.return_to_block(ret)?;
+                    Ok(ControlFlow::Break(()))
+                }
+            }
+            Err(_addr) => {
+                // The pointer has an address, continue with function call.
+                Ok(ControlFlow::Continue(()))
+            }
+        }
+    }
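A hedged restatement of the three cases from the doc comment as a small decision function (the names here are invented for illustration; the real logic is the method above):

    enum ConstAlignOffset {
        // The pointer has a concrete address: run the real `align_offset` body.
        RunNormally,
        // No address, but the allocation is at least as aligned as requested:
        // re-run the call with the offset relative to the allocation base.
        RetryWithOffset(u64),
        // No address and the requested alignment is stricter than the allocation's:
        // the answer cannot be known, so return usize::MAX.
        NotAlignable,
    }

    fn decide(has_address: bool, offset_in_alloc: u64, alloc_align: u64, target_align: u64) -> ConstAlignOffset {
        if has_address {
            ConstAlignOffset::RunNormally
        } else if target_align <= alloc_align {
            ConstAlignOffset::RetryWithOffset(offset_in_alloc)
        } else {
            ConstAlignOffset::NotAlignable
        }
    }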
+
+    /// See documentation on the `ptr_guaranteed_cmp` intrinsic.
+    fn guaranteed_cmp(&mut self, a: Scalar, b: Scalar) -> InterpResult<'tcx, u8> {
+        Ok(match (a, b) {
+            // Comparisons between integers are always known.
+            (Scalar::Int { .. }, Scalar::Int { .. }) => {
+                if a == b {
+                    1
+                } else {
+                    0
+                }
+            }
+            // Comparisons of abstract pointers with null pointers are known if the pointer
+            // is in bounds, because if they are in bounds, the pointer can't be null.
+            // Inequality with integers other than null can never be known for sure.
+            (Scalar::Int(int), ptr @ Scalar::Ptr(..))
+            | (ptr @ Scalar::Ptr(..), Scalar::Int(int))
+                if int.is_null() && !self.scalar_may_be_null(ptr)? =>
+            {
+                0
+            }
+            // Equality with integers can never be known for sure.
+            (Scalar::Int { .. }, Scalar::Ptr(..)) | (Scalar::Ptr(..), Scalar::Int { .. }) => 2,
+            // FIXME: return a `1` for when both sides are the same pointer, *except* that
+            // some things (like functions and vtables) do not have stable addresses
+            // so we need to be careful around them (see e.g. #73722).
+            // FIXME: return `0` for at least some comparisons where we can reliably
+            // determine the result of runtime inequality tests at compile-time.
+            // Examples include comparison of addresses in different static items.
+            (Scalar::Ptr(..), Scalar::Ptr(..)) => 2,
+        })
+    }
+}
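For orientation, a hedged sketch of how the three return values are consumed: they back the unstable `<*const T>::guaranteed_eq`/`guaranteed_ne` methods, which (on current nightlies, taken as an assumption here) report `Option<bool>`. The helper below is purely illustrative, not the real standard-library code.

    // 0 = known unequal, 1 = known equal, 2 = not knowable at compile time.
    fn interpret_guaranteed_cmp(code: u8) -> (Option<bool>, Option<bool>) {
        let (eq, ne) = match code {
            0 => (Some(false), Some(true)),
            1 => (Some(true), Some(false)),
            _ => (None, None), // comparison must be decided at runtime
        };
        (eq, ne)
    }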
+
+impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir, 'tcx> {
+    compile_time_machine!(<'mir, 'tcx>);
+
+    type MemoryKind = MemoryKind;
+
+    const PANIC_ON_ALLOC_FAIL: bool = false; // will be raised as a proper error
+
+    #[inline(always)]
+    fn enforce_alignment(ecx: &InterpCx<'mir, 'tcx, Self>) -> bool {
+        matches!(ecx.machine.check_alignment, CheckAlignment::Error)
+    }
+
+    #[inline(always)]
+    fn enforce_validity(ecx: &InterpCx<'mir, 'tcx, Self>, layout: TyAndLayout<'tcx>) -> bool {
+        ecx.tcx.sess.opts.unstable_opts.extra_const_ub_checks || layout.abi.is_uninhabited()
+    }
+
+    fn load_mir(
+        ecx: &InterpCx<'mir, 'tcx, Self>,
+        instance: ty::InstanceDef<'tcx>,
+    ) -> InterpResult<'tcx, &'tcx mir::Body<'tcx>> {
+        match instance {
+            ty::InstanceDef::Item(def) => {
+                if ecx.tcx.is_ctfe_mir_available(def) {
+                    Ok(ecx.tcx.mir_for_ctfe(def))
+                } else if ecx.tcx.def_kind(def) == DefKind::AssocConst {
+                    ecx.tcx.dcx().bug("This is likely a const item that is missing from its impl");
+                } else {
+                    // `find_mir_or_eval_fn` checks that this is a const fn before even calling us,
+                    // so this should be unreachable.
+                    let path = ecx.tcx.def_path_str(def);
+                    bug!("trying to call extern function `{path}` at compile-time");
+                }
+            }
+            _ => Ok(ecx.tcx.instance_mir(instance)),
+        }
+    }
+
+    fn find_mir_or_eval_fn(
+        ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        orig_instance: ty::Instance<'tcx>,
+        _abi: CallAbi,
+        args: &[FnArg<'tcx>],
+        dest: &MPlaceTy<'tcx>,
+        ret: Option<mir::BasicBlock>,
+        _unwind: mir::UnwindAction, // unwinding is not supported in consts
+    ) -> InterpResult<'tcx, Option<(&'mir mir::Body<'tcx>, ty::Instance<'tcx>)>> {
+        debug!("find_mir_or_eval_fn: {:?}", orig_instance);
+
+        // Replace some functions.
+        let Some(instance) = ecx.hook_special_const_fn(orig_instance, args, dest, ret)? else {
+            // Call has already been handled.
+            return Ok(None);
+        };
+
+        // Only check non-glue functions
+        if let ty::InstanceDef::Item(def) = instance.def {
+            // Execution might have wandered off into other crates, so we cannot do a stability-
+            // sensitive check here. But we can at least rule out functions that are not const at
+            // all. That said, we have to allow calling functions inside a trait marked with
+            // #[const_trait]. These *are* const-checked!
+            // FIXME: why does `is_const_fn_raw` not classify them as const?
+            if (!ecx.tcx.is_const_fn_raw(def) && !ecx.tcx.is_const_default_method(def))
+                || ecx.tcx.has_attr(def, sym::rustc_do_not_const_check)
+            {
+                // We certainly do *not* want to actually call the fn
+                // though, so be sure we return here.
+                throw_unsup_format!("calling non-const function `{}`", instance)
+            }
+        }
+
+        // This is a const fn. Call it.
+        // In case of replacement, we return the *original* instance to make backtraces work out
+        // (and we hope this does not confuse the FnAbi checks too much).
+        Ok(Some((ecx.load_mir(instance.def, None)?, orig_instance)))
+    }
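A hedged illustration of what this check rejects, from the user's side. In simple cases the structural const checker reports such calls first (E0015); the check here is the interpreter-side backstop for calls that only become visible during evaluation, e.g. when execution has wandered into another crate as the comment notes.

    fn runtime_only() -> u32 { 42 }

    // Rejected: `runtime_only` is not a `const fn`, so it may not be called
    // while computing a constant.
    // const X: u32 = runtime_only();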
+
+    fn panic_nounwind(ecx: &mut InterpCx<'mir, 'tcx, Self>, msg: &str) -> InterpResult<'tcx> {
+        let msg = Symbol::intern(msg);
+        let span = ecx.find_closest_untracked_caller_location();
+        let (file, line, col) = ecx.location_triple_for_span(span);
+        Err(ConstEvalErrKind::Panic { msg, file, line, col }.into())
+    }
+
+    fn call_intrinsic(
+        ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        instance: ty::Instance<'tcx>,
+        args: &[OpTy<'tcx>],
+        dest: &MPlaceTy<'tcx, Self::Provenance>,
+        target: Option<mir::BasicBlock>,
+        _unwind: mir::UnwindAction,
+    ) -> InterpResult<'tcx> {
+        // Shared intrinsics.
+        if ecx.emulate_intrinsic(instance, args, dest, target)? {
+            return Ok(());
+        }
+        let intrinsic_name = ecx.tcx.item_name(instance.def_id());
+
+        // CTFE-specific intrinsics.
+        let Some(ret) = target else {
+            throw_unsup_format!("intrinsic `{intrinsic_name}` is not supported at compile-time");
+        };
+        match intrinsic_name {
+            sym::ptr_guaranteed_cmp => {
+                let a = ecx.read_scalar(&args[0])?;
+                let b = ecx.read_scalar(&args[1])?;
+                let cmp = ecx.guaranteed_cmp(a, b)?;
+                ecx.write_scalar(Scalar::from_u8(cmp), dest)?;
+            }
+            sym::const_allocate => {
+                let size = ecx.read_scalar(&args[0])?.to_target_usize(ecx)?;
+                let align = ecx.read_scalar(&args[1])?.to_target_usize(ecx)?;
+
+                let align = match Align::from_bytes(align) {
+                    Ok(a) => a,
+                    Err(err) => throw_ub_custom!(
+                        fluent::const_eval_invalid_align_details,
+                        name = "const_allocate",
+                        err_kind = err.diag_ident(),
+                        align = err.align()
+                    ),
+                };
+
+                let ptr = ecx.allocate_ptr(
+                    Size::from_bytes(size),
+                    align,
+                    interpret::MemoryKind::Machine(MemoryKind::Heap),
+                )?;
+                ecx.write_pointer(ptr, dest)?;
+            }
+            sym::const_deallocate => {
+                let ptr = ecx.read_pointer(&args[0])?;
+                let size = ecx.read_scalar(&args[1])?.to_target_usize(ecx)?;
+                let align = ecx.read_scalar(&args[2])?.to_target_usize(ecx)?;
+
+                let size = Size::from_bytes(size);
+                let align = match Align::from_bytes(align) {
+                    Ok(a) => a,
+                    Err(err) => throw_ub_custom!(
+                        fluent::const_eval_invalid_align_details,
+                        name = "const_deallocate",
+                        err_kind = err.diag_ident(),
+                        align = err.align()
+                    ),
+                };
+
+                // If an allocation was created in another const,
+                // we don't deallocate it.
+                let (alloc_id, _, _) = ecx.ptr_get_alloc_id(ptr)?;
+                let is_allocated_in_another_const = matches!(
+                    ecx.tcx.try_get_global_alloc(alloc_id),
+                    Some(interpret::GlobalAlloc::Memory(_))
+                );
+
+                if !is_allocated_in_another_const {
+                    ecx.deallocate_ptr(
+                        ptr,
+                        Some((size, align)),
+                        interpret::MemoryKind::Machine(MemoryKind::Heap),
+                    )?;
+                }
+            }
+            // The intrinsic represents whether the value is known to the optimizer (LLVM).
+            // We're not doing any optimizations here, so there is no optimizer that could know the value.
+            // (We know the value here in the machine of course, but this is the runtime of that code,
+            // not the optimization stage.)
+            sym::is_val_statically_known => ecx.write_scalar(Scalar::from_bool(false), dest)?,
+            _ => {
+                throw_unsup_format!(
+                    "intrinsic `{intrinsic_name}` is not supported at compile-time"
+                );
+            }
+        }
+
+        ecx.go_to_block(ret);
+        Ok(())
+    }
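A hedged sketch of the nightly-only surface these two intrinsics back (feature names and signatures as on recent nightlies, taken as assumptions here):

    #![feature(core_intrinsics, const_heap)]
    use std::intrinsics::{const_allocate, const_deallocate};

    const _: () = unsafe {
        // 16 bytes, 8-byte aligned, usable only during this const evaluation.
        let ptr = const_allocate(16, 8);
        // ... use the memory ...
        // Freeing is skipped for allocations that came from a different const,
        // as the match arm above shows; this one is ours, so it is deallocated.
        const_deallocate(ptr, 16, 8);
    };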
+
+    fn assert_panic(
+        ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        msg: &AssertMessage<'tcx>,
+        _unwind: mir::UnwindAction,
+    ) -> InterpResult<'tcx> {
+        use rustc_middle::mir::AssertKind::*;
+        // Convert `AssertKind<Operand>` to `AssertKind<Scalar>`.
+        let eval_to_int =
+            |op| ecx.read_immediate(&ecx.eval_operand(op, None)?).map(|x| x.to_const_int());
+        let err = match msg {
+            BoundsCheck { len, index } => {
+                let len = eval_to_int(len)?;
+                let index = eval_to_int(index)?;
+                BoundsCheck { len, index }
+            }
+            Overflow(op, l, r) => Overflow(*op, eval_to_int(l)?, eval_to_int(r)?),
+            OverflowNeg(op) => OverflowNeg(eval_to_int(op)?),
+            DivisionByZero(op) => DivisionByZero(eval_to_int(op)?),
+            RemainderByZero(op) => RemainderByZero(eval_to_int(op)?),
+            ResumedAfterReturn(coroutine_kind) => ResumedAfterReturn(*coroutine_kind),
+            ResumedAfterPanic(coroutine_kind) => ResumedAfterPanic(*coroutine_kind),
+            MisalignedPointerDereference { ref required, ref found } => {
+                MisalignedPointerDereference {
+                    required: eval_to_int(required)?,
+                    found: eval_to_int(found)?,
+                }
+            }
+        };
+        Err(ConstEvalErrKind::AssertFailure(err).into())
+    }
+
+    fn binary_ptr_op(
+        _ecx: &InterpCx<'mir, 'tcx, Self>,
+        _bin_op: mir::BinOp,
+        _left: &ImmTy<'tcx>,
+        _right: &ImmTy<'tcx>,
+    ) -> InterpResult<'tcx, (ImmTy<'tcx>, bool)> {
+        throw_unsup_format!("pointer arithmetic or comparison is not supported at compile-time");
+    }
+
+    fn increment_const_eval_counter(ecx: &mut InterpCx<'mir, 'tcx, Self>) -> InterpResult<'tcx> {
+        // If the addition overflows, the step limit has already been hit in a previous call
+        // to `increment_const_eval_counter`, so there is nothing left to do.
+
+        if let Some(new_steps) = ecx.machine.num_evaluated_steps.checked_add(1) {
+            let (limit, start) = if ecx.tcx.sess.opts.unstable_opts.tiny_const_eval_limit {
+                (TINY_LINT_TERMINATOR_LIMIT, TINY_LINT_TERMINATOR_LIMIT)
+            } else {
+                (LINT_TERMINATOR_LIMIT, PROGRESS_INDICATOR_START)
+            };
+
+            ecx.machine.num_evaluated_steps = new_steps;
+            // By default, we have a *deny* lint kicking in after some time
+            // to ensure `loop {}` doesn't just go forever.
+            // In case that lint got reduced, in particular for `--cap-lints` situations, we also
+            // have a hard warning shown every now and then for really long executions.
+            if new_steps == limit {
+                // By default, we stop after a million steps, but the user can disable this lint
+                // to be able to run until the heat death of the universe or power loss, whichever
+                // comes first.
+                let hir_id = ecx.best_lint_scope();
+                let is_error = ecx
+                    .tcx
+                    .lint_level_at_node(
+                        rustc_session::lint::builtin::LONG_RUNNING_CONST_EVAL,
+                        hir_id,
+                    )
+                    .0
+                    .is_error();
+                let span = ecx.cur_span();
+                ecx.tcx.emit_node_span_lint(
+                    rustc_session::lint::builtin::LONG_RUNNING_CONST_EVAL,
+                    hir_id,
+                    span,
+                    LongRunning { item_span: ecx.tcx.span },
+                );
+                // If this was a hard error, don't bother continuing evaluation.
+                if is_error {
+                    let guard = ecx
+                        .tcx
+                        .dcx()
+                        .span_delayed_bug(span, "The deny lint should have already errored");
+                    throw_inval!(AlreadyReported(guard.into()));
+                }
+            } else if new_steps > start && new_steps.is_power_of_two() {
+                // Only report after a certain number of terminators have been evaluated and the
+                // current number of evaluated terminators is a power of 2. The latter gives us a cheap
+                // way to implement exponential backoff.
+                let span = ecx.cur_span();
+                ecx.tcx.dcx().emit_warn(LongRunningWarn { span, item_span: ecx.tcx.span });
+            }
+        }
+
+        Ok(())
+    }
+
+    #[inline(always)]
+    fn expose_ptr(_ecx: &mut InterpCx<'mir, 'tcx, Self>, _ptr: Pointer) -> InterpResult<'tcx> {
+        // This is only reachable with -Zunleash-the-miri-inside-of-you.
+        throw_unsup_format!("exposing pointers is not possible at compile-time")
+    }
+
+    #[inline(always)]
+    fn init_frame_extra(
+        ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        frame: Frame<'mir, 'tcx>,
+    ) -> InterpResult<'tcx, Frame<'mir, 'tcx>> {
+        // Enforce stack size limit. Add 1 because this is run before the new frame is pushed.
+        if !ecx.recursion_limit.value_within_limit(ecx.stack().len() + 1) {
+            throw_exhaust!(StackFrameLimitReached)
+        } else {
+            Ok(frame)
+        }
+    }
+
+    #[inline(always)]
+    fn stack<'a>(
+        ecx: &'a InterpCx<'mir, 'tcx, Self>,
+    ) -> &'a [Frame<'mir, 'tcx, Self::Provenance, Self::FrameExtra>] {
+        &ecx.machine.stack
+    }
+
+    #[inline(always)]
+    fn stack_mut<'a>(
+        ecx: &'a mut InterpCx<'mir, 'tcx, Self>,
+    ) -> &'a mut Vec<Frame<'mir, 'tcx, Self::Provenance, Self::FrameExtra>> {
+        &mut ecx.machine.stack
+    }
+
+    fn before_access_global(
+        _tcx: TyCtxtAt<'tcx>,
+        machine: &Self,
+        alloc_id: AllocId,
+        alloc: ConstAllocation<'tcx>,
+        _static_def_id: Option<DefId>,
+        is_write: bool,
+    ) -> InterpResult<'tcx> {
+        let alloc = alloc.inner();
+        if is_write {
+            // Write access. These are never allowed, but we give a targeted error message.
+            match alloc.mutability {
+                Mutability::Not => Err(err_ub!(WriteToReadOnly(alloc_id)).into()),
+                Mutability::Mut => Err(ConstEvalErrKind::ModifiedGlobal.into()),
+            }
+        } else {
+            // Read access. These are usually allowed, with some exceptions.
+            if machine.can_access_mut_global == CanAccessMutGlobal::Yes {
+                // Machine configuration allows us to read from anything (e.g., `static` initializer).
+                Ok(())
+            } else if alloc.mutability == Mutability::Mut {
+                // Machine configuration does not allow us to read from mutable globals (e.g., in a
+                // `const` initializer).
+                Err(ConstEvalErrKind::ConstAccessesMutGlobal.into())
+            } else {
+                // Immutable global, this read is fine.
+                assert_eq!(alloc.mutability, Mutability::Not);
+                Ok(())
+            }
+        }
+    }
+
+    fn retag_ptr_value(
+        ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        _kind: mir::RetagKind,
+        val: &ImmTy<'tcx, CtfeProvenance>,
+    ) -> InterpResult<'tcx, ImmTy<'tcx, CtfeProvenance>> {
+        // If it's a frozen shared reference that's not already immutable, make it immutable.
+        // (Do nothing on `None` provenance, that cannot store immutability anyway.)
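+        // For example (illustrative): a `&u8` pointing into a const allocation gets its
+        // provenance marked immutable here, while a `&Cell<u8>` is left untouched because
+        // `Cell` is not `Freeze`.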
+        if let ty::Ref(_, ty, mutbl) = val.layout.ty.kind()
+            && *mutbl == Mutability::Not
+            && val.to_scalar_and_meta().0.to_pointer(ecx)?.provenance.is_some_and(|p| !p.immutable())
+            // The next check is expensive, which is why we have all the guards above.
+            && ty.is_freeze(*ecx.tcx, ecx.param_env)
+        {
+            let place = ecx.ref_to_mplace(val)?;
+            let new_place = place.map_provenance(CtfeProvenance::as_immutable);
+            Ok(ImmTy::from_immediate(new_place.to_ref(ecx), val.layout))
+        } else {
+            Ok(val.clone())
+        }
+    }
+
+    fn before_memory_write(
+        tcx: TyCtxtAt<'tcx>,
+        machine: &mut Self,
+        _alloc_extra: &mut Self::AllocExtra,
+        (_alloc_id, immutable): (AllocId, bool),
+        range: AllocRange,
+    ) -> InterpResult<'tcx> {
+        if range.size == Size::ZERO {
+            // Nothing to check.
+            return Ok(());
+        }
+        // Reject writes through immutable pointers.
+        if immutable {
+            super::lint(tcx, machine, WRITES_THROUGH_IMMUTABLE_POINTER, |frames| {
+                crate::errors::WriteThroughImmutablePointer { frames }
+            });
+        }
+        // Everything else is fine.
+        Ok(())
+    }
+
+    fn before_alloc_read(
+        ecx: &InterpCx<'mir, 'tcx, Self>,
+        alloc_id: AllocId,
+    ) -> InterpResult<'tcx> {
+        if Some(alloc_id) == ecx.machine.static_root_ids.map(|(id, _)| id) {
+            Err(ConstEvalErrKind::RecursiveStatic.into())
+        } else {
+            Ok(())
+        }
+    }
+}
+
+// Please do not add any code below the above `Machine` trait impl. I (oli-obk) plan more cleanups
+// so we can end up having a file with just that impl, but for now, let's keep the impl discoverable
+// at the bottom of this file.
diff --git a/compiler/rustc_const_eval/src/const_eval/mod.rs b/compiler/rustc_const_eval/src/const_eval/mod.rs
new file mode 100644
index 00000000000..d0d6adbfad0
--- /dev/null
+++ b/compiler/rustc_const_eval/src/const_eval/mod.rs
@@ -0,0 +1,77 @@
+// Not in interpret to make sure we do not use private implementation details
+
+use rustc_middle::mir;
+use rustc_middle::mir::interpret::InterpErrorInfo;
+use rustc_middle::query::TyCtxtAt;
+use rustc_middle::ty::{self, Ty};
+
+use crate::interpret::format_interp_error;
+
+mod error;
+mod eval_queries;
+mod fn_queries;
+mod machine;
+mod valtrees;
+
+pub use error::*;
+pub use eval_queries::*;
+pub use fn_queries::*;
+pub use machine::*;
+pub(crate) use valtrees::{eval_to_valtree, valtree_to_const_value};
+
+// We forbid type-level constants that contain more than `VALTREE_MAX_NODES` nodes.
+const VALTREE_MAX_NODES: usize = 100000;
+
+pub(crate) enum ValTreeCreationError {
+    NodesOverflow,
+    /// Values of this type, or this particular value, are not supported as valtrees.
+    NonSupportedType,
+}
+pub(crate) type ValTreeCreationResult<'tcx> = Result<ty::ValTree<'tcx>, ValTreeCreationError>;
+
+impl From<InterpErrorInfo<'_>> for ValTreeCreationError {
+    fn from(err: InterpErrorInfo<'_>) -> Self {
+        ty::tls::with(|tcx| {
+            bug!(
+                "Unexpected Undefined Behavior error during valtree construction: {}",
+                format_interp_error(tcx.dcx(), err),
+            )
+        })
+    }
+}
+
+#[instrument(skip(tcx), level = "debug")]
+pub(crate) fn try_destructure_mir_constant_for_user_output<'tcx>(
+    tcx: TyCtxtAt<'tcx>,
+    val: mir::ConstValue<'tcx>,
+    ty: Ty<'tcx>,
+) -> Option<mir::DestructuredConstant<'tcx>> {
+    let param_env = ty::ParamEnv::reveal_all();
+    let (ecx, op) = mk_eval_cx_for_const_val(tcx, param_env, val, ty)?;
+
+    // We go to `usize` as we cannot allocate anything bigger anyway.
+    let (field_count, variant, down) = match ty.kind() {
+        ty::Array(_, len) => (len.eval_target_usize(tcx.tcx, param_env) as usize, None, op),
+        ty::Adt(def, _) if def.variants().is_empty() => {
+            return None;
+        }
+        ty::Adt(def, _) => {
+            let variant = ecx.read_discriminant(&op).ok()?;
+            let down = ecx.project_downcast(&op, variant).ok()?;
+            (def.variants()[variant].fields.len(), Some(variant), down)
+        }
+        ty::Tuple(args) => (args.len(), None, op),
+        _ => bug!("cannot destructure mir constant {:?}", val),
+    };
+
+    let fields_iter = (0..field_count)
+        .map(|i| {
+            let field_op = ecx.project_field(&down, i).ok()?;
+            let val = op_to_const(&ecx, &field_op, /* for diagnostics */ true);
+            Some((val, field_op.layout.ty))
+        })
+        .collect::<Option<Vec<_>>>()?;
+    let fields = tcx.arena.alloc_from_iter(fields_iter);
+
+    Some(mir::DestructuredConstant { variant, fields })
+}
diff --git a/compiler/rustc_const_eval/src/const_eval/valtrees.rs b/compiler/rustc_const_eval/src/const_eval/valtrees.rs
new file mode 100644
index 00000000000..d3428d27d52
--- /dev/null
+++ b/compiler/rustc_const_eval/src/const_eval/valtrees.rs
@@ -0,0 +1,448 @@
+use rustc_middle::mir;
+use rustc_middle::mir::interpret::{EvalToValTreeResult, GlobalId};
+use rustc_middle::ty::layout::{LayoutCx, LayoutOf, TyAndLayout};
+use rustc_middle::ty::{self, ScalarInt, Ty, TyCtxt};
+use rustc_span::DUMMY_SP;
+use rustc_target::abi::{Abi, VariantIdx};
+
+use super::eval_queries::{mk_eval_cx_to_read_const_val, op_to_const};
+use super::machine::CompileTimeEvalContext;
+use super::{ValTreeCreationError, ValTreeCreationResult, VALTREE_MAX_NODES};
+use crate::const_eval::CanAccessMutGlobal;
+use crate::errors::MaxNumNodesInConstErr;
+use crate::interpret::MPlaceTy;
+use crate::interpret::{
+    intern_const_alloc_recursive, ImmTy, Immediate, InternKind, MemPlaceMeta, MemoryKind, PlaceTy,
+    Projectable, Scalar,
+};
+
+#[instrument(skip(ecx), level = "debug")]
+fn branches<'tcx>(
+    ecx: &CompileTimeEvalContext<'tcx, 'tcx>,
+    place: &MPlaceTy<'tcx>,
+    n: usize,
+    variant: Option<VariantIdx>,
+    num_nodes: &mut usize,
+) -> ValTreeCreationResult<'tcx> {
+    let place = match variant {
+        Some(variant) => ecx.project_downcast(place, variant).unwrap(),
+        None => place.clone(),
+    };
+    let variant = variant.map(|variant| Some(ty::ValTree::Leaf(ScalarInt::from(variant.as_u32()))));
+    debug!(?place, ?variant);
+
+    let mut fields = Vec::with_capacity(n);
+    for i in 0..n {
+        let field = ecx.project_field(&place, i).unwrap();
+        let valtree = const_to_valtree_inner(ecx, &field, num_nodes)?;
+        fields.push(Some(valtree));
+    }
+
+    // For enums, we prepend their variant index before the variant's fields so we can figure out
+    // the variant again when just seeing a valtree.
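+    // For example (illustrative only): the valtree for `Some(3_u8)` would be
+    // `Branch([Leaf(1), Leaf(3)])`, with the leading `Leaf(1)` being `Some`'s variant index.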
+    let branches = variant
+        .into_iter()
+        .chain(fields.into_iter())
+        .collect::<Option<Vec<_>>>()
+        .expect("should have already checked for errors in ValTree creation");
+
+    // Have to account for ZSTs here
+    if branches.is_empty() {
+        *num_nodes += 1;
+    }
+
+    Ok(ty::ValTree::Branch(ecx.tcx.arena.alloc_from_iter(branches)))
+}
+
+#[instrument(skip(ecx), level = "debug")]
+fn slice_branches<'tcx>(
+    ecx: &CompileTimeEvalContext<'tcx, 'tcx>,
+    place: &MPlaceTy<'tcx>,
+    num_nodes: &mut usize,
+) -> ValTreeCreationResult<'tcx> {
+    let n = place.len(ecx).unwrap_or_else(|_| panic!("expected to get the length of place {place:?}"));
+
+    let mut elems = Vec::with_capacity(n as usize);
+    for i in 0..n {
+        let place_elem = ecx.project_index(place, i).unwrap();
+        let valtree = const_to_valtree_inner(ecx, &place_elem, num_nodes)?;
+        elems.push(valtree);
+    }
+
+    Ok(ty::ValTree::Branch(ecx.tcx.arena.alloc_from_iter(elems)))
+}
+
+#[instrument(skip(ecx), level = "debug")]
+fn const_to_valtree_inner<'tcx>(
+    ecx: &CompileTimeEvalContext<'tcx, 'tcx>,
+    place: &MPlaceTy<'tcx>,
+    num_nodes: &mut usize,
+) -> ValTreeCreationResult<'tcx> {
+    let ty = place.layout.ty;
+    debug!("ty kind: {:?}", ty.kind());
+
+    if *num_nodes >= VALTREE_MAX_NODES {
+        return Err(ValTreeCreationError::NodesOverflow);
+    }
+
+    match ty.kind() {
+        ty::FnDef(..) => {
+            *num_nodes += 1;
+            Ok(ty::ValTree::zst())
+        }
+        ty::Bool | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Char => {
+            let val = ecx.read_immediate(place)?;
+            let val = val.to_scalar();
+            *num_nodes += 1;
+
+            Ok(ty::ValTree::Leaf(val.assert_int()))
+        }
+
+        ty::RawPtr(_) => {
+            // Not all raw pointers are allowed, as we cannot properly test them for
+            // equality at compile-time (see `ptr_guaranteed_cmp`).
+            // However, we allow those that are just integers in disguise.
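+            // For example (illustrative): a constant like `0x1000 as *const u8` carries no
+            // provenance and can be encoded as a leaf, while a pointer derived from an actual
+            // allocation cannot.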
+            // First, get the pointer. Remember it might be wide!
+            let val = ecx.read_immediate(place)?;
+            // We could allow wide raw pointers where both sides are integers in the future,
+            // but for now we reject them.
+            if matches!(val.layout.abi, Abi::ScalarPair(..)) {
+                return Err(ValTreeCreationError::NonSupportedType);
+            }
+            let val = val.to_scalar();
+            // We are in the CTFE machine, so ptr-to-int casts will fail.
+            // This can only be `Ok` if `val` already is an integer.
+            let Ok(val) = val.try_to_int() else {
+                return Err(ValTreeCreationError::NonSupportedType);
+            };
+            // It's just a ScalarInt!
+            Ok(ty::ValTree::Leaf(val))
+        }
+
+        // Technically we could allow function pointers (represented as `ty::Instance`), but this is not guaranteed to
+        // agree with runtime equality tests.
+        ty::FnPtr(_) => Err(ValTreeCreationError::NonSupportedType),
+
+        ty::Ref(_, _, _)  => {
+            let derefd_place = ecx.deref_pointer(place)?;
+            const_to_valtree_inner(ecx, &derefd_place, num_nodes)
+        }
+
+        ty::Str | ty::Slice(_) | ty::Array(_, _) => {
+            slice_branches(ecx, place, num_nodes)
+        }
+        // Trait objects are not allowed in type level constants, as we have no concept for
+        // resolving their backing type, even if we can do that at const eval time. We may
+        // hypothetically be able to allow `dyn StructuralPartialEq` trait objects in the future,
+        // but it is unclear if this is useful.
+        ty::Dynamic(..) => Err(ValTreeCreationError::NonSupportedType),
+
+        ty::Tuple(elem_tys) => {
+            branches(ecx, place, elem_tys.len(), None, num_nodes)
+        }
+
+        ty::Adt(def, _) => {
+            if def.is_union() {
+                return Err(ValTreeCreationError::NonSupportedType);
+            } else if def.variants().is_empty() {
+                bug!("uninhabited types should have errored and never gotten converted to valtree")
+            }
+
+            let variant = ecx.read_discriminant(place)?;
+            branches(ecx, place, def.variant(variant).fields.len(), def.is_enum().then_some(variant), num_nodes)
+        }
+
+        ty::Never
+        | ty::Error(_)
+        | ty::Foreign(..)
+        | ty::Infer(ty::FreshIntTy(_))
+        | ty::Infer(ty::FreshFloatTy(_))
+        // FIXME(oli-obk): we could look behind opaque types
+        | ty::Alias(..)
+        | ty::Param(_)
+        | ty::Bound(..)
+        | ty::Placeholder(..)
+        | ty::Infer(_)
+        // FIXME(oli-obk): we can probably encode closures just like structs
+        | ty::Closure(..)
+        | ty::CoroutineClosure(..)
+        | ty::Coroutine(..)
+        | ty::CoroutineWitness(..) => Err(ValTreeCreationError::NonSupportedType),
+    }
+}
+
+/// Valtrees don't store the `MemPlaceMeta` that all dynamically sized values have in the interpreter.
+/// This function reconstructs it.
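+/// For example (illustrative): for a `[T]` or `str` pointee, the meta is the element
+/// count, recovered here from the length of the final `Branch` in the valtree.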
+fn reconstruct_place_meta<'tcx>(
+    layout: TyAndLayout<'tcx>,
+    valtree: ty::ValTree<'tcx>,
+    tcx: TyCtxt<'tcx>,
+) -> MemPlaceMeta {
+    if layout.is_sized() {
+        return MemPlaceMeta::None;
+    }
+
+    let mut last_valtree = valtree;
+    // Traverse the type, and update `last_valtree` as we go.
+    let tail = tcx.struct_tail_with_normalize(
+        layout.ty,
+        |ty| ty,
+        || {
+            let branches = last_valtree.unwrap_branch();
+            last_valtree = *branches.last().unwrap();
+            debug!(?branches, ?last_valtree);
+        },
+    );
+    // Sanity-check that we got a tail we support.
+    match tail.kind() {
+        ty::Slice(..) | ty::Str => {}
+        _ => bug!("unsized tail of a valtree must be Slice or Str"),
+    };
+
+    // Get the number of elements in the unsized field.
+    let num_elems = last_valtree.unwrap_branch().len();
+    MemPlaceMeta::Meta(Scalar::from_target_usize(num_elems as u64, &tcx))
+}
+
+#[instrument(skip(ecx), level = "debug", ret)]
+fn create_valtree_place<'tcx>(
+    ecx: &mut CompileTimeEvalContext<'tcx, 'tcx>,
+    layout: TyAndLayout<'tcx>,
+    valtree: ty::ValTree<'tcx>,
+) -> MPlaceTy<'tcx> {
+    let meta = reconstruct_place_meta(layout, valtree, ecx.tcx.tcx);
+    ecx.allocate_dyn(layout, MemoryKind::Stack, meta).unwrap()
+}
+
+/// Evaluates a constant and turns it into a type-level constant value.
+pub(crate) fn eval_to_valtree<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    param_env: ty::ParamEnv<'tcx>,
+    cid: GlobalId<'tcx>,
+) -> EvalToValTreeResult<'tcx> {
+    let const_alloc = tcx.eval_to_allocation_raw(param_env.and(cid))?;
+
+    // FIXME Need to provide a span to `eval_to_valtree`
+    let ecx = mk_eval_cx_to_read_const_val(
+        tcx,
+        DUMMY_SP,
+        param_env,
+        // It is absolutely crucial for soundness that
+        // we do not read from mutable memory.
+        CanAccessMutGlobal::No,
+    );
+    let place = ecx.raw_const_to_mplace(const_alloc).unwrap();
+    debug!(?place);
+
+    let mut num_nodes = 0;
+    let valtree_result = const_to_valtree_inner(&ecx, &place, &mut num_nodes);
+
+    match valtree_result {
+        Ok(valtree) => Ok(Some(valtree)),
+        Err(err) => {
+            let did = cid.instance.def_id();
+            let global_const_id = cid.display(tcx);
+            let span = tcx.hir().span_if_local(did);
+            match err {
+                ValTreeCreationError::NodesOverflow => {
+                    let handled =
+                        tcx.dcx().emit_err(MaxNumNodesInConstErr { span, global_const_id });
+                    Err(handled.into())
+                }
+                ValTreeCreationError::NonSupportedType => Ok(None),
+            }
+        }
+    }
+}
+
+/// Converts a `ValTree` to a `ConstValue`, which is needed after mir
+/// construction has finished.
+// FIXME Merge `valtree_to_const_value` and `valtree_into_mplace` into one function
+#[instrument(skip(tcx), level = "debug", ret)]
+pub fn valtree_to_const_value<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    param_env_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
+    valtree: ty::ValTree<'tcx>,
+) -> mir::ConstValue<'tcx> {
+    // Basic idea: We directly construct `Scalar` values from trivial `ValTree`s
+    // (those for constants with type bool, int, uint, float or char).
+    // For all other types we create an `MPlace` and fill that by walking
+    // the `ValTree` and using projections (`project_field`, `project_index`,
+    // `project_downcast`) to create inner `MPlace`s which are filled recursively.
+    // FIXME Does this need an example?
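+    // An illustrative sketch: `3u8` becomes a `ConstValue::Scalar` directly, while
+    // `[1u8, 2]` gets an `MPlace` whose elements are filled from the valtree
+    // `Branch([Leaf(1), Leaf(2)])`, which is then interned and turned into a `ConstValue`.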
+
+    let (param_env, ty) = param_env_ty.into_parts();
+
+    match ty.kind() {
+        ty::FnDef(..) => {
+            assert!(valtree.unwrap_branch().is_empty());
+            mir::ConstValue::ZeroSized
+        }
+        ty::Bool | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Char | ty::RawPtr(_) => {
+            match valtree {
+                ty::ValTree::Leaf(scalar_int) => mir::ConstValue::Scalar(Scalar::Int(scalar_int)),
+                ty::ValTree::Branch(_) => bug!(
+                    "ValTrees for Bool, Int, Uint, Float, Char or RawPtr should have the form ValTree::Leaf"
+                ),
+            }
+        }
+        ty::Ref(_, inner_ty, _) => {
+            let mut ecx =
+                mk_eval_cx_to_read_const_val(tcx, DUMMY_SP, param_env, CanAccessMutGlobal::No);
+            let imm = valtree_to_ref(&mut ecx, valtree, *inner_ty);
+            let imm = ImmTy::from_immediate(imm, tcx.layout_of(param_env_ty).unwrap());
+            op_to_const(&ecx, &imm.into(), /* for diagnostics */ false)
+        }
+        ty::Tuple(_) | ty::Array(_, _) | ty::Adt(..) => {
+            let layout = tcx.layout_of(param_env_ty).unwrap();
+            if layout.is_zst() {
+                // Fast path to avoid some allocations.
+                return mir::ConstValue::ZeroSized;
+            }
+            if layout.abi.is_scalar()
+                && (matches!(ty.kind(), ty::Tuple(_))
+                    || matches!(ty.kind(), ty::Adt(def, _) if def.is_struct()))
+            {
+                // A Scalar tuple/struct; we can avoid creating an allocation.
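+                // For example (illustrative): a hypothetical newtype `struct Nanos(u64)` has
+                // scalar ABI, so we just recurse into its `u64` field's valtree here instead
+                // of creating an allocation.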
+                let branches = valtree.unwrap_branch();
+                // Find the non-ZST field. (There can be aligned ZSTs!)
+                for (i, &inner_valtree) in branches.iter().enumerate() {
+                    let field = layout.field(&LayoutCx { tcx, param_env }, i);
+                    if !field.is_zst() {
+                        return valtree_to_const_value(tcx, param_env.and(field.ty), inner_valtree);
+                    }
+                }
+                bug!("could not find non-ZST field during in {layout:#?}");
+            }
+
+            let mut ecx =
+                mk_eval_cx_to_read_const_val(tcx, DUMMY_SP, param_env, CanAccessMutGlobal::No);
+
+            // Need to create a place for this valtree.
+            let place = create_valtree_place(&mut ecx, layout, valtree);
+
+            valtree_into_mplace(&mut ecx, &place, valtree);
+            dump_place(&ecx, &place);
+            intern_const_alloc_recursive(&mut ecx, InternKind::Constant, &place).unwrap();
+
+            op_to_const(&ecx, &place.into(), /* for diagnostics */ false)
+        }
+        ty::Never
+        | ty::Error(_)
+        | ty::Foreign(..)
+        | ty::Infer(ty::FreshIntTy(_))
+        | ty::Infer(ty::FreshFloatTy(_))
+        | ty::Alias(..)
+        | ty::Param(_)
+        | ty::Bound(..)
+        | ty::Placeholder(..)
+        | ty::Infer(_)
+        | ty::Closure(..)
+        | ty::CoroutineClosure(..)
+        | ty::Coroutine(..)
+        | ty::CoroutineWitness(..)
+        | ty::FnPtr(_)
+        | ty::Str
+        | ty::Slice(_)
+        | ty::Dynamic(..) => bug!("no ValTree should have been created for type {:?}", ty.kind()),
+    }
+}
+
+/// Put a valtree into memory and return a reference to it.
+fn valtree_to_ref<'tcx>(
+    ecx: &mut CompileTimeEvalContext<'tcx, 'tcx>,
+    valtree: ty::ValTree<'tcx>,
+    pointee_ty: Ty<'tcx>,
+) -> Immediate {
+    let pointee_place = create_valtree_place(ecx, ecx.layout_of(pointee_ty).unwrap(), valtree);
+    debug!(?pointee_place);
+
+    valtree_into_mplace(ecx, &pointee_place, valtree);
+    dump_place(ecx, &pointee_place);
+    intern_const_alloc_recursive(ecx, InternKind::Constant, &pointee_place).unwrap();
+
+    pointee_place.to_ref(&ecx.tcx)
+}
+
+#[instrument(skip(ecx), level = "debug")]
+fn valtree_into_mplace<'tcx>(
+    ecx: &mut CompileTimeEvalContext<'tcx, 'tcx>,
+    place: &MPlaceTy<'tcx>,
+    valtree: ty::ValTree<'tcx>,
+) {
+    // This will match on the valtree and recursively write the corresponding value(s)
+    // into the place.
+
+    let ty = place.layout.ty;
+
+    match ty.kind() {
+        ty::FnDef(_, _) => {
+            // Zero-sized type, nothing to do.
+        }
+        ty::Bool | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Char | ty::RawPtr(..) => {
+            let scalar_int = valtree.unwrap_leaf();
+            debug!("writing trivial valtree {:?} to place {:?}", scalar_int, place);
+            ecx.write_immediate(Immediate::Scalar(scalar_int.into()), place).unwrap();
+        }
+        ty::Ref(_, inner_ty, _) => {
+            let imm = valtree_to_ref(ecx, valtree, *inner_ty);
+            debug!(?imm);
+            ecx.write_immediate(imm, place).unwrap();
+        }
+        ty::Adt(_, _) | ty::Tuple(_) | ty::Array(_, _) | ty::Str | ty::Slice(_) => {
+            let branches = valtree.unwrap_branch();
+
+            // Need to downcast place for enums
+            let (place_adjusted, branches, variant_idx) = match ty.kind() {
+                ty::Adt(def, _) if def.is_enum() => {
+                    // The first element of the valtree is the variant index.
+                    let scalar_int = branches[0].unwrap_leaf();
+                    let variant_idx = VariantIdx::from_u32(scalar_int.try_to_u32().unwrap());
+                    let variant = def.variant(variant_idx);
+                    debug!(?variant);
+
+                    (
+                        ecx.project_downcast(place, variant_idx).unwrap(),
+                        &branches[1..],
+                        Some(variant_idx),
+                    )
+                }
+                _ => (place.clone(), branches, None),
+            };
+            debug!(?place_adjusted, ?branches);
+
+            // Create the places (by indexing into `place`) for the fields and fill
+            // them recursively
+            for (i, inner_valtree) in branches.iter().enumerate() {
+                debug!(?i, ?inner_valtree);
+
+                let place_inner = match ty.kind() {
+                    ty::Str | ty::Slice(_) | ty::Array(..) => {
+                        ecx.project_index(place, i as u64).unwrap()
+                    }
+                    _ => ecx.project_field(&place_adjusted, i).unwrap(),
+                };
+
+                debug!(?place_inner);
+                valtree_into_mplace(ecx, &place_inner, *inner_valtree);
+                dump_place(ecx, &place_inner);
+            }
+
+            debug!("dump of place_adjusted:");
+            dump_place(ecx, &place_adjusted);
+
+            if let Some(variant_idx) = variant_idx {
+                // Don't forget to fill the place with the discriminant of the enum.
+                ecx.write_discriminant(variant_idx, place).unwrap();
+            }
+
+            debug!("dump of place after writing discriminant:");
+            dump_place(ecx, place);
+        }
+        _ => bug!("shouldn't have created a ValTree for {:?}", ty),
+    }
+}
+
+fn dump_place<'tcx>(ecx: &CompileTimeEvalContext<'tcx, 'tcx>, place: &MPlaceTy<'tcx>) {
+    trace!("{:?}", ecx.dump_place(&PlaceTy::from(place.clone())));
+}
diff --git a/compiler/rustc_const_eval/src/errors.rs b/compiler/rustc_const_eval/src/errors.rs
new file mode 100644
index 00000000000..cc32640408b
--- /dev/null
+++ b/compiler/rustc_const_eval/src/errors.rs
@@ -0,0 +1,901 @@
+use std::borrow::Cow;
+
+use rustc_errors::{
+    codes::*, Diag, DiagArgValue, DiagCtxt, DiagMessage, Diagnostic, EmissionGuarantee, Level,
+};
+use rustc_hir::ConstContext;
+use rustc_macros::{Diagnostic, LintDiagnostic, Subdiagnostic};
+use rustc_middle::mir::interpret::{
+    CheckInAllocMsg, ExpectedKind, InterpError, InvalidMetaKind, InvalidProgramInfo, Misalignment,
+    PointerKind, ResourceExhaustionInfo, UndefinedBehaviorInfo, UnsupportedOpInfo,
+    ValidationErrorInfo,
+};
+use rustc_middle::ty::{self, Mutability, Ty};
+use rustc_span::Span;
+use rustc_target::abi::call::AdjustForForeignAbiError;
+use rustc_target::abi::{Size, WrappingRange};
+
+use crate::interpret::InternKind;
+
+#[derive(Diagnostic)]
+#[diag(const_eval_dangling_ptr_in_final)]
+pub(crate) struct DanglingPtrInFinal {
+    #[primary_span]
+    pub span: Span,
+    pub kind: InternKind,
+}
+
+#[derive(LintDiagnostic)]
+#[diag(const_eval_mutable_ptr_in_final)]
+pub(crate) struct MutablePtrInFinal {
+    // rust-lang/rust#122153: This was marked as `#[primary_span]` under
+    // `derive(Diagnostic)`. Since we expect we may hard-error in future, we are
+    // keeping the field (and skipping it under `derive(LintDiagnostic)`).
+    #[skip_arg]
+    pub span: Span,
+    pub kind: InternKind,
+}
+
+#[derive(Diagnostic)]
+#[diag(const_eval_unstable_in_stable)]
+pub(crate) struct UnstableInStable {
+    pub gate: String,
+    #[primary_span]
+    pub span: Span,
+    #[suggestion(
+        const_eval_unstable_sugg,
+        code = "#[rustc_const_unstable(feature = \"...\", issue = \"...\")]\n",
+        applicability = "has-placeholders"
+    )]
+    #[suggestion(
+        const_eval_bypass_sugg,
+        code = "#[rustc_allow_const_fn_unstable({gate})]\n",
+        applicability = "has-placeholders"
+    )]
+    pub attr_span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(const_eval_thread_local_access, code = E0625)]
+pub(crate) struct ThreadLocalAccessErr {
+    #[primary_span]
+    pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(const_eval_raw_ptr_to_int)]
+#[note]
+#[note(const_eval_note2)]
+pub(crate) struct RawPtrToIntErr {
+    #[primary_span]
+    pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(const_eval_raw_ptr_comparison)]
+#[note]
+pub(crate) struct RawPtrComparisonErr {
+    #[primary_span]
+    pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(const_eval_panic_non_str)]
+pub(crate) struct PanicNonStrErr {
+    #[primary_span]
+    pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(const_eval_mut_deref, code = E0658)]
+pub(crate) struct MutDerefErr {
+    #[primary_span]
+    pub span: Span,
+    pub kind: ConstContext,
+}
+
+#[derive(Diagnostic)]
+#[diag(const_eval_transient_mut_borrow, code = E0658)]
+pub(crate) struct TransientMutBorrowErr {
+    #[primary_span]
+    pub span: Span,
+    pub kind: ConstContext,
+}
+
+#[derive(Diagnostic)]
+#[diag(const_eval_transient_mut_raw, code = E0658)]
+pub(crate) struct TransientMutRawErr {
+    #[primary_span]
+    pub span: Span,
+    pub kind: ConstContext,
+}
+
+#[derive(Diagnostic)]
+#[diag(const_eval_max_num_nodes_in_const)]
+pub(crate) struct MaxNumNodesInConstErr {
+    #[primary_span]
+    pub span: Option<Span>,
+    pub global_const_id: String,
+}
+
+#[derive(Diagnostic)]
+#[diag(const_eval_unallowed_fn_pointer_call)]
+pub(crate) struct UnallowedFnPointerCall {
+    #[primary_span]
+    pub span: Span,
+    pub kind: ConstContext,
+}
+
+#[derive(Diagnostic)]
+#[diag(const_eval_unstable_const_fn)]
+pub(crate) struct UnstableConstFn {
+    #[primary_span]
+    pub span: Span,
+    pub def_path: String,
+}
+
+#[derive(Diagnostic)]
+#[diag(const_eval_unallowed_mutable_refs, code = E0764)]
+pub(crate) struct UnallowedMutableRefs {
+    #[primary_span]
+    pub span: Span,
+    pub kind: ConstContext,
+    #[note(const_eval_teach_note)]
+    pub teach: Option<()>,
+}
+
+#[derive(Diagnostic)]
+#[diag(const_eval_unallowed_mutable_raw, code = E0764)]
+pub(crate) struct UnallowedMutableRaw {
+    #[primary_span]
+    pub span: Span,
+    pub kind: ConstContext,
+    #[note(const_eval_teach_note)]
+    pub teach: Option<()>,
+}
+#[derive(Diagnostic)]
+#[diag(const_eval_non_const_fmt_macro_call, code = E0015)]
+pub(crate) struct NonConstFmtMacroCall {
+    #[primary_span]
+    pub span: Span,
+    pub kind: ConstContext,
+}
+
+#[derive(Diagnostic)]
+#[diag(const_eval_non_const_fn_call, code = E0015)]
+pub(crate) struct NonConstFnCall {
+    #[primary_span]
+    pub span: Span,
+    pub def_path_str: String,
+    pub kind: ConstContext,
+}
+
+#[derive(Diagnostic)]
+#[diag(const_eval_unallowed_op_in_const_context)]
+pub(crate) struct UnallowedOpInConstContext {
+    #[primary_span]
+    pub span: Span,
+    pub msg: String,
+}
+
+#[derive(Diagnostic)]
+#[diag(const_eval_unallowed_heap_allocations, code = E0010)]
+pub(crate) struct UnallowedHeapAllocations {
+    #[primary_span]
+    #[label]
+    pub span: Span,
+    pub kind: ConstContext,
+    #[note(const_eval_teach_note)]
+    pub teach: Option<()>,
+}
+
+#[derive(Diagnostic)]
+#[diag(const_eval_unallowed_inline_asm, code = E0015)]
+pub(crate) struct UnallowedInlineAsm {
+    #[primary_span]
+    pub span: Span,
+    pub kind: ConstContext,
+}
+
+#[derive(Diagnostic)]
+#[diag(const_eval_interior_mutable_data_refer, code = E0492)]
+pub(crate) struct InteriorMutableDataRefer {
+    #[primary_span]
+    #[label]
+    pub span: Span,
+    #[help]
+    pub opt_help: Option<()>,
+    pub kind: ConstContext,
+    #[note(const_eval_teach_note)]
+    pub teach: Option<()>,
+}
+
+#[derive(Diagnostic)]
+#[diag(const_eval_interior_mutability_borrow)]
+pub(crate) struct InteriorMutabilityBorrow {
+    #[primary_span]
+    pub span: Span,
+}
+
+#[derive(LintDiagnostic)]
+#[diag(const_eval_long_running)]
+#[note]
+pub struct LongRunning {
+    #[help]
+    pub item_span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(const_eval_long_running)]
+pub struct LongRunningWarn {
+    #[primary_span]
+    #[label]
+    pub span: Span,
+    #[help]
+    pub item_span: Span,
+}
+
+#[derive(Subdiagnostic)]
+#[note(const_eval_non_const_impl)]
+pub(crate) struct NonConstImplNote {
+    #[primary_span]
+    pub span: Span,
+}
+
+#[derive(Subdiagnostic, Clone)]
+#[note(const_eval_frame_note)]
+pub struct FrameNote {
+    #[primary_span]
+    pub span: Span,
+    pub times: i32,
+    pub where_: &'static str,
+    pub instance: String,
+}
+
+#[derive(Subdiagnostic)]
+#[note(const_eval_raw_bytes)]
+pub struct RawBytesNote {
+    pub size: u64,
+    pub align: u64,
+    pub bytes: String,
+}
+
+// FIXME(fee1-dead) do not use stringly typed `ConstContext`
+
+#[derive(Diagnostic)]
+#[diag(const_eval_match_eq_non_const, code = E0015)]
+#[note]
+pub struct NonConstMatchEq<'tcx> {
+    #[primary_span]
+    pub span: Span,
+    pub ty: Ty<'tcx>,
+    pub kind: ConstContext,
+}
+
+#[derive(Diagnostic)]
+#[diag(const_eval_for_loop_into_iter_non_const, code = E0015)]
+pub struct NonConstForLoopIntoIter<'tcx> {
+    #[primary_span]
+    pub span: Span,
+    pub ty: Ty<'tcx>,
+    pub kind: ConstContext,
+}
+
+#[derive(Diagnostic)]
+#[diag(const_eval_question_branch_non_const, code = E0015)]
+pub struct NonConstQuestionBranch<'tcx> {
+    #[primary_span]
+    pub span: Span,
+    pub ty: Ty<'tcx>,
+    pub kind: ConstContext,
+}
+
+#[derive(Diagnostic)]
+#[diag(const_eval_question_from_residual_non_const, code = E0015)]
+pub struct NonConstQuestionFromResidual<'tcx> {
+    #[primary_span]
+    pub span: Span,
+    pub ty: Ty<'tcx>,
+    pub kind: ConstContext,
+}
+
+#[derive(Diagnostic)]
+#[diag(const_eval_try_block_from_output_non_const, code = E0015)]
+pub struct NonConstTryBlockFromOutput<'tcx> {
+    #[primary_span]
+    pub span: Span,
+    pub ty: Ty<'tcx>,
+    pub kind: ConstContext,
+}
+
+#[derive(Diagnostic)]
+#[diag(const_eval_await_non_const, code = E0015)]
+pub struct NonConstAwait<'tcx> {
+    #[primary_span]
+    pub span: Span,
+    pub ty: Ty<'tcx>,
+    pub kind: ConstContext,
+}
+
+#[derive(Diagnostic)]
+#[diag(const_eval_closure_non_const, code = E0015)]
+pub struct NonConstClosure {
+    #[primary_span]
+    pub span: Span,
+    pub kind: ConstContext,
+    #[subdiagnostic]
+    pub note: Option<NonConstClosureNote>,
+}
+
+#[derive(Subdiagnostic)]
+pub enum NonConstClosureNote {
+    #[note(const_eval_closure_fndef_not_const)]
+    FnDef {
+        #[primary_span]
+        span: Span,
+    },
+    #[note(const_eval_fn_ptr_call)]
+    FnPtr,
+    #[note(const_eval_closure_call)]
+    Closure,
+}
+
+#[derive(Subdiagnostic)]
+#[multipart_suggestion(const_eval_consider_dereferencing, applicability = "machine-applicable")]
+pub struct ConsiderDereferencing {
+    pub deref: String,
+    #[suggestion_part(code = "{deref}")]
+    pub span: Span,
+    #[suggestion_part(code = "{deref}")]
+    pub rhs_span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(const_eval_operator_non_const, code = E0015)]
+pub struct NonConstOperator {
+    #[primary_span]
+    pub span: Span,
+    pub kind: ConstContext,
+    #[subdiagnostic]
+    pub sugg: Option<ConsiderDereferencing>,
+}
+
+#[derive(Diagnostic)]
+#[diag(const_eval_deref_coercion_non_const, code = E0015)]
+#[note]
+pub struct NonConstDerefCoercion<'tcx> {
+    #[primary_span]
+    pub span: Span,
+    pub ty: Ty<'tcx>,
+    pub kind: ConstContext,
+    pub target_ty: Ty<'tcx>,
+    #[note(const_eval_target_note)]
+    pub deref_target: Option<Span>,
+}
+
+#[derive(Diagnostic)]
+#[diag(const_eval_live_drop, code = E0493)]
+pub struct LiveDrop<'tcx> {
+    #[primary_span]
+    #[label]
+    pub span: Span,
+    pub kind: ConstContext,
+    pub dropped_ty: Ty<'tcx>,
+    #[label(const_eval_dropped_at_label)]
+    pub dropped_at: Option<Span>,
+}
+
+#[derive(Diagnostic)]
+#[diag(const_eval_error, code = E0080)]
+pub struct ConstEvalError {
+    #[primary_span]
+    pub span: Span,
+    /// One of "const", "const_with_path", and "static"
+    pub error_kind: &'static str,
+    pub instance: String,
+    #[subdiagnostic]
+    pub frame_notes: Vec<FrameNote>,
+}
+
+#[derive(LintDiagnostic)]
+#[diag(const_eval_write_through_immutable_pointer)]
+pub struct WriteThroughImmutablePointer {
+    #[subdiagnostic]
+    pub frames: Vec<FrameNote>,
+}
+
+#[derive(Diagnostic)]
+#[diag(const_eval_nullary_intrinsic_fail)]
+pub struct NullaryIntrinsicError {
+    #[primary_span]
+    pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(const_eval_validation_failure, code = E0080)]
+pub struct ValidationFailure {
+    #[primary_span]
+    pub span: Span,
+    #[note(const_eval_validation_failure_note)]
+    pub ub_note: Option<()>,
+    #[subdiagnostic]
+    pub frames: Vec<FrameNote>,
+    #[subdiagnostic]
+    pub raw_bytes: RawBytesNote,
+}
+
+pub trait ReportErrorExt {
+    /// Returns the diagnostic message for this error.
+    fn diagnostic_message(&self) -> DiagMessage;
+    fn add_args<G: EmissionGuarantee>(self, diag: &mut Diag<'_, G>);
+
+    fn debug(self) -> String
+    where
+        Self: Sized,
+    {
+        ty::tls::with(move |tcx| {
+            let dcx = tcx.dcx();
+            let mut diag = dcx.struct_allow(DiagMessage::Str(String::new().into()));
+            let message = self.diagnostic_message();
+            self.add_args(&mut diag);
+            let s = dcx.eagerly_translate_to_string(message, diag.args.iter());
+            diag.cancel();
+            s
+        })
+    }
+}
+
+fn bad_pointer_message(msg: CheckInAllocMsg, dcx: &DiagCtxt) -> String {
+    use crate::fluent_generated::*;
+
+    let msg = match msg {
+        CheckInAllocMsg::MemoryAccessTest => const_eval_memory_access_test,
+        CheckInAllocMsg::PointerArithmeticTest => const_eval_pointer_arithmetic_test,
+        CheckInAllocMsg::OffsetFromTest => const_eval_offset_from_test,
+        CheckInAllocMsg::InboundsTest => const_eval_in_bounds_test,
+    };
+
+    dcx.eagerly_translate_to_string(msg, [].into_iter())
+}
+
+impl<'a> ReportErrorExt for UndefinedBehaviorInfo<'a> {
+    fn diagnostic_message(&self) -> DiagMessage {
+        use crate::fluent_generated::*;
+        use UndefinedBehaviorInfo::*;
+        match self {
+            Ub(msg) => msg.clone().into(),
+            Custom(x) => (x.msg)(),
+            ValidationError(e) => e.diagnostic_message(),
+
+            Unreachable => const_eval_unreachable,
+            BoundsCheckFailed { .. } => const_eval_bounds_check_failed,
+            DivisionByZero => const_eval_division_by_zero,
+            RemainderByZero => const_eval_remainder_by_zero,
+            DivisionOverflow => const_eval_division_overflow,
+            RemainderOverflow => const_eval_remainder_overflow,
+            PointerArithOverflow => const_eval_pointer_arithmetic_overflow,
+            InvalidMeta(InvalidMetaKind::SliceTooBig) => const_eval_invalid_meta_slice,
+            InvalidMeta(InvalidMetaKind::TooBig) => const_eval_invalid_meta,
+            UnterminatedCString(_) => const_eval_unterminated_c_string,
+            PointerUseAfterFree(_, _) => const_eval_pointer_use_after_free,
+            PointerOutOfBounds { ptr_size: Size::ZERO, .. } => const_eval_zst_pointer_out_of_bounds,
+            PointerOutOfBounds { .. } => const_eval_pointer_out_of_bounds,
+            DanglingIntPointer(0, _) => const_eval_dangling_null_pointer,
+            DanglingIntPointer(_, _) => const_eval_dangling_int_pointer,
+            AlignmentCheckFailed { .. } => const_eval_alignment_check_failed,
+            WriteToReadOnly(_) => const_eval_write_to_read_only,
+            DerefFunctionPointer(_) => const_eval_deref_function_pointer,
+            DerefVTablePointer(_) => const_eval_deref_vtable_pointer,
+            InvalidBool(_) => const_eval_invalid_bool,
+            InvalidChar(_) => const_eval_invalid_char,
+            InvalidTag(_) => const_eval_invalid_tag,
+            InvalidFunctionPointer(_) => const_eval_invalid_function_pointer,
+            InvalidVTablePointer(_) => const_eval_invalid_vtable_pointer,
+            InvalidStr(_) => const_eval_invalid_str,
+            InvalidUninitBytes(None) => const_eval_invalid_uninit_bytes_unknown,
+            InvalidUninitBytes(Some(_)) => const_eval_invalid_uninit_bytes,
+            DeadLocal => const_eval_dead_local,
+            ScalarSizeMismatch(_) => const_eval_scalar_size_mismatch,
+            UninhabitedEnumVariantWritten(_) => const_eval_uninhabited_enum_variant_written,
+            UninhabitedEnumVariantRead(_) => const_eval_uninhabited_enum_variant_read,
+            InvalidNichedEnumVariantWritten { .. } => {
+                const_eval_invalid_niched_enum_variant_written
+            }
+            AbiMismatchArgument { .. } => const_eval_incompatible_types,
+            AbiMismatchReturn { .. } => const_eval_incompatible_return_types,
+        }
+    }
+
+    fn add_args<G: EmissionGuarantee>(self, diag: &mut Diag<'_, G>) {
+        use UndefinedBehaviorInfo::*;
+        let dcx = diag.dcx;
+        match self {
+            Ub(_) => {}
+            Custom(custom) => {
+                (custom.add_args)(&mut |name, value| {
+                    diag.arg(name, value);
+                });
+            }
+            ValidationError(e) => e.add_args(diag),
+
+            Unreachable
+            | DivisionByZero
+            | RemainderByZero
+            | DivisionOverflow
+            | RemainderOverflow
+            | PointerArithOverflow
+            | InvalidMeta(InvalidMetaKind::SliceTooBig)
+            | InvalidMeta(InvalidMetaKind::TooBig)
+            | InvalidUninitBytes(None)
+            | DeadLocal
+            | UninhabitedEnumVariantWritten(_)
+            | UninhabitedEnumVariantRead(_) => {}
+            BoundsCheckFailed { len, index } => {
+                diag.arg("len", len);
+                diag.arg("index", index);
+            }
+            UnterminatedCString(ptr) | InvalidFunctionPointer(ptr) | InvalidVTablePointer(ptr) => {
+                diag.arg("pointer", ptr);
+            }
+            PointerUseAfterFree(alloc_id, msg) => {
+                diag.arg("alloc_id", alloc_id)
+                    .arg("bad_pointer_message", bad_pointer_message(msg, dcx));
+            }
+            PointerOutOfBounds { alloc_id, alloc_size, ptr_offset, ptr_size, msg } => {
+                diag.arg("alloc_id", alloc_id)
+                    .arg("alloc_size", alloc_size.bytes())
+                    .arg("ptr_offset", ptr_offset)
+                    .arg("ptr_size", ptr_size.bytes())
+                    .arg("bad_pointer_message", bad_pointer_message(msg, dcx));
+            }
+            DanglingIntPointer(ptr, msg) => {
+                if ptr != 0 {
+                    diag.arg("pointer", format!("{ptr:#x}[noalloc]"));
+                }
+
+                diag.arg("bad_pointer_message", bad_pointer_message(msg, dcx));
+            }
+            AlignmentCheckFailed(Misalignment { required, has }, msg) => {
+                diag.arg("required", required.bytes());
+                diag.arg("has", has.bytes());
+                diag.arg("msg", format!("{msg:?}"));
+            }
+            WriteToReadOnly(alloc) | DerefFunctionPointer(alloc) | DerefVTablePointer(alloc) => {
+                diag.arg("allocation", alloc);
+            }
+            InvalidBool(b) => {
+                diag.arg("value", format!("{b:02x}"));
+            }
+            InvalidChar(c) => {
+                diag.arg("value", format!("{c:08x}"));
+            }
+            InvalidTag(tag) => {
+                diag.arg("tag", format!("{tag:x}"));
+            }
+            InvalidStr(err) => {
+                diag.arg("err", format!("{err}"));
+            }
+            InvalidUninitBytes(Some((alloc, info))) => {
+                diag.arg("alloc", alloc);
+                diag.arg("access", info.access);
+                diag.arg("uninit", info.bad);
+            }
+            ScalarSizeMismatch(info) => {
+                diag.arg("target_size", info.target_size);
+                diag.arg("data_size", info.data_size);
+            }
+            InvalidNichedEnumVariantWritten { enum_ty } => {
+                diag.arg("ty", enum_ty.to_string());
+            }
+            AbiMismatchArgument { caller_ty, callee_ty }
+            | AbiMismatchReturn { caller_ty, callee_ty } => {
+                diag.arg("caller_ty", caller_ty.to_string());
+                diag.arg("callee_ty", callee_ty.to_string());
+            }
+        }
+    }
+}
+
+impl<'tcx> ReportErrorExt for ValidationErrorInfo<'tcx> {
+    fn diagnostic_message(&self) -> DiagMessage {
+        use crate::fluent_generated::*;
+        use rustc_middle::mir::interpret::ValidationErrorKind::*;
+        match self.kind {
+            PtrToUninhabited { ptr_kind: PointerKind::Box, .. } => {
+                const_eval_validation_box_to_uninhabited
+            }
+            PtrToUninhabited { ptr_kind: PointerKind::Ref(_), .. } => {
+                const_eval_validation_ref_to_uninhabited
+            }
+
+            PtrToStatic { ptr_kind: PointerKind::Box } => const_eval_validation_box_to_static,
+            PtrToStatic { ptr_kind: PointerKind::Ref(_) } => const_eval_validation_ref_to_static,
+
+            PointerAsInt { .. } => const_eval_validation_pointer_as_int,
+            PartialPointer => const_eval_validation_partial_pointer,
+            ConstRefToMutable => const_eval_validation_const_ref_to_mutable,
+            ConstRefToExtern => const_eval_validation_const_ref_to_extern,
+            MutableRefToImmutable => const_eval_validation_mutable_ref_to_immutable,
+            NullFnPtr => const_eval_validation_null_fn_ptr,
+            NeverVal => const_eval_validation_never_val,
+            NullablePtrOutOfRange { .. } => const_eval_validation_nullable_ptr_out_of_range,
+            PtrOutOfRange { .. } => const_eval_validation_ptr_out_of_range,
+            OutOfRange { .. } => const_eval_validation_out_of_range,
+            UnsafeCellInImmutable => const_eval_validation_unsafe_cell,
+            UninhabitedVal { .. } => const_eval_validation_uninhabited_val,
+            InvalidEnumTag { .. } => const_eval_validation_invalid_enum_tag,
+            UninhabitedEnumVariant => const_eval_validation_uninhabited_enum_variant,
+            Uninit { .. } => const_eval_validation_uninit,
+            InvalidVTablePtr { .. } => const_eval_validation_invalid_vtable_ptr,
+            InvalidMetaSliceTooLarge { ptr_kind: PointerKind::Box } => {
+                const_eval_validation_invalid_box_slice_meta
+            }
+            InvalidMetaSliceTooLarge { ptr_kind: PointerKind::Ref(_) } => {
+                const_eval_validation_invalid_ref_slice_meta
+            }
+
+            InvalidMetaTooLarge { ptr_kind: PointerKind::Box } => {
+                const_eval_validation_invalid_box_meta
+            }
+            InvalidMetaTooLarge { ptr_kind: PointerKind::Ref(_) } => {
+                const_eval_validation_invalid_ref_meta
+            }
+            UnalignedPtr { ptr_kind: PointerKind::Ref(_), .. } => {
+                const_eval_validation_unaligned_ref
+            }
+            UnalignedPtr { ptr_kind: PointerKind::Box, .. } => const_eval_validation_unaligned_box,
+
+            NullPtr { ptr_kind: PointerKind::Box } => const_eval_validation_null_box,
+            NullPtr { ptr_kind: PointerKind::Ref(_) } => const_eval_validation_null_ref,
+            DanglingPtrNoProvenance { ptr_kind: PointerKind::Box, .. } => {
+                const_eval_validation_dangling_box_no_provenance
+            }
+            DanglingPtrNoProvenance { ptr_kind: PointerKind::Ref(_), .. } => {
+                const_eval_validation_dangling_ref_no_provenance
+            }
+            DanglingPtrOutOfBounds { ptr_kind: PointerKind::Box } => {
+                const_eval_validation_dangling_box_out_of_bounds
+            }
+            DanglingPtrOutOfBounds { ptr_kind: PointerKind::Ref(_) } => {
+                const_eval_validation_dangling_ref_out_of_bounds
+            }
+            DanglingPtrUseAfterFree { ptr_kind: PointerKind::Box } => {
+                const_eval_validation_dangling_box_use_after_free
+            }
+            DanglingPtrUseAfterFree { ptr_kind: PointerKind::Ref(_) } => {
+                const_eval_validation_dangling_ref_use_after_free
+            }
+            InvalidBool { .. } => const_eval_validation_invalid_bool,
+            InvalidChar { .. } => const_eval_validation_invalid_char,
+            InvalidFnPtr { .. } => const_eval_validation_invalid_fn_ptr,
+        }
+    }
+
+    fn add_args<G: EmissionGuarantee>(self, err: &mut Diag<'_, G>) {
+        use crate::fluent_generated as fluent;
+        use rustc_middle::mir::interpret::ValidationErrorKind::*;
+
+        if let PointerAsInt { .. } | PartialPointer = self.kind {
+            err.help(fluent::const_eval_ptr_as_bytes_1);
+            err.help(fluent::const_eval_ptr_as_bytes_2);
+        }
+
+        let message = if let Some(path) = self.path {
+            err.dcx.eagerly_translate_to_string(
+                fluent::const_eval_validation_front_matter_invalid_value_with_path,
+                [("path".into(), DiagArgValue::Str(path.into()))].iter().map(|(a, b)| (a, b)),
+            )
+        } else {
+            err.dcx.eagerly_translate_to_string(
+                fluent::const_eval_validation_front_matter_invalid_value,
+                [].into_iter(),
+            )
+        };
+
+        err.arg("front_matter", message);
+
+        fn add_range_arg<G: EmissionGuarantee>(
+            r: WrappingRange,
+            max_hi: u128,
+            err: &mut Diag<'_, G>,
+        ) {
+            let WrappingRange { start: lo, end: hi } = r;
+            assert!(hi <= max_hi);
+            let msg = if lo > hi {
+                fluent::const_eval_range_wrapping
+            } else if lo == hi {
+                fluent::const_eval_range_singular
+            } else if lo == 0 {
+                assert!(hi < max_hi, "should not be printing if the range covers everything");
+                fluent::const_eval_range_upper
+            } else if hi == max_hi {
+                assert!(lo > 0, "should not be printing if the range covers everything");
+                fluent::const_eval_range_lower
+            } else {
+                fluent::const_eval_range
+            };
+
+            let args = [
+                ("lo".into(), DiagArgValue::Str(lo.to_string().into())),
+                ("hi".into(), DiagArgValue::Str(hi.to_string().into())),
+            ];
+            let args = args.iter().map(|(a, b)| (a, b));
+            let message = err.dcx.eagerly_translate_to_string(msg, args);
+            err.arg("in_range", message);
+        }
+
+        match self.kind {
+            PtrToUninhabited { ty, .. } | UninhabitedVal { ty } => {
+                err.arg("ty", ty);
+            }
+            PointerAsInt { expected } | Uninit { expected } => {
+                let msg = match expected {
+                    ExpectedKind::Reference => fluent::const_eval_validation_expected_ref,
+                    ExpectedKind::Box => fluent::const_eval_validation_expected_box,
+                    ExpectedKind::RawPtr => fluent::const_eval_validation_expected_raw_ptr,
+                    ExpectedKind::InitScalar => fluent::const_eval_validation_expected_init_scalar,
+                    ExpectedKind::Bool => fluent::const_eval_validation_expected_bool,
+                    ExpectedKind::Char => fluent::const_eval_validation_expected_char,
+                    ExpectedKind::Float => fluent::const_eval_validation_expected_float,
+                    ExpectedKind::Int => fluent::const_eval_validation_expected_int,
+                    ExpectedKind::FnPtr => fluent::const_eval_validation_expected_fn_ptr,
+                    ExpectedKind::EnumTag => fluent::const_eval_validation_expected_enum_tag,
+                    ExpectedKind::Str => fluent::const_eval_validation_expected_str,
+                };
+                let msg = err.dcx.eagerly_translate_to_string(msg, [].into_iter());
+                err.arg("expected", msg);
+            }
+            InvalidEnumTag { value }
+            | InvalidVTablePtr { value }
+            | InvalidBool { value }
+            | InvalidChar { value }
+            | InvalidFnPtr { value } => {
+                err.arg("value", value);
+            }
+            NullablePtrOutOfRange { range, max_value } | PtrOutOfRange { range, max_value } => {
+                add_range_arg(range, max_value, err)
+            }
+            OutOfRange { range, max_value, value } => {
+                err.arg("value", value);
+                add_range_arg(range, max_value, err);
+            }
+            UnalignedPtr { required_bytes, found_bytes, .. } => {
+                err.arg("required_bytes", required_bytes);
+                err.arg("found_bytes", found_bytes);
+            }
+            DanglingPtrNoProvenance { pointer, .. } => {
+                err.arg("pointer", pointer);
+            }
+            NullPtr { .. }
+            | PtrToStatic { .. }
+            | ConstRefToMutable
+            | ConstRefToExtern
+            | MutableRefToImmutable
+            | NullFnPtr
+            | NeverVal
+            | UnsafeCellInImmutable
+            | InvalidMetaSliceTooLarge { .. }
+            | InvalidMetaTooLarge { .. }
+            | DanglingPtrUseAfterFree { .. }
+            | DanglingPtrOutOfBounds { .. }
+            | UninhabitedEnumVariant
+            | PartialPointer => {}
+        }
+    }
+}
+
+impl ReportErrorExt for UnsupportedOpInfo {
+    fn diagnostic_message(&self) -> DiagMessage {
+        use crate::fluent_generated::*;
+        match self {
+            UnsupportedOpInfo::Unsupported(s) => s.clone().into(),
+            UnsupportedOpInfo::UnsizedLocal => const_eval_unsized_local,
+            UnsupportedOpInfo::OverwritePartialPointer(_) => const_eval_partial_pointer_overwrite,
+            UnsupportedOpInfo::ReadPartialPointer(_) => const_eval_partial_pointer_copy,
+            UnsupportedOpInfo::ReadPointerAsInt(_) => const_eval_read_pointer_as_int,
+            UnsupportedOpInfo::ThreadLocalStatic(_) => const_eval_thread_local_static,
+            UnsupportedOpInfo::ExternStatic(_) => const_eval_extern_static,
+        }
+    }
+    fn add_args<G: EmissionGuarantee>(self, diag: &mut Diag<'_, G>) {
+        use crate::fluent_generated::*;
+
+        use UnsupportedOpInfo::*;
+        if let ReadPointerAsInt(_) | OverwritePartialPointer(_) | ReadPartialPointer(_) = self {
+            diag.help(const_eval_ptr_as_bytes_1);
+            diag.help(const_eval_ptr_as_bytes_2);
+        }
+        match self {
+            // `ReadPointerAsInt(Some(info))` is never printed anyway; it only serves as an error to
+            // be further processed by validity checking, which then turns it into something nice to
+            // print. So it's not worth the effort of having diagnostics that can print the `info`.
+            UnsizedLocal | Unsupported(_) | ReadPointerAsInt(_) => {}
+            OverwritePartialPointer(ptr) | ReadPartialPointer(ptr) => {
+                diag.arg("ptr", ptr);
+            }
+            ThreadLocalStatic(did) | ExternStatic(did) => {
+                diag.arg("did", format!("{did:?}"));
+            }
+        }
+    }
+}
+
+impl<'tcx> ReportErrorExt for InterpError<'tcx> {
+    fn diagnostic_message(&self) -> DiagMessage {
+        match self {
+            InterpError::UndefinedBehavior(ub) => ub.diagnostic_message(),
+            InterpError::Unsupported(e) => e.diagnostic_message(),
+            InterpError::InvalidProgram(e) => e.diagnostic_message(),
+            InterpError::ResourceExhaustion(e) => e.diagnostic_message(),
+            InterpError::MachineStop(e) => e.diagnostic_message(),
+        }
+    }
+    fn add_args<G: EmissionGuarantee>(self, diag: &mut Diag<'_, G>) {
+        match self {
+            InterpError::UndefinedBehavior(ub) => ub.add_args(diag),
+            InterpError::Unsupported(e) => e.add_args(diag),
+            InterpError::InvalidProgram(e) => e.add_args(diag),
+            InterpError::ResourceExhaustion(e) => e.add_args(diag),
+            InterpError::MachineStop(e) => e.add_args(&mut |name, value| {
+                diag.arg(name, value);
+            }),
+        }
+    }
+}
+
+impl<'tcx> ReportErrorExt for InvalidProgramInfo<'tcx> {
+    fn diagnostic_message(&self) -> DiagMessage {
+        use crate::fluent_generated::*;
+        match self {
+            InvalidProgramInfo::TooGeneric => const_eval_too_generic,
+            InvalidProgramInfo::AlreadyReported(_) => const_eval_already_reported,
+            InvalidProgramInfo::Layout(e) => e.diagnostic_message(),
+            InvalidProgramInfo::FnAbiAdjustForForeignAbi(_) => {
+                rustc_middle::error::middle_adjust_for_foreign_abi_error
+            }
+        }
+    }
+    fn add_args<G: EmissionGuarantee>(self, diag: &mut Diag<'_, G>) {
+        match self {
+            InvalidProgramInfo::TooGeneric | InvalidProgramInfo::AlreadyReported(_) => {}
+            InvalidProgramInfo::Layout(e) => {
+                // The level doesn't matter; `dummy_diag` is consumed without being used.
+                let dummy_level = Level::Bug;
+                let dummy_diag: Diag<'_, ()> = e.into_diagnostic().into_diag(diag.dcx, dummy_level);
+                for (name, val) in dummy_diag.args.iter() {
+                    diag.arg(name.clone(), val.clone());
+                }
+                dummy_diag.cancel();
+            }
+            InvalidProgramInfo::FnAbiAdjustForForeignAbi(
+                AdjustForForeignAbiError::Unsupported { arch, abi },
+            ) => {
+                diag.arg("arch", arch);
+                diag.arg("abi", abi.name());
+            }
+        }
+    }
+}
+
+impl ReportErrorExt for ResourceExhaustionInfo {
+    fn diagnostic_message(&self) -> DiagMessage {
+        use crate::fluent_generated::*;
+        match self {
+            ResourceExhaustionInfo::StackFrameLimitReached => const_eval_stack_frame_limit_reached,
+            ResourceExhaustionInfo::MemoryExhausted => const_eval_memory_exhausted,
+            ResourceExhaustionInfo::AddressSpaceFull => const_eval_address_space_full,
+        }
+    }
+    fn add_args<G: EmissionGuarantee>(self, _: &mut Diag<'_, G>) {}
+}
+
+impl rustc_errors::IntoDiagArg for InternKind {
+    fn into_diag_arg(self) -> DiagArgValue {
+        DiagArgValue::Str(Cow::Borrowed(match self {
+            InternKind::Static(Mutability::Not) => "static",
+            InternKind::Static(Mutability::Mut) => "static_mut",
+            InternKind::Constant => "const",
+            InternKind::Promoted => "promoted",
+        }))
+    }
+}
diff --git a/compiler/rustc_const_eval/src/interpret/cast.rs b/compiler/rustc_const_eval/src/interpret/cast.rs
new file mode 100644
index 00000000000..2cebea9d145
--- /dev/null
+++ b/compiler/rustc_const_eval/src/interpret/cast.rs
@@ -0,0 +1,481 @@
+use std::assert_matches::assert_matches;
+
+use rustc_apfloat::ieee::{Double, Single};
+use rustc_apfloat::{Float, FloatConvert};
+use rustc_middle::mir::interpret::{InterpResult, PointerArithmetic, Scalar};
+use rustc_middle::mir::CastKind;
+use rustc_middle::ty::adjustment::PointerCoercion;
+use rustc_middle::ty::layout::{IntegerExt, LayoutOf, TyAndLayout};
+use rustc_middle::ty::{self, FloatTy, Ty, TypeAndMut};
+use rustc_target::abi::Integer;
+use rustc_type_ir::TyKind::*;
+
+use super::{
+    util::ensure_monomorphic_enough, FnVal, ImmTy, Immediate, InterpCx, Machine, OpTy, PlaceTy,
+};
+
+use crate::fluent_generated as fluent;
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+    pub fn cast(
+        &mut self,
+        src: &OpTy<'tcx, M::Provenance>,
+        cast_kind: CastKind,
+        cast_ty: Ty<'tcx>,
+        dest: &PlaceTy<'tcx, M::Provenance>,
+    ) -> InterpResult<'tcx> {
+        // `cast_ty` will often be the same as `dest.ty`, but not always, since subtyping is still
+        // possible.
+        let cast_layout =
+            if cast_ty == dest.layout.ty { dest.layout } else { self.layout_of(cast_ty)? };
+        // FIXME: In which cases should we trigger UB when the source is uninit?
+        match cast_kind {
+            CastKind::PointerCoercion(PointerCoercion::Unsize) => {
+                self.unsize_into(src, cast_layout, dest)?;
+            }
+
+            CastKind::PointerExposeAddress => {
+                let src = self.read_immediate(src)?;
+                let res = self.pointer_expose_address_cast(&src, cast_layout)?;
+                self.write_immediate(*res, dest)?;
+            }
+
+            CastKind::PointerFromExposedAddress => {
+                let src = self.read_immediate(src)?;
+                let res = self.pointer_from_exposed_address_cast(&src, cast_layout)?;
+                self.write_immediate(*res, dest)?;
+            }
+
+            CastKind::IntToInt | CastKind::IntToFloat => {
+                let src = self.read_immediate(src)?;
+                let res = self.int_to_int_or_float(&src, cast_layout)?;
+                self.write_immediate(*res, dest)?;
+            }
+
+            CastKind::FloatToFloat | CastKind::FloatToInt => {
+                let src = self.read_immediate(src)?;
+                let res = self.float_to_float_or_int(&src, cast_layout)?;
+                self.write_immediate(*res, dest)?;
+            }
+
+            CastKind::FnPtrToPtr | CastKind::PtrToPtr => {
+                let src = self.read_immediate(src)?;
+                let res = self.ptr_to_ptr(&src, cast_layout)?;
+                self.write_immediate(*res, dest)?;
+            }
+
+            CastKind::PointerCoercion(
+                PointerCoercion::MutToConstPointer | PointerCoercion::ArrayToPointer,
+            ) => {
+                // These are NOPs, but can be wide pointers.
+                let v = self.read_immediate(src)?;
+                self.write_immediate(*v, dest)?;
+            }
+
+            CastKind::PointerCoercion(PointerCoercion::ReifyFnPointer) => {
+                // All reifications must be monomorphic, bail out otherwise.
+                ensure_monomorphic_enough(*self.tcx, src.layout.ty)?;
+
+                // The src operand does not matter, just its type
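+                // E.g. this is the cast behind `let f: fn(u32) -> u32 = square;` for some
+                // suitable function `square`: the zero-sized `FnDef` value becomes a function pointer.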
+                match *src.layout.ty.kind() {
+                    ty::FnDef(def_id, args) => {
+                        let instance = ty::Instance::resolve_for_fn_ptr(
+                            *self.tcx,
+                            self.param_env,
+                            def_id,
+                            args,
+                        )
+                        .ok_or_else(|| err_inval!(TooGeneric))?;
+
+                        let fn_ptr = self.fn_ptr(FnVal::Instance(instance));
+                        self.write_pointer(fn_ptr, dest)?;
+                    }
+                    _ => span_bug!(self.cur_span(), "reify fn pointer on {}", src.layout.ty),
+                }
+            }
+
+            CastKind::PointerCoercion(PointerCoercion::UnsafeFnPointer) => {
+                let src = self.read_immediate(src)?;
+                match cast_ty.kind() {
+                    ty::FnPtr(_) => {
+                        // No change to value
+                        self.write_immediate(*src, dest)?;
+                    }
+                    _ => span_bug!(self.cur_span(), "fn to unsafe fn cast on {}", cast_ty),
+                }
+            }
+
+            CastKind::PointerCoercion(PointerCoercion::ClosureFnPointer(_)) => {
+                // All reifications must be monomorphic, bail out otherwise.
+                ensure_monomorphic_enough(*self.tcx, src.layout.ty)?;
+
+                // The src operand does not matter, just its type
+                match *src.layout.ty.kind() {
+                    ty::Closure(def_id, args) => {
+                        let instance = ty::Instance::resolve_closure(
+                            *self.tcx,
+                            def_id,
+                            args,
+                            ty::ClosureKind::FnOnce,
+                        );
+                        let fn_ptr = self.fn_ptr(FnVal::Instance(instance));
+                        self.write_pointer(fn_ptr, dest)?;
+                    }
+                    _ => span_bug!(self.cur_span(), "closure fn pointer on {}", src.layout.ty),
+                }
+            }
+
+            CastKind::DynStar => {
+                if let ty::Dynamic(data, _, ty::DynStar) = cast_ty.kind() {
+                    // Initial cast from sized to dyn trait
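+                    // A `dyn*` value is a (data, vtable) scalar pair where the data part must
+                    // itself be pointer-sized (checked below).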
+                    let vtable = self.get_vtable_ptr(src.layout.ty, data.principal())?;
+                    let vtable = Scalar::from_maybe_pointer(vtable, self);
+                    let data = self.read_immediate(src)?.to_scalar();
+                    let _assert_pointer_like = data.to_pointer(self)?;
+                    let val = Immediate::ScalarPair(data, vtable);
+                    self.write_immediate(val, dest)?;
+                } else {
+                    bug!()
+                }
+            }
+
+            CastKind::Transmute => {
+                assert!(src.layout.is_sized());
+                assert!(dest.layout.is_sized());
+                assert_eq!(cast_ty, dest.layout.ty); // we otherwise ignore `cast_ty` entirely...
+                if src.layout.size != dest.layout.size {
+                    throw_ub_custom!(
+                        fluent::const_eval_invalid_transmute,
+                        src_bytes = src.layout.size.bytes(),
+                        dest_bytes = dest.layout.size.bytes(),
+                        src = src.layout.ty,
+                        dest = dest.layout.ty,
+                    );
+                }
+
+                self.copy_op_allow_transmute(src, dest)?;
+            }
+        }
+        Ok(())
+    }
+
+    /// Handles 'IntToInt' and 'IntToFloat' casts.
+    pub fn int_to_int_or_float(
+        &self,
+        src: &ImmTy<'tcx, M::Provenance>,
+        cast_to: TyAndLayout<'tcx>,
+    ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
+        assert!(src.layout.ty.is_integral() || src.layout.ty.is_char() || src.layout.ty.is_bool());
+        assert!(cast_to.ty.is_floating_point() || cast_to.ty.is_integral() || cast_to.ty.is_char());
+
+        Ok(ImmTy::from_scalar(
+            self.cast_from_int_like(src.to_scalar(), src.layout, cast_to.ty)?,
+            cast_to,
+        ))
+    }
+
+    /// Handles 'FloatToFloat' and 'FloatToInt' casts.
+    pub fn float_to_float_or_int(
+        &self,
+        src: &ImmTy<'tcx, M::Provenance>,
+        cast_to: TyAndLayout<'tcx>,
+    ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
+        use rustc_type_ir::TyKind::*;
+
+        let Float(fty) = src.layout.ty.kind() else {
+            bug!("FloatToFloat/FloatToInt cast: source type {} is not a float type", src.layout.ty)
+        };
+        let val = match fty {
+            FloatTy::F16 => unimplemented!("f16_f128"),
+            FloatTy::F32 => self.cast_from_float(src.to_scalar().to_f32()?, cast_to.ty),
+            FloatTy::F64 => self.cast_from_float(src.to_scalar().to_f64()?, cast_to.ty),
+            FloatTy::F128 => unimplemented!("f16_f128"),
+        };
+        Ok(ImmTy::from_scalar(val, cast_to))
+    }
+
+    /// Handles 'FnPtrToPtr' and 'PtrToPtr' casts.
+    pub fn ptr_to_ptr(
+        &self,
+        src: &ImmTy<'tcx, M::Provenance>,
+        cast_to: TyAndLayout<'tcx>,
+    ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
+        assert!(src.layout.ty.is_any_ptr());
+        assert!(cast_to.ty.is_unsafe_ptr());
+        // Handle casting any ptr to raw ptr (might be a fat ptr).
+        if cast_to.size == src.layout.size {
+            // Thin or fat pointer that just has the ptr kind of the target type changed.
+            return Ok(ImmTy::from_immediate(**src, cast_to));
+        } else {
+            // Casting the metadata away from a fat ptr.
+            assert_eq!(src.layout.size, 2 * self.pointer_size());
+            assert_eq!(cast_to.size, self.pointer_size());
+            assert!(src.layout.ty.is_unsafe_ptr());
+            return match **src {
+                Immediate::ScalarPair(data, _) => Ok(ImmTy::from_scalar(data, cast_to)),
+                Immediate::Scalar(..) => span_bug!(
+                    self.cur_span(),
+                    "{:?} input to a fat-to-thin cast ({} -> {})",
+                    *src,
+                    src.layout.ty,
+                    cast_to.ty
+                ),
+                Immediate::Uninit => throw_ub!(InvalidUninitBytes(None)),
+            };
+        }
+    }
+
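+    /// Handles 'PointerExposeAddress' casts, i.e. `ptr as usize`: the pointer's provenance is
+    /// exposed (via the machine hook) and its address is returned as an integer.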
+    pub fn pointer_expose_address_cast(
+        &mut self,
+        src: &ImmTy<'tcx, M::Provenance>,
+        cast_to: TyAndLayout<'tcx>,
+    ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
+        assert_matches!(src.layout.ty.kind(), ty::RawPtr(_) | ty::FnPtr(_));
+        assert!(cast_to.ty.is_integral());
+
+        let scalar = src.to_scalar();
+        let ptr = scalar.to_pointer(self)?;
+        match ptr.into_pointer_or_addr() {
+            Ok(ptr) => M::expose_ptr(self, ptr)?,
+            Err(_) => {} // Do nothing, exposing an invalid pointer (`None` provenance) is a NOP.
+        };
+        Ok(ImmTy::from_scalar(self.cast_from_int_like(scalar, src.layout, cast_to.ty)?, cast_to))
+    }
+
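+    /// Handles 'PointerFromExposedAddress' casts, i.e. `usize as *const T`: the machine decides
+    /// which (if any) exposed provenance the resulting pointer picks up.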
+    pub fn pointer_from_exposed_address_cast(
+        &self,
+        src: &ImmTy<'tcx, M::Provenance>,
+        cast_to: TyAndLayout<'tcx>,
+    ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
+        assert!(src.layout.ty.is_integral());
+        assert_matches!(cast_to.ty.kind(), ty::RawPtr(_));
+
+        // First cast to usize.
+        let scalar = src.to_scalar();
+        let addr = self.cast_from_int_like(scalar, src.layout, self.tcx.types.usize)?;
+        let addr = addr.to_target_usize(self)?;
+
+        // Then turn address into pointer.
+        let ptr = M::ptr_from_addr_cast(self, addr)?;
+        Ok(ImmTy::from_scalar(Scalar::from_maybe_pointer(ptr, self), cast_to))
+    }
+
+    /// Low-level cast helper function. This works directly on scalars and can cast 'int-like'
+    /// input types (basically everything with a scalar layout) to int/float/char types.
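+    /// For example, this is used when casting a `bool` or `char` scalar to an integer type.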
+    fn cast_from_int_like(
+        &self,
+        scalar: Scalar<M::Provenance>, // input value (there is no ScalarTy so we separate data+layout)
+        src_layout: TyAndLayout<'tcx>,
+        cast_ty: Ty<'tcx>,
+    ) -> InterpResult<'tcx, Scalar<M::Provenance>> {
+        // Let's make sure v is sign-extended *if* it has a signed type.
+        let signed = src_layout.abi.is_signed(); // Also asserts that abi is `Scalar`.
+
+        let v = scalar.to_bits(src_layout.size)?;
+        let v = if signed { self.sign_extend(v, src_layout) } else { v };
+        trace!("cast_from_scalar: {}, {} -> {}", v, src_layout.ty, cast_ty);
+
+        Ok(match *cast_ty.kind() {
+            // int -> int
+            Int(_) | Uint(_) => {
+                let size = match *cast_ty.kind() {
+                    Int(t) => Integer::from_int_ty(self, t).size(),
+                    Uint(t) => Integer::from_uint_ty(self, t).size(),
+                    _ => bug!(),
+                };
+                let v = size.truncate(v);
+                Scalar::from_uint(v, size)
+            }
+
+            // signed int -> float
+            Float(fty) if signed => {
+                let v = v as i128;
+                match fty {
+                    FloatTy::F16 => unimplemented!("f16_f128"),
+                    FloatTy::F32 => Scalar::from_f32(Single::from_i128(v).value),
+                    FloatTy::F64 => Scalar::from_f64(Double::from_i128(v).value),
+                    FloatTy::F128 => unimplemented!("f16_f128"),
+                }
+            }
+            // unsigned int -> float
+            Float(fty) => match fty {
+                FloatTy::F16 => unimplemented!("f16_f128"),
+                FloatTy::F32 => Scalar::from_f32(Single::from_u128(v).value),
+                FloatTy::F64 => Scalar::from_f64(Double::from_u128(v).value),
+                FloatTy::F128 => unimplemented!("f16_f128"),
+            },
+
+            // u8 -> char
+            Char => Scalar::from_u32(u8::try_from(v).unwrap().into()),
+
+            // Casts to bool are not permitted by rustc, no need to handle them here.
+            _ => span_bug!(self.cur_span(), "invalid int to {} cast", cast_ty),
+        })
+    }
+
+    /// Low-level cast helper function. Converts an apfloat `f` into int or float types.
+    fn cast_from_float<F>(&self, f: F, dest_ty: Ty<'tcx>) -> Scalar<M::Provenance>
+    where
+        F: Float + Into<Scalar<M::Provenance>> + FloatConvert<Single> + FloatConvert<Double>,
+    {
+        use rustc_type_ir::TyKind::*;
+
+        fn adjust_nan<
+            'mir,
+            'tcx: 'mir,
+            M: Machine<'mir, 'tcx>,
+            F1: rustc_apfloat::Float + FloatConvert<F2>,
+            F2: rustc_apfloat::Float,
+        >(
+            ecx: &InterpCx<'mir, 'tcx, M>,
+            f1: F1,
+            f2: F2,
+        ) -> F2 {
+            if f2.is_nan() { M::generate_nan(ecx, &[f1]) } else { f2 }
+        }
+
+        match *dest_ty.kind() {
+            // float -> uint
+            Uint(t) => {
+                let size = Integer::from_uint_ty(self, t).size();
+                // `to_u128` is a saturating cast, which is what we need
+                // (https://doc.rust-lang.org/nightly/nightly-rustc/rustc_apfloat/trait.Float.html#method.to_i128_r).
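+                // This matches the saturating `as`-cast semantics: e.g. `f32::INFINITY as u8`
+                // is 255, and NaN becomes 0.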
+                let v = f.to_u128(size.bits_usize()).value;
+                // This should already fit the bit width
+                Scalar::from_uint(v, size)
+            }
+            // float -> int
+            Int(t) => {
+                let size = Integer::from_int_ty(self, t).size();
+                // `to_i128` is a saturating cast, which is what we need
+                // (https://doc.rust-lang.org/nightly/nightly-rustc/rustc_apfloat/trait.Float.html#method.to_i128_r).
+                let v = f.to_i128(size.bits_usize()).value;
+                Scalar::from_int(v, size)
+            }
+            // float -> float
+            Float(fty) => match fty {
+                FloatTy::F16 => unimplemented!("f16_f128"),
+                FloatTy::F32 => Scalar::from_f32(adjust_nan(self, f, f.convert(&mut false).value)),
+                FloatTy::F64 => Scalar::from_f64(adjust_nan(self, f, f.convert(&mut false).value)),
+                FloatTy::F128 => unimplemented!("f16_f128"),
+            },
+            // That's it.
+            _ => span_bug!(self.cur_span(), "invalid float to {} cast", dest_ty),
+        }
+    }
+
+    /// `src` is a *pointer to* a `source_ty`, and in `dest` we should store a pointer to the same
+    /// data at type `cast_ty`.
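+    /// E.g. unsizing `&[T; N]` to `&[T]`, or `&T` to `&dyn Trait`.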
+    fn unsize_into_ptr(
+        &mut self,
+        src: &OpTy<'tcx, M::Provenance>,
+        dest: &PlaceTy<'tcx, M::Provenance>,
+        // The pointee types
+        source_ty: Ty<'tcx>,
+        cast_ty: Ty<'tcx>,
+    ) -> InterpResult<'tcx> {
+        // A<Struct> -> A<Trait> conversion
+        let (src_pointee_ty, dest_pointee_ty) =
+            self.tcx.struct_lockstep_tails_erasing_lifetimes(source_ty, cast_ty, self.param_env);
+
+        match (&src_pointee_ty.kind(), &dest_pointee_ty.kind()) {
+            (&ty::Array(_, length), &ty::Slice(_)) => {
+                let ptr = self.read_pointer(src)?;
+                // u64 cast is from usize to u64, which is always good
+                let val = Immediate::new_slice(
+                    ptr,
+                    length.eval_target_usize(*self.tcx, self.param_env),
+                    self,
+                );
+                self.write_immediate(val, dest)
+            }
+            (ty::Dynamic(data_a, _, ty::Dyn), ty::Dynamic(data_b, _, ty::Dyn)) => {
+                let val = self.read_immediate(src)?;
+                if data_a.principal() == data_b.principal() {
+                    // A NOP cast that doesn't actually change anything; it should be allowed even with mismatching vtables.
+                    return self.write_immediate(*val, dest);
+                }
+                let (old_data, old_vptr) = val.to_scalar_pair();
+                let old_data = old_data.to_pointer(self)?;
+                let old_vptr = old_vptr.to_pointer(self)?;
+                let (ty, old_trait) = self.get_ptr_vtable(old_vptr)?;
+                if old_trait != data_a.principal() {
+                    throw_ub_custom!(fluent::const_eval_upcast_mismatch);
+                }
+                let new_vptr = self.get_vtable_ptr(ty, data_b.principal())?;
+                self.write_immediate(Immediate::new_dyn_trait(old_data, new_vptr, self), dest)
+            }
+            (_, &ty::Dynamic(data, _, ty::Dyn)) => {
+                // Initial cast from sized to dyn trait
+                let vtable = self.get_vtable_ptr(src_pointee_ty, data.principal())?;
+                let ptr = self.read_pointer(src)?;
+                let val = Immediate::new_dyn_trait(ptr, vtable, &*self.tcx);
+                self.write_immediate(val, dest)
+            }
+            _ => {
+                // Do not ICE if we are not monomorphic enough.
+                ensure_monomorphic_enough(*self.tcx, src.layout.ty)?;
+                ensure_monomorphic_enough(*self.tcx, cast_ty)?;
+
+                span_bug!(
+                    self.cur_span(),
+                    "invalid pointer unsizing {} -> {}",
+                    src.layout.ty,
+                    cast_ty
+                )
+            }
+        }
+    }
+
+    pub fn unsize_into(
+        &mut self,
+        src: &OpTy<'tcx, M::Provenance>,
+        cast_ty: TyAndLayout<'tcx>,
+        dest: &PlaceTy<'tcx, M::Provenance>,
+    ) -> InterpResult<'tcx> {
+        trace!("Unsizing {:?} of type {} into {}", *src, src.layout.ty, cast_ty.ty);
+        match (&src.layout.ty.kind(), &cast_ty.ty.kind()) {
+            (&ty::Ref(_, s, _), &ty::Ref(_, c, _) | &ty::RawPtr(TypeAndMut { ty: c, .. }))
+            | (&ty::RawPtr(TypeAndMut { ty: s, .. }), &ty::RawPtr(TypeAndMut { ty: c, .. })) => {
+                self.unsize_into_ptr(src, dest, *s, *c)
+            }
+            (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
+                assert_eq!(def_a, def_b); // implies same number of fields
+
+                // Unsizing of generic struct with pointer fields, like `Arc<T>` -> `Arc<Trait>`.
+                // There can be extra fields as long as they don't change their type or are 1-ZST.
+                // There might also be no field that actually needs unsizing.
+                let mut found_cast_field = false;
+                for i in 0..src.layout.fields.count() {
+                    let cast_ty_field = cast_ty.field(self, i);
+                    let src_field = self.project_field(src, i)?;
+                    let dst_field = self.project_field(dest, i)?;
+                    if src_field.layout.is_1zst() && cast_ty_field.is_1zst() {
+                        // Skip 1-ZST fields.
+                    } else if src_field.layout.ty == cast_ty_field.ty {
+                        self.copy_op(&src_field, &dst_field)?;
+                    } else {
+                        if found_cast_field {
+                            span_bug!(self.cur_span(), "unsize_into: more than one field to cast");
+                        }
+                        found_cast_field = true;
+                        self.unsize_into(&src_field, cast_ty_field, &dst_field)?;
+                    }
+                }
+                Ok(())
+            }
+            _ => {
+                // Do not ICE if we are not monomorphic enough.
+                ensure_monomorphic_enough(*self.tcx, src.layout.ty)?;
+                ensure_monomorphic_enough(*self.tcx, cast_ty.ty)?;
+
+                span_bug!(
+                    self.cur_span(),
+                    "unsize_into: invalid conversion: {:?} -> {:?}",
+                    src.layout,
+                    dest.layout
+                )
+            }
+        }
+    }
+}
diff --git a/compiler/rustc_const_eval/src/interpret/discriminant.rs b/compiler/rustc_const_eval/src/interpret/discriminant.rs
new file mode 100644
index 00000000000..6d4f6d0cb3c
--- /dev/null
+++ b/compiler/rustc_const_eval/src/interpret/discriminant.rs
@@ -0,0 +1,280 @@
+//! Functions for reading and writing discriminants of multi-variant layouts (enums and coroutines).
+
+use rustc_middle::mir;
+use rustc_middle::ty::layout::{LayoutOf, PrimitiveExt};
+use rustc_middle::ty::{self, Ty};
+use rustc_target::abi::{self, TagEncoding};
+use rustc_target::abi::{VariantIdx, Variants};
+
+use super::{ImmTy, InterpCx, InterpResult, Machine, Readable, Scalar, Writeable};
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+    /// Writes the discriminant of the given variant.
+    ///
+    /// If the variant is uninhabited, this is UB.
+    #[instrument(skip(self), level = "trace")]
+    pub fn write_discriminant(
+        &mut self,
+        variant_index: VariantIdx,
+        dest: &impl Writeable<'tcx, M::Provenance>,
+    ) -> InterpResult<'tcx> {
+        // Layout computation excludes uninhabited variants from consideration
+        // therefore there's no way to represent those variants in the given layout.
+        // Essentially, uninhabited variants do not have a tag that corresponds to their
+        // discriminant, so we cannot do anything here.
+        // When evaluating we will always error before even getting here, but ConstProp 'executes'
+        // dead code, so we cannot ICE here.
+        if dest.layout().for_variant(self, variant_index).abi.is_uninhabited() {
+            throw_ub!(UninhabitedEnumVariantWritten(variant_index))
+        }
+
+        match dest.layout().variants {
+            abi::Variants::Single { index } => {
+                assert_eq!(index, variant_index);
+            }
+            abi::Variants::Multiple {
+                tag_encoding: TagEncoding::Direct,
+                tag: tag_layout,
+                tag_field,
+                ..
+            } => {
+                // No need to validate the discriminant here because the
+                // `TyAndLayout::for_variant()` call earlier already checks that the variant is valid.
+
+                let discr_val = dest
+                    .layout()
+                    .ty
+                    .discriminant_for_variant(*self.tcx, variant_index)
+                    .unwrap()
+                    .val;
+
+                // raw discriminants for enums are isize or bigger during
+                // their computation, but the in-memory tag is the smallest possible
+                // representation
+                let size = tag_layout.size(self);
+                let tag_val = size.truncate(discr_val);
+
+                let tag_dest = self.project_field(dest, tag_field)?;
+                self.write_scalar(Scalar::from_uint(tag_val, size), &tag_dest)?;
+            }
+            abi::Variants::Multiple {
+                tag_encoding:
+                    TagEncoding::Niche { untagged_variant, ref niche_variants, niche_start },
+                tag: tag_layout,
+                tag_field,
+                ..
+            } => {
+                // No need to validate the discriminant here because the
+                // `TyAndLayout::for_variant()` call earlier already checks that the variant is valid.
+
+                if variant_index != untagged_variant {
+                    let variants_start = niche_variants.start().as_u32();
+                    let variant_index_relative = variant_index
+                        .as_u32()
+                        .checked_sub(variants_start)
+                        .expect("overflow computing relative variant idx");
+                    // We need to use machine arithmetic when taking into account `niche_start`:
+                    // tag_val = variant_index_relative + niche_start_val
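+                    // E.g. for `Option<&T>`, the niche variant `None` has relative index 0 and
+                    // `niche_start` is 0, so `None` is encoded as the all-zero (null) tag value.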
+                    let tag_layout = self.layout_of(tag_layout.primitive().to_int_ty(*self.tcx))?;
+                    let niche_start_val = ImmTy::from_uint(niche_start, tag_layout);
+                    let variant_index_relative_val =
+                        ImmTy::from_uint(variant_index_relative, tag_layout);
+                    let tag_val = self.wrapping_binary_op(
+                        mir::BinOp::Add,
+                        &variant_index_relative_val,
+                        &niche_start_val,
+                    )?;
+                    // Write result.
+                    let niche_dest = self.project_field(dest, tag_field)?;
+                    self.write_immediate(*tag_val, &niche_dest)?;
+                } else {
+                    // The untagged variant is implicitly encoded simply by having a value that is
+                    // outside the niche variants. But what if the data stored here does not
+                    // actually encode this variant? That would be bad! So let's double-check...
+                    let actual_variant = self.read_discriminant(&dest.to_op(self)?)?;
+                    if actual_variant != variant_index {
+                        throw_ub!(InvalidNichedEnumVariantWritten { enum_ty: dest.layout().ty });
+                    }
+                }
+            }
+        }
+
+        Ok(())
+    }
+
+    /// Read discriminant, return the runtime value as well as the variant index.
+    /// Can also legally be called on non-enums (e.g. through the discriminant_value intrinsic)!
+    ///
+    /// Will never return an uninhabited variant.
+    #[instrument(skip(self), level = "trace")]
+    pub fn read_discriminant(
+        &self,
+        op: &impl Readable<'tcx, M::Provenance>,
+    ) -> InterpResult<'tcx, VariantIdx> {
+        let ty = op.layout().ty;
+        trace!("read_discriminant_value {:#?}", op.layout());
+        // Get type and layout of the discriminant.
+        let discr_layout = self.layout_of(ty.discriminant_ty(*self.tcx))?;
+        trace!("discriminant type: {:?}", discr_layout.ty);
+
+        // We use "discriminant" to refer to the value associated with a particular enum variant.
+        // This is not to be confused with its "variant index", which just determines its position in
+        // the declared list of variants -- they can differ with explicitly assigned discriminants.
+        // We use "tag" to refer to how the discriminant is encoded in memory, which can be either
+        // straight-forward (`TagEncoding::Direct`) or with a niche (`TagEncoding::Niche`).
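+        // E.g. in `enum E { A = 10, B = 20 }`, variant `B` has variant index 1 but
+        // discriminant 20; with `TagEncoding::Direct` the in-memory tag stores 20.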
+        let (tag_scalar_layout, tag_encoding, tag_field) = match op.layout().variants {
+            Variants::Single { index } => {
+                // Do some extra checks on enums.
+                if ty.is_enum() {
+                    // Hilariously, `Single` is used even for 0-variant enums.
+                    // (See https://github.com/rust-lang/rust/issues/89765).
+                    if matches!(ty.kind(), ty::Adt(def, ..) if def.variants().is_empty()) {
+                        throw_ub!(UninhabitedEnumVariantRead(index))
+                    }
+                    // For consistency with `write_discriminant`, and to make sure that
+                    // `project_downcast` cannot fail due to strange layouts, we declare immediate UB
+                    // for uninhabited variants.
+                    if op.layout().for_variant(self, index).abi.is_uninhabited() {
+                        throw_ub!(UninhabitedEnumVariantRead(index))
+                    }
+                }
+                return Ok(index);
+            }
+            Variants::Multiple { tag, ref tag_encoding, tag_field, .. } => {
+                (tag, tag_encoding, tag_field)
+            }
+        };
+
+        // There are *three* layouts that come into play here:
+        // - The discriminant has a type for typechecking. This is `discr_layout`, and is used for
+        //   the `Scalar` we return.
+        // - The tag (encoded discriminant) has layout `tag_layout`. This is always an integer type,
+        //   and used to interpret the value we read from the tag field.
+        //   For the return value, a cast to `discr_layout` is performed.
+        // - The field storing the tag has a layout, which is very similar to `tag_layout` but
+        //   may be a pointer. This is `tag_val.layout`; we just use it for sanity checks.
+
+        // Get layout for tag.
+        let tag_layout = self.layout_of(tag_scalar_layout.primitive().to_int_ty(*self.tcx))?;
+
+        // Read tag and sanity-check `tag_layout`.
+        let tag_val = self.read_immediate(&self.project_field(op, tag_field)?)?;
+        assert_eq!(tag_layout.size, tag_val.layout.size);
+        assert_eq!(tag_layout.abi.is_signed(), tag_val.layout.abi.is_signed());
+        trace!("tag value: {}", tag_val);
+
+        // Figure out which discriminant and variant this corresponds to.
+        let index = match *tag_encoding {
+            TagEncoding::Direct => {
+                // Generate a specific error if `tag_val` is not an integer.
+                // (`tag_bits` itself is only used for error messages below.)
+                let tag_bits = tag_val
+                    .to_scalar()
+                    .try_to_int()
+                    .map_err(|dbg_val| err_ub!(InvalidTag(dbg_val)))?
+                    .assert_bits(tag_layout.size);
+                // Cast bits from tag layout to discriminant layout.
+                // After the checks we did above, this cannot fail, as
+                // discriminants are int-like.
+                let discr_val = self.int_to_int_or_float(&tag_val, discr_layout).unwrap();
+                let discr_bits = discr_val.to_scalar().assert_bits(discr_layout.size);
+                // Convert discriminant to variant index, and catch invalid discriminants.
+                let index = match *ty.kind() {
+                    ty::Adt(adt, _) => {
+                        adt.discriminants(*self.tcx).find(|(_, var)| var.val == discr_bits)
+                    }
+                    ty::Coroutine(def_id, args) => {
+                        let args = args.as_coroutine();
+                        args.discriminants(def_id, *self.tcx).find(|(_, var)| var.val == discr_bits)
+                    }
+                    _ => span_bug!(self.cur_span(), "tagged layout for non-adt non-coroutine"),
+                }
+                .ok_or_else(|| err_ub!(InvalidTag(Scalar::from_uint(tag_bits, tag_layout.size))))?;
+                // Return the cast value, and the index.
+                index.0
+            }
+            TagEncoding::Niche { untagged_variant, ref niche_variants, niche_start } => {
+                let tag_val = tag_val.to_scalar();
+                // Compute the variant this niche value/"tag" corresponds to. With niche layout,
+                // discriminant (encoded in niche/tag) and variant index are the same.
+                let variants_start = niche_variants.start().as_u32();
+                let variants_end = niche_variants.end().as_u32();
+                let variant = match tag_val.try_to_int() {
+                    Err(dbg_val) => {
+                        // So this is a pointer then, and casting it to an int failed.
+                        // This can only happen during CTFE.
+                        // This is okay only if the niche is just the value 0 and the pointer is
+                        // not null; everything else we conservatively reject.
+                        let ptr_valid = niche_start == 0
+                            && variants_start == variants_end
+                            && !self.scalar_may_be_null(tag_val)?;
+                        if !ptr_valid {
+                            throw_ub!(InvalidTag(dbg_val))
+                        }
+                        untagged_variant
+                    }
+                    Ok(tag_bits) => {
+                        let tag_bits = tag_bits.assert_bits(tag_layout.size);
+                        // We need to use machine arithmetic to get the relative variant idx:
+                        // variant_index_relative = tag_val - niche_start_val
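+                        // E.g. for `Option<&T>`, a tag of 0 (the null pointer) gives relative
+                        // index 0, which decodes to the niched variant `None`.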
+                        let tag_val = ImmTy::from_uint(tag_bits, tag_layout);
+                        let niche_start_val = ImmTy::from_uint(niche_start, tag_layout);
+                        let variant_index_relative_val =
+                            self.wrapping_binary_op(mir::BinOp::Sub, &tag_val, &niche_start_val)?;
+                        let variant_index_relative =
+                            variant_index_relative_val.to_scalar().assert_bits(tag_val.layout.size);
+                        // Check if this is in the range that indicates an actual discriminant.
+                        if variant_index_relative <= u128::from(variants_end - variants_start) {
+                            let variant_index_relative = u32::try_from(variant_index_relative)
+                                .expect("we checked that this fits into a u32");
+                            // Then computing the absolute variant idx should not overflow any more.
+                            let variant_index = VariantIdx::from_u32(
+                                variants_start
+                                    .checked_add(variant_index_relative)
+                                    .expect("overflow computing absolute variant idx"),
+                            );
+                            let variants =
+                                ty.ty_adt_def().expect("tagged layout for non adt").variants();
+                            assert!(variant_index < variants.next_index());
+                            variant_index
+                        } else {
+                            untagged_variant
+                        }
+                    }
+                };
+                // Compute the size of the scalar we need to return.
+                // No need to cast, because the variant index directly serves as discriminant and is
+                // encoded in the tag.
+                variant
+            }
+        };
+        // Reading the discriminant of an uninhabited variant is UB. This is the basis for the
+        // `uninhabited_enum_branching` MIR pass. It also ensures consistency with
+        // `write_discriminant`.
+        if op.layout().for_variant(self, index).abi.is_uninhabited() {
+            throw_ub!(UninhabitedEnumVariantRead(index))
+        }
+        Ok(index)
+    }
+
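+    /// Computes the discriminant value (as an immediate of the discriminant type) that
+    /// corresponds to the given `variant` of `ty`.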
+    pub fn discriminant_for_variant(
+        &self,
+        ty: Ty<'tcx>,
+        variant: VariantIdx,
+    ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
+        let discr_layout = self.layout_of(ty.discriminant_ty(*self.tcx))?;
+        let discr_value = match ty.discriminant_for_variant(*self.tcx, variant) {
+            Some(discr) => {
+                // This type actually has discriminants.
+                assert_eq!(discr.ty, discr_layout.ty);
+                Scalar::from_uint(discr.val, discr_layout.size)
+            }
+            None => {
+                // On a type without actual discriminants, variant is 0.
+                assert_eq!(variant.as_u32(), 0);
+                Scalar::from_uint(variant.as_u32(), discr_layout.size)
+            }
+        };
+        Ok(ImmTy::from_scalar(discr_value, discr_layout))
+    }
+}
diff --git a/compiler/rustc_const_eval/src/interpret/eval_context.rs b/compiler/rustc_const_eval/src/interpret/eval_context.rs
new file mode 100644
index 00000000000..09e9cc4b35d
--- /dev/null
+++ b/compiler/rustc_const_eval/src/interpret/eval_context.rs
@@ -0,0 +1,1270 @@
+use std::cell::Cell;
+use std::{fmt, mem};
+
+use either::{Either, Left, Right};
+
+use hir::CRATE_HIR_ID;
+use rustc_errors::DiagCtxt;
+use rustc_hir::{self as hir, def_id::DefId, definitions::DefPathData};
+use rustc_index::IndexVec;
+use rustc_middle::mir;
+use rustc_middle::mir::interpret::{
+    CtfeProvenance, ErrorHandled, InvalidMetaKind, ReportedErrorInfo,
+};
+use rustc_middle::query::TyCtxtAt;
+use rustc_middle::ty::layout::{
+    self, FnAbiError, FnAbiOfHelpers, FnAbiRequest, LayoutError, LayoutOf, LayoutOfHelpers,
+    TyAndLayout,
+};
+use rustc_middle::ty::{self, GenericArgsRef, ParamEnv, Ty, TyCtxt, TypeFoldable, Variance};
+use rustc_mir_dataflow::storage::always_storage_live_locals;
+use rustc_session::Limit;
+use rustc_span::Span;
+use rustc_target::abi::{call::FnAbi, Align, HasDataLayout, Size, TargetDataLayout};
+
+use super::{
+    GlobalId, Immediate, InterpErrorInfo, InterpResult, MPlaceTy, Machine, MemPlace, MemPlaceMeta,
+    Memory, MemoryKind, OpTy, Operand, Place, PlaceTy, Pointer, PointerArithmetic, Projectable,
+    Provenance, Scalar, StackPopJump,
+};
+use crate::errors;
+use crate::util;
+use crate::{fluent_generated as fluent, ReportErrorExt};
+
+pub struct InterpCx<'mir, 'tcx, M: Machine<'mir, 'tcx>> {
+    /// Stores the `Machine` instance.
+    ///
+    /// Note: the stack is provided by the machine.
+    pub machine: M,
+
+    /// The results of the type checker, from rustc.
+    /// The span in this is the "root" of the evaluation, i.e., the const
+    /// we are evaluating (if this is CTFE).
+    pub tcx: TyCtxtAt<'tcx>,
+
+    /// Bounds in scope for polymorphic evaluations.
+    pub(crate) param_env: ty::ParamEnv<'tcx>,
+
+    /// The virtual memory system.
+    pub memory: Memory<'mir, 'tcx, M>,
+
+    /// The recursion limit (cached from `tcx.recursion_limit(())`)
+    pub recursion_limit: Limit,
+}
+
+// The `PhantomData` exists to prevent this type from being `Send`. If it were sent across a thread
+// boundary and dropped in the other thread, it would exit the span in the other thread.
+struct SpanGuard(tracing::Span, std::marker::PhantomData<*const u8>);
+
+impl SpanGuard {
+    /// By default a `SpanGuard` does nothing.
+    fn new() -> Self {
+        Self(tracing::Span::none(), std::marker::PhantomData)
+    }
+
+    /// If a span is entered, we exit the previous span (if any, normally none) and enter the
+    /// new span. This is mainly so we don't have to use `Option` for the `tracing_span` field of
+    /// `Frame` by creating a dummy span to begin with and then entering it once the frame has
+    /// been pushed.
+    fn enter(&mut self, span: tracing::Span) {
+        // This executes the destructor on the previous instance of `SpanGuard`, ensuring that
+        // we never enter or exit more spans than vice versa. Unless you `mem::leak`, then we
+        // can't protect the tracing stack, but that'll just lead to weird logging, no actual
+        // problems.
+        *self = Self(span, std::marker::PhantomData);
+        self.0.with_subscriber(|(id, dispatch)| {
+            dispatch.enter(id);
+        });
+    }
+}
+
+impl Drop for SpanGuard {
+    fn drop(&mut self) {
+        self.0.with_subscriber(|(id, dispatch)| {
+            dispatch.exit(id);
+        });
+    }
+}
+
+/// A stack frame.
+pub struct Frame<'mir, 'tcx, Prov: Provenance = CtfeProvenance, Extra = ()> {
+    ////////////////////////////////////////////////////////////////////////////////
+    // Function and callsite information
+    ////////////////////////////////////////////////////////////////////////////////
+    /// The MIR for the function called on this frame.
+    pub body: &'mir mir::Body<'tcx>,
+
+    /// The def_id and args of the current function.
+    pub instance: ty::Instance<'tcx>,
+
+    /// Extra data for the machine.
+    pub extra: Extra,
+
+    ////////////////////////////////////////////////////////////////////////////////
+    // Return place and locals
+    ////////////////////////////////////////////////////////////////////////////////
+    /// Work to perform when returning from this function.
+    pub return_to_block: StackPopCleanup,
+
+    /// The location where the result of the current stack frame should be written to,
+    /// and its layout in the caller.
+    pub return_place: MPlaceTy<'tcx, Prov>,
+
+    /// The list of locals for this stack frame, stored in order as
+    /// `[return_ptr, arguments..., variables..., temporaries...]`.
+    /// The locals are stored as `Option<Value>`s.
+    /// `None` represents a local that is currently dead, while a live local
+    /// can either directly contain `Scalar` or refer to some part of an `Allocation`.
+    ///
+    /// Do *not* access this directly; always go through the machine hook!
+    pub locals: IndexVec<mir::Local, LocalState<'tcx, Prov>>,
+
+    /// The span of the `tracing` crate is stored here.
+    /// When the guard is dropped, the span is exited. This gives us
+    /// a full stack trace on all tracing statements.
+    tracing_span: SpanGuard,
+
+    ////////////////////////////////////////////////////////////////////////////////
+    // Current position within the function
+    ////////////////////////////////////////////////////////////////////////////////
+    /// If this is `Right`, we are not currently executing any particular statement in
+    /// this frame (can happen e.g. during frame initialization, and during unwinding on
+    /// frames without cleanup code).
+    ///
+    /// Needs to be public because ConstProp does unspeakable things to it.
+    pub loc: Either<mir::Location, Span>,
+}
+
+/// What we store about a frame in an interpreter backtrace.
+#[derive(Clone, Debug)]
+pub struct FrameInfo<'tcx> {
+    pub instance: ty::Instance<'tcx>,
+    pub span: Span,
+}
+
+#[derive(Clone, Copy, Eq, PartialEq, Debug)] // Miri debug-prints these
+pub enum StackPopCleanup {
+    /// Jump to the next block in the caller, or cause UB if None (that's a function
+    /// that may never return). Also store layout of return place so
+    /// we can validate it at that layout.
+    /// `ret` stores the block we jump to on a normal return, while `unwind`
+    /// stores the block used for cleanup during unwinding.
+    Goto { ret: Option<mir::BasicBlock>, unwind: mir::UnwindAction },
+    /// The root frame of the stack: nowhere else to jump to.
+    /// `cleanup` says whether locals are deallocated. Static computation
+    /// wants them leaked to intern what they need (and just throw away
+    /// the entire `ecx` when it is done).
+    Root { cleanup: bool },
+}
+
+/// State of a local variable including a memoized layout
+#[derive(Clone)]
+pub struct LocalState<'tcx, Prov: Provenance = CtfeProvenance> {
+    value: LocalValue<Prov>,
+    /// Don't modify if `Some`, this is only used to prevent computing the layout twice.
+    /// Avoids computing the layout of locals that are never actually initialized.
+    layout: Cell<Option<TyAndLayout<'tcx>>>,
+}
+
+impl<Prov: Provenance> std::fmt::Debug for LocalState<'_, Prov> {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        f.debug_struct("LocalState")
+            .field("value", &self.value)
+            .field("ty", &self.layout.get().map(|l| l.ty))
+            .finish()
+    }
+}
+
+/// Current value of a local variable
+///
+/// This does not store the type of the local; the type is given by `body.local_decls` and can never
+/// change, so by not storing it here we avoid having to maintain that as an invariant.
+#[derive(Copy, Clone, Debug)] // Miri debug-prints these
+pub(super) enum LocalValue<Prov: Provenance = CtfeProvenance> {
+    /// This local is not currently alive, and cannot be used at all.
+    Dead,
+    /// A normal, live local.
+    /// Mostly for convenience, we re-use the `Operand` type here.
+    /// This is an optimization over just always having a pointer here;
+    /// we can thus avoid doing an allocation when the local just stores
+    /// immediate values *and* never has its address taken.
+    Live(Operand<Prov>),
+}
+
+impl<'tcx, Prov: Provenance> LocalState<'tcx, Prov> {
+    pub fn make_live_uninit(&mut self) {
+        self.value = LocalValue::Live(Operand::Immediate(Immediate::Uninit));
+    }
+
+    /// This is a hack because Miri needs a way to visit all the provenance in a `LocalState`
+    /// without having a layout or `TyCtxt` available, and we want to keep the `Operand` type
+    /// private.
+    pub fn as_mplace_or_imm(
+        &self,
+    ) -> Option<Either<(Pointer<Option<Prov>>, MemPlaceMeta<Prov>), Immediate<Prov>>> {
+        match self.value {
+            LocalValue::Dead => None,
+            LocalValue::Live(Operand::Indirect(mplace)) => Some(Left((mplace.ptr, mplace.meta))),
+            LocalValue::Live(Operand::Immediate(imm)) => Some(Right(imm)),
+        }
+    }
+
+    /// Read the local's value or error if the local is not yet live or not live anymore.
+    #[inline(always)]
+    pub(super) fn access(&self) -> InterpResult<'tcx, &Operand<Prov>> {
+        match &self.value {
+            LocalValue::Dead => throw_ub!(DeadLocal), // could even be "invalid program"?
+            LocalValue::Live(val) => Ok(val),
+        }
+    }
+
+    /// Overwrite the local. If the local can be overwritten in place, return a reference
+    /// to do so; otherwise return the `MemPlace` to consult instead.
+    #[inline(always)]
+    pub(super) fn access_mut(&mut self) -> InterpResult<'tcx, &mut Operand<Prov>> {
+        match &mut self.value {
+            LocalValue::Dead => throw_ub!(DeadLocal), // could even be "invalid program"?
+            LocalValue::Live(val) => Ok(val),
+        }
+    }
+}
+
+impl<'mir, 'tcx, Prov: Provenance> Frame<'mir, 'tcx, Prov> {
+    pub fn with_extra<Extra>(self, extra: Extra) -> Frame<'mir, 'tcx, Prov, Extra> {
+        Frame {
+            body: self.body,
+            instance: self.instance,
+            return_to_block: self.return_to_block,
+            return_place: self.return_place,
+            locals: self.locals,
+            loc: self.loc,
+            extra,
+            tracing_span: self.tracing_span,
+        }
+    }
+}
+
+impl<'mir, 'tcx, Prov: Provenance, Extra> Frame<'mir, 'tcx, Prov, Extra> {
+    /// Get the current location within the Frame.
+    ///
+    /// If this is `Right`, we are not currently executing any particular statement in
+    /// this frame (can happen e.g. during frame initialization, and during unwinding on
+    /// frames without cleanup code).
+    ///
+    /// Used by priroda.
+    pub fn current_loc(&self) -> Either<mir::Location, Span> {
+        self.loc
+    }
+
+    /// Return the `SourceInfo` of the current instruction.
+    pub fn current_source_info(&self) -> Option<&mir::SourceInfo> {
+        self.loc.left().map(|loc| self.body.source_info(loc))
+    }
+
+    pub fn current_span(&self) -> Span {
+        match self.loc {
+            Left(loc) => self.body.source_info(loc).span,
+            Right(span) => span,
+        }
+    }
+
+    pub fn lint_root(&self) -> Option<hir::HirId> {
+        self.current_source_info().and_then(|source_info| {
+            match &self.body.source_scopes[source_info.scope].local_data {
+                mir::ClearCrossCrate::Set(data) => Some(data.lint_root),
+                mir::ClearCrossCrate::Clear => None,
+            }
+        })
+    }
+
+    /// Returns the address of the buffer where the locals are stored. This is used by `Place` as a
+    /// sanity check to detect bugs where we mix up which stack frame a place refers to.
+    #[inline(always)]
+    pub(super) fn locals_addr(&self) -> usize {
+        self.locals.raw.as_ptr().addr()
+    }
+
+    #[must_use]
+    pub fn generate_stacktrace_from_stack(stack: &[Self]) -> Vec<FrameInfo<'tcx>> {
+        let mut frames = Vec::new();
+        // This deliberately does *not* honor `requires_caller_location` since it is used for much
+        // more than just panics.
+        for frame in stack.iter().rev() {
+            let span = match frame.loc {
+                Left(loc) => {
+                    // If the stacktrace passes through MIR-inlined source scopes, add them.
+                    let mir::SourceInfo { mut span, scope } = *frame.body.source_info(loc);
+                    let mut scope_data = &frame.body.source_scopes[scope];
+                    while let Some((instance, call_span)) = scope_data.inlined {
+                        frames.push(FrameInfo { span, instance });
+                        span = call_span;
+                        scope_data = &frame.body.source_scopes[scope_data.parent_scope.unwrap()];
+                    }
+                    span
+                }
+                Right(span) => span,
+            };
+            frames.push(FrameInfo { span, instance: frame.instance });
+        }
+        trace!("generate stacktrace: {:#?}", frames);
+        frames
+    }
+}
+
+// FIXME: only used by miri, should be removed once translatable.
+impl<'tcx> fmt::Display for FrameInfo<'tcx> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        ty::tls::with(|tcx| {
+            if tcx.def_key(self.instance.def_id()).disambiguated_data.data == DefPathData::Closure {
+                write!(f, "inside closure")
+            } else {
+                // Note: this triggers a `must_produce_diag` state, which means that if we ever
+                // get here we must emit a diagnostic. We should never display a `FrameInfo` unless
+                // we actually want to emit a warning or error to the user.
+                write!(f, "inside `{}`", self.instance)
+            }
+        })
+    }
+}
+
+impl<'tcx> FrameInfo<'tcx> {
+    pub fn as_note(&self, tcx: TyCtxt<'tcx>) -> errors::FrameNote {
+        let span = self.span;
+        if tcx.def_key(self.instance.def_id()).disambiguated_data.data == DefPathData::Closure {
+            errors::FrameNote { where_: "closure", span, instance: String::new(), times: 0 }
+        } else {
+            let instance = format!("{}", self.instance);
+            // Note: this triggers a `must_produce_diag` state, which means that if we ever get
+            // here we must emit a diagnostic. We should never display a `FrameInfo` unless we
+            // actually want to emit a warning or error to the user.
+            errors::FrameNote { where_: "instance", span, instance, times: 0 }
+        }
+    }
+}
+
+impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> HasDataLayout for InterpCx<'mir, 'tcx, M> {
+    #[inline]
+    fn data_layout(&self) -> &TargetDataLayout {
+        &self.tcx.data_layout
+    }
+}
+
+impl<'mir, 'tcx, M> layout::HasTyCtxt<'tcx> for InterpCx<'mir, 'tcx, M>
+where
+    M: Machine<'mir, 'tcx>,
+{
+    #[inline]
+    fn tcx(&self) -> TyCtxt<'tcx> {
+        *self.tcx
+    }
+}
+
+impl<'mir, 'tcx, M> layout::HasParamEnv<'tcx> for InterpCx<'mir, 'tcx, M>
+where
+    M: Machine<'mir, 'tcx>,
+{
+    fn param_env(&self) -> ty::ParamEnv<'tcx> {
+        self.param_env
+    }
+}
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> LayoutOfHelpers<'tcx> for InterpCx<'mir, 'tcx, M> {
+    type LayoutOfResult = InterpResult<'tcx, TyAndLayout<'tcx>>;
+
+    #[inline]
+    fn layout_tcx_at_span(&self) -> Span {
+        // Using the cheap root span for performance.
+        self.tcx.span
+    }
+
+    #[inline]
+    fn handle_layout_err(
+        &self,
+        err: LayoutError<'tcx>,
+        _: Span,
+        _: Ty<'tcx>,
+    ) -> InterpErrorInfo<'tcx> {
+        err_inval!(Layout(err)).into()
+    }
+}
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> FnAbiOfHelpers<'tcx> for InterpCx<'mir, 'tcx, M> {
+    type FnAbiOfResult = InterpResult<'tcx, &'tcx FnAbi<'tcx, Ty<'tcx>>>;
+
+    fn handle_fn_abi_err(
+        &self,
+        err: FnAbiError<'tcx>,
+        _span: Span,
+        _fn_abi_request: FnAbiRequest<'tcx>,
+    ) -> InterpErrorInfo<'tcx> {
+        match err {
+            FnAbiError::Layout(err) => err_inval!(Layout(err)).into(),
+            FnAbiError::AdjustForForeignAbi(err) => {
+                err_inval!(FnAbiAdjustForForeignAbi(err)).into()
+            }
+        }
+    }
+}
+
+/// Test if it is valid for a MIR assignment to assign `src`-typed place to `dest`-typed value.
+/// This test should be symmetric, as it is primarily about layout compatibility.
+pub(super) fn mir_assign_valid_types<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    param_env: ParamEnv<'tcx>,
+    src: TyAndLayout<'tcx>,
+    dest: TyAndLayout<'tcx>,
+) -> bool {
+    // Type-changing assignments can happen when subtyping is used. While
+    // all normal lifetimes are erased, higher-ranked types with their
+    // late-bound lifetimes are still around and can lead to type
+    // differences.
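+    // For example (illustrative), `for<'a> fn(&'a u32)` is a subtype of `fn(&'static u32)`,
+    // so an assignment between places of these two layout-identical types is accepted here.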
+    if util::relate_types(tcx, param_env, Variance::Covariant, src.ty, dest.ty) {
+        // Make sure the layout is equal, too -- just to be safe. Miri really
+        // needs layout equality. For performance reasons we skip this check when
+        // the types are equal. Equal types *can* have different layouts when
+        // enum downcast is involved (as enum variants carry the type of the
+        // enum), but those should never occur in assignments.
+        if cfg!(debug_assertions) || src.ty != dest.ty {
+            assert_eq!(src.layout, dest.layout);
+        }
+        true
+    } else {
+        false
+    }
+}
+
+/// Use the already known layout if given (but sanity check in debug mode),
+/// or compute the layout.
+#[cfg_attr(not(debug_assertions), inline(always))]
+pub(super) fn from_known_layout<'tcx>(
+    tcx: TyCtxtAt<'tcx>,
+    param_env: ParamEnv<'tcx>,
+    known_layout: Option<TyAndLayout<'tcx>>,
+    compute: impl FnOnce() -> InterpResult<'tcx, TyAndLayout<'tcx>>,
+) -> InterpResult<'tcx, TyAndLayout<'tcx>> {
+    match known_layout {
+        None => compute(),
+        Some(known_layout) => {
+            if cfg!(debug_assertions) {
+                let check_layout = compute()?;
+                if !mir_assign_valid_types(tcx.tcx, param_env, check_layout, known_layout) {
+                    span_bug!(
+                        tcx.span,
+                        "expected type differs from actual type.\nexpected: {}\nactual: {}",
+                        known_layout.ty,
+                        check_layout.ty,
+                    );
+                }
+            }
+            Ok(known_layout)
+        }
+    }
+}
+
+/// Turn the given error into a human-readable string. Expects the string to be printed, so if
+/// `RUSTC_CTFE_BACKTRACE` is set this will show a backtrace of the rustc internals that
+/// triggered the error.
+///
+/// This is NOT the preferred way to render an error; use `report` from `const_eval` instead.
+/// However, this is useful when error messages appear in ICEs.
+pub fn format_interp_error<'tcx>(dcx: &DiagCtxt, e: InterpErrorInfo<'tcx>) -> String {
+    let (e, backtrace) = e.into_parts();
+    backtrace.print_backtrace();
+    // FIXME(fee1-dead), HACK: we want to use the error as the title, so we just extract the
+    // label and arguments from the InterpError.
+    #[allow(rustc::untranslatable_diagnostic)]
+    let mut diag = dcx.struct_allow("");
+    let msg = e.diagnostic_message();
+    e.add_args(&mut diag);
+    let s = dcx.eagerly_translate_to_string(msg, diag.args.iter());
+    diag.cancel();
+    s
+}
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+    pub fn new(
+        tcx: TyCtxt<'tcx>,
+        root_span: Span,
+        param_env: ty::ParamEnv<'tcx>,
+        machine: M,
+    ) -> Self {
+        InterpCx {
+            machine,
+            tcx: tcx.at(root_span),
+            param_env,
+            memory: Memory::new(),
+            recursion_limit: tcx.recursion_limit(),
+        }
+    }
+
+    #[inline(always)]
+    pub fn cur_span(&self) -> Span {
+        // This deliberately does *not* honor `requires_caller_location` since it is used for much
+        // more than just panics.
+        self.stack().last().map_or(self.tcx.span, |f| f.current_span())
+    }
+
+    /// Find the first stack frame that is within the current crate, if any;
+    /// otherwise, return the crate's `HirId`.
+    #[inline(always)]
+    pub fn best_lint_scope(&self) -> hir::HirId {
+        self.stack()
+            .iter()
+            .find_map(|frame| frame.body.source.def_id().as_local())
+            .map_or(CRATE_HIR_ID, |def_id| self.tcx.local_def_id_to_hir_id(def_id))
+    }
+
+    #[inline(always)]
+    pub(crate) fn stack(&self) -> &[Frame<'mir, 'tcx, M::Provenance, M::FrameExtra>] {
+        M::stack(self)
+    }
+
+    #[inline(always)]
+    pub(crate) fn stack_mut(
+        &mut self,
+    ) -> &mut Vec<Frame<'mir, 'tcx, M::Provenance, M::FrameExtra>> {
+        M::stack_mut(self)
+    }
+
+    #[inline(always)]
+    pub fn frame_idx(&self) -> usize {
+        let stack = self.stack();
+        assert!(!stack.is_empty());
+        stack.len() - 1
+    }
+
+    #[inline(always)]
+    pub fn frame(&self) -> &Frame<'mir, 'tcx, M::Provenance, M::FrameExtra> {
+        self.stack().last().expect("no call frames exist")
+    }
+
+    #[inline(always)]
+    pub fn frame_mut(&mut self) -> &mut Frame<'mir, 'tcx, M::Provenance, M::FrameExtra> {
+        self.stack_mut().last_mut().expect("no call frames exist")
+    }
+
+    #[inline(always)]
+    pub fn body(&self) -> &'mir mir::Body<'tcx> {
+        self.frame().body
+    }
+
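+    /// Sign-extend `value`, interpreted at the bit width of `ty`, to the full 128 bits.
+    /// For example, sign-extending `0xFF` at an 8-bit signed type yields `u128::MAX`
+    /// (the 128-bit pattern of `-1`).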
+    #[inline(always)]
+    pub fn sign_extend(&self, value: u128, ty: TyAndLayout<'_>) -> u128 {
+        assert!(ty.abi.is_signed());
+        ty.size.sign_extend(value)
+    }
+
+    #[inline(always)]
+    pub fn truncate(&self, value: u128, ty: TyAndLayout<'_>) -> u128 {
+        ty.size.truncate(value)
+    }
+
+    #[inline]
+    pub fn type_is_freeze(&self, ty: Ty<'tcx>) -> bool {
+        ty.is_freeze(*self.tcx, self.param_env)
+    }
+
+    pub fn load_mir(
+        &self,
+        instance: ty::InstanceDef<'tcx>,
+        promoted: Option<mir::Promoted>,
+    ) -> InterpResult<'tcx, &'tcx mir::Body<'tcx>> {
+        trace!("load mir(instance={:?}, promoted={:?})", instance, promoted);
+        let body = if let Some(promoted) = promoted {
+            let def = instance.def_id();
+            &self.tcx.promoted_mir(def)[promoted]
+        } else {
+            M::load_mir(self, instance)?
+        };
+        // do not continue if typeck errors occurred (can only occur in local crate)
+        if let Some(err) = body.tainted_by_errors {
+            throw_inval!(AlreadyReported(ReportedErrorInfo::tainted_by_errors(err)));
+        }
+        Ok(body)
+    }
+
+    /// Call this on things you got out of the MIR (so it is as generic as the current
+    /// stack frame), to bring it into the proper environment for this interpreter.
+    pub(super) fn instantiate_from_current_frame_and_normalize_erasing_regions<
+        T: TypeFoldable<TyCtxt<'tcx>>,
+    >(
+        &self,
+        value: T,
+    ) -> Result<T, ErrorHandled> {
+        self.instantiate_from_frame_and_normalize_erasing_regions(self.frame(), value)
+    }
+
+    /// Call this on things you got out of the MIR (so it is as generic as the provided
+    /// stack frame), to bring it into the proper environment for this interpreter.
+    pub(super) fn instantiate_from_frame_and_normalize_erasing_regions<
+        T: TypeFoldable<TyCtxt<'tcx>>,
+    >(
+        &self,
+        frame: &Frame<'mir, 'tcx, M::Provenance, M::FrameExtra>,
+        value: T,
+    ) -> Result<T, ErrorHandled> {
+        frame
+            .instance
+            .try_instantiate_mir_and_normalize_erasing_regions(
+                *self.tcx,
+                self.param_env,
+                ty::EarlyBinder::bind(value),
+            )
+            .map_err(|_| ErrorHandled::TooGeneric(self.cur_span()))
+    }
+
+    /// The `args` are assumed to already be in our interpreter "universe" (param_env).
+    pub(super) fn resolve(
+        &self,
+        def: DefId,
+        args: GenericArgsRef<'tcx>,
+    ) -> InterpResult<'tcx, ty::Instance<'tcx>> {
+        trace!("resolve: {:?}, {:#?}", def, args);
+        trace!("param_env: {:#?}", self.param_env);
+        trace!("args: {:#?}", args);
+        match ty::Instance::resolve(*self.tcx, self.param_env, def, args) {
+            Ok(Some(instance)) => Ok(instance),
+            Ok(None) => throw_inval!(TooGeneric),
+
+            // FIXME(eddyb) this could be a bit more specific than `AlreadyReported`.
+            Err(error_reported) => throw_inval!(AlreadyReported(error_reported.into())),
+        }
+    }
+
+    /// Walks up the callstack from the intrinsic's callsite, searching for the first callsite in a
+    /// frame which is not `#[track_caller]`. This is the fancy version of `cur_span`.
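+    /// For example, if a `#[track_caller]` function panics, the reported location is the call
+    /// site in its nearest caller that is not itself `#[track_caller]`.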
+    pub(crate) fn find_closest_untracked_caller_location(&self) -> Span {
+        for frame in self.stack().iter().rev() {
+            debug!("find_closest_untracked_caller_location: checking frame {:?}", frame.instance);
+
+            // Assert that the frame we look at is actually executing code currently
+            // (`loc` is `Right` when we are unwinding and the frame does not require cleanup).
+            let loc = frame.loc.left().unwrap();
+
+            // This could be a non-`Call` terminator (such as `Drop`), or not a terminator at all
+            // (such as `box`). Use the normal span by default.
+            let mut source_info = *frame.body.source_info(loc);
+
+            // If this is a `Call` terminator, use the `fn_span` instead.
+            let block = &frame.body.basic_blocks[loc.block];
+            if loc.statement_index == block.statements.len() {
+                debug!(
+                    "find_closest_untracked_caller_location: got terminator {:?} ({:?})",
+                    block.terminator(),
+                    block.terminator().kind,
+                );
+                if let mir::TerminatorKind::Call { fn_span, .. } = block.terminator().kind {
+                    source_info.span = fn_span;
+                }
+            }
+
+            let caller_location = if frame.instance.def.requires_caller_location(*self.tcx) {
+                // We use `Err(())` as an indication that we should continue up the call stack since
+                // this is a `#[track_caller]` function.
+                Some(Err(()))
+            } else {
+                None
+            };
+            if let Ok(span) =
+                frame.body.caller_location_span(source_info, caller_location, *self.tcx, Ok)
+            {
+                return span;
+            }
+        }
+
+        span_bug!(self.cur_span(), "no non-`#[track_caller]` frame found")
+    }
+
+    #[inline(always)]
+    pub(super) fn layout_of_local(
+        &self,
+        frame: &Frame<'mir, 'tcx, M::Provenance, M::FrameExtra>,
+        local: mir::Local,
+        layout: Option<TyAndLayout<'tcx>>,
+    ) -> InterpResult<'tcx, TyAndLayout<'tcx>> {
+        let state = &frame.locals[local];
+        if let Some(layout) = state.layout.get() {
+            return Ok(layout);
+        }
+
+        let layout = from_known_layout(self.tcx, self.param_env, layout, || {
+            let local_ty = frame.body.local_decls[local].ty;
+            let local_ty =
+                self.instantiate_from_frame_and_normalize_erasing_regions(frame, local_ty)?;
+            self.layout_of(local_ty)
+        })?;
+
+        // Layouts of locals are requested a lot, so we cache them.
+        state.layout.set(Some(layout));
+        Ok(layout)
+    }
+
+    /// Returns the actual dynamic size and alignment of the place at the given type.
+    /// Only the "meta" (metadata) part of the place matters.
+    /// This can fail to provide an answer for extern types.
+    pub(super) fn size_and_align_of(
+        &self,
+        metadata: &MemPlaceMeta<M::Provenance>,
+        layout: &TyAndLayout<'tcx>,
+    ) -> InterpResult<'tcx, Option<(Size, Align)>> {
+        if layout.is_sized() {
+            return Ok(Some((layout.size, layout.align.abi)));
+        }
+        match layout.ty.kind() {
+            ty::Adt(..) | ty::Tuple(..) => {
+                // First get the size of all statically known fields.
+                // Don't use `type_of::sizing_type_of` because that expects the type to be sized,
+                // and it also rounds up to alignment, which we want to avoid,
+                // as the unsized field's alignment could be smaller.
+                assert!(!layout.ty.is_simd());
+                assert!(layout.fields.count() > 0);
+                trace!("DST layout: {:?}", layout);
+
+                let unsized_offset_unadjusted = layout.fields.offset(layout.fields.count() - 1);
+                let sized_align = layout.align.abi;
+
+                // Recurse to get the size of the dynamically sized field (must be
+                // the last field). Can't have foreign types here, how would we
+                // adjust alignment and size for them?
+                let field = layout.field(self, layout.fields.count() - 1);
+                let Some((unsized_size, mut unsized_align)) =
+                    self.size_and_align_of(metadata, &field)?
+                else {
+                    // A field with an extern type. We don't know the actual dynamic size
+                    // or the alignment.
+                    return Ok(None);
+                };
+
+                // # First compute the dynamic alignment
+
+                // Packed type alignment needs to be capped.
+                if let ty::Adt(def, _) = layout.ty.kind() {
+                    if let Some(packed) = def.repr().pack {
+                        unsized_align = unsized_align.min(packed);
+                    }
+                }
+
+                // Choose max of two known alignments (combined value must
+                // be aligned according to the more restrictive of the two).
+                let full_align = sized_align.max(unsized_align);
+
+                // # Then compute the dynamic size
+
+                let unsized_offset_adjusted = unsized_offset_unadjusted.align_to(unsized_align);
+                let full_size = (unsized_offset_adjusted + unsized_size).align_to(full_align);
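+                // Worked example (illustrative): for `struct S { a: u32, tail: [u16] }` with 3
+                // tail elements, the unadjusted tail offset is 4, the tail has size 6 and align 2,
+                // so `full_align = 4` and `full_size = align_to(4 + 6, 4) = 12`.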
+
+                // Just for our sanity's sake, assert that this is equal to what codegen would compute.
+                assert_eq!(
+                    full_size,
+                    (unsized_offset_unadjusted + unsized_size).align_to(full_align)
+                );
+
+                // Check if this brought us over the size limit.
+                if full_size > self.max_size_of_val() {
+                    throw_ub!(InvalidMeta(InvalidMetaKind::TooBig));
+                }
+                Ok(Some((full_size, full_align)))
+            }
+            ty::Dynamic(_, _, ty::Dyn) => {
+                let vtable = metadata.unwrap_meta().to_pointer(self)?;
+                // Read size and align from vtable (already checks size).
+                Ok(Some(self.get_vtable_size_and_align(vtable)?))
+            }
+
+            ty::Slice(_) | ty::Str => {
+                let len = metadata.unwrap_meta().to_target_usize(self)?;
+                let elem = layout.field(self, 0);
+
+                // Make sure the slice is not too big.
+                let size = elem.size.bytes().saturating_mul(len); // we rely on `max_size_of_val` being smaller than `u64::MAX`.
+                let size = Size::from_bytes(size);
+                if size > self.max_size_of_val() {
+                    throw_ub!(InvalidMeta(InvalidMetaKind::SliceTooBig));
+                }
+                Ok(Some((size, elem.align.abi)))
+            }
+
+            ty::Foreign(_) => Ok(None),
+
+            _ => span_bug!(self.cur_span(), "size_and_align_of::<{}> not supported", layout.ty),
+        }
+    }
+    #[inline]
+    pub fn size_and_align_of_mplace(
+        &self,
+        mplace: &MPlaceTy<'tcx, M::Provenance>,
+    ) -> InterpResult<'tcx, Option<(Size, Align)>> {
+        self.size_and_align_of(&mplace.meta(), &mplace.layout)
+    }
+
+    #[instrument(skip(self, body, return_place, return_to_block), level = "debug")]
+    pub fn push_stack_frame(
+        &mut self,
+        instance: ty::Instance<'tcx>,
+        body: &'mir mir::Body<'tcx>,
+        return_place: &MPlaceTy<'tcx, M::Provenance>,
+        return_to_block: StackPopCleanup,
+    ) -> InterpResult<'tcx> {
+        trace!("body: {:#?}", body);
+        let dead_local = LocalState { value: LocalValue::Dead, layout: Cell::new(None) };
+        let locals = IndexVec::from_elem(dead_local, &body.local_decls);
+        // First push a stack frame so we have access to the local args
+        let pre_frame = Frame {
+            body,
+            loc: Right(body.span), // Span used for errors caused during preamble.
+            return_to_block,
+            return_place: return_place.clone(),
+            locals,
+            instance,
+            tracing_span: SpanGuard::new(),
+            extra: (),
+        };
+        let frame = M::init_frame_extra(self, pre_frame)?;
+        self.stack_mut().push(frame);
+
+        // Make sure all the constants required by this frame evaluate successfully (post-monomorphization check).
+        if M::POST_MONO_CHECKS {
+            for &const_ in &body.required_consts {
+                let c = self
+                    .instantiate_from_current_frame_and_normalize_erasing_regions(const_.const_)?;
+                c.eval(*self.tcx, self.param_env, Some(const_.span)).map_err(|err| {
+                    err.emit_note(*self.tcx);
+                    err
+                })?;
+            }
+        }
+
+        // done
+        M::after_stack_push(self)?;
+        self.frame_mut().loc = Left(mir::Location::START);
+
+        let span = info_span!("frame", "{}", instance);
+        self.frame_mut().tracing_span.enter(span);
+
+        Ok(())
+    }
+
+    /// Jump to the given block.
+    #[inline]
+    pub fn go_to_block(&mut self, target: mir::BasicBlock) {
+        self.frame_mut().loc = Left(mir::Location { block: target, statement_index: 0 });
+    }
+
+    /// *Return* to the given `target` basic block.
+    /// Do *not* use for unwinding! Use `unwind_to_block` instead.
+    ///
+    /// If `target` is `None`, that indicates the function cannot return, so we raise UB.
+    pub fn return_to_block(&mut self, target: Option<mir::BasicBlock>) -> InterpResult<'tcx> {
+        if let Some(target) = target {
+            self.go_to_block(target);
+            Ok(())
+        } else {
+            throw_ub!(Unreachable)
+        }
+    }
+
+    /// *Unwind* to the given `target` basic block.
+    /// Do *not* use for returning! Use `return_to_block` instead.
+    ///
+    /// If `target` is `UnwindAction::Continue`, that indicates the function does not need cleanup
+    /// during unwinding, and we will just keep propagating that upwards.
+    ///
+    /// If `target` is `UnwindAction::Unreachable`, that indicates the function does not allow
+    /// unwinding, and doing so is UB.
+    #[cold] // usually we have normal returns, not unwinding
+    pub fn unwind_to_block(&mut self, target: mir::UnwindAction) -> InterpResult<'tcx> {
+        self.frame_mut().loc = match target {
+            mir::UnwindAction::Cleanup(block) => Left(mir::Location { block, statement_index: 0 }),
+            mir::UnwindAction::Continue => Right(self.frame_mut().body.span),
+            mir::UnwindAction::Unreachable => {
+                throw_ub_custom!(fluent::const_eval_unreachable_unwind);
+            }
+            mir::UnwindAction::Terminate(reason) => {
+                self.frame_mut().loc = Right(self.frame_mut().body.span);
+                M::unwind_terminate(self, reason)?;
+                // This might have pushed a new stack frame, or it terminated execution.
+                // Either way, `loc` will not be updated.
+                return Ok(());
+            }
+        };
+        Ok(())
+    }
+
+    /// Pops the current frame from the stack, deallocating the
+    /// memory for allocated locals.
+    ///
+    /// If `unwinding` is `false`, then we are performing a normal return
+    /// from a function. In this case, we jump back into the frame of the caller,
+    /// and continue execution as normal.
+    ///
+    /// If `unwinding` is `true`, then we are in the middle of a panic,
+    /// and need to unwind this frame. In this case, we jump to the
+    /// `cleanup` block for the function, which is responsible for running
+    /// `Drop` impls for any locals that have been initialized at this point.
+    /// The cleanup block ends with a special `Resume` terminator, which will
+    /// cause us to continue unwinding.
+    #[instrument(skip(self), level = "debug")]
+    pub(super) fn pop_stack_frame(&mut self, unwinding: bool) -> InterpResult<'tcx> {
+        info!(
+            "popping stack frame ({})",
+            if unwinding { "during unwinding" } else { "returning from function" }
+        );
+
+        // Check `unwinding`.
+        assert_eq!(
+            unwinding,
+            match self.frame().loc {
+                Left(loc) => self.body().basic_blocks[loc.block].is_cleanup,
+                Right(_) => true,
+            }
+        );
+        if unwinding && self.frame_idx() == 0 {
+            throw_ub_custom!(fluent::const_eval_unwind_past_top);
+        }
+
+        M::before_stack_pop(self, self.frame())?;
+
+        // Copy return value. Must of course happen *before* we deallocate the locals.
+        let copy_ret_result = if !unwinding {
+            let op = self
+                .local_to_op(mir::RETURN_PLACE, None)
+                .expect("return place should always be live");
+            let dest = self.frame().return_place.clone();
+            let err = if self.stack().len() == 1 {
+                // The initializer of constants and statics will get validated separately
+                // after the constant has been fully evaluated. While we could fall back to the default
+                // code path, that will cause -Zenforce-validity to cycle on static initializers.
+                // Reading from a static's memory is not allowed during its evaluation, and will always
+                // trigger a cycle error. Validation must read from the memory of the current item.
+                // For Miri this means we do not validate the root frame return value,
+                // but Miri anyway calls `read_target_isize` on that so separate validation
+                // is not needed.
+                self.copy_op_no_dest_validation(&op, &dest)
+            } else {
+                self.copy_op_allow_transmute(&op, &dest)
+            };
+            trace!("return value: {:?}", self.dump_place(&dest.into()));
+            // We delay actually short-circuiting on this error until *after* the stack frame is
+            // popped, since we want this error to be attributed to the caller, whose type defines
+            // this transmute.
+            err
+        } else {
+            Ok(())
+        };
+
+        // Cleanup: deallocate locals.
+        // Usually we want to clean up (deallocate locals), but in a few rare cases we don't.
+        // We do this while the frame is still on the stack, so errors point to the callee.
+        let return_to_block = self.frame().return_to_block;
+        let cleanup = match return_to_block {
+            StackPopCleanup::Goto { .. } => true,
+            StackPopCleanup::Root { cleanup, .. } => cleanup,
+        };
+        if cleanup {
+            // We need to take the locals out, since we need to mutate while iterating.
+            let locals = mem::take(&mut self.frame_mut().locals);
+            for local in &locals {
+                self.deallocate_local(local.value)?;
+            }
+        }
+
+        // All right, now it is time to actually pop the frame.
+        // Note that its locals are gone already, but that's fine.
+        let frame =
+            self.stack_mut().pop().expect("tried to pop a stack frame, but there were none");
+        // Report error from return value copy, if any.
+        copy_ret_result?;
+
+        // If we are not doing cleanup, also skip everything else.
+        if !cleanup {
+            assert!(self.stack().is_empty(), "only the topmost frame should ever be leaked");
+            assert!(!unwinding, "tried to skip cleanup during unwinding");
+            // Skip machine hook.
+            return Ok(());
+        }
+        if M::after_stack_pop(self, frame, unwinding)? == StackPopJump::NoJump {
+            // The hook already did everything.
+            return Ok(());
+        }
+
+        // Normal return, figure out where to jump.
+        if unwinding {
+            // Follow the unwind edge.
+            let unwind = match return_to_block {
+                StackPopCleanup::Goto { unwind, .. } => unwind,
+                StackPopCleanup::Root { .. } => {
+                    panic!("encountered StackPopCleanup::Root when unwinding!")
+                }
+            };
+            // This must be the very last thing that happens, since it can in fact push a new stack frame.
+            self.unwind_to_block(unwind)
+        } else {
+            // Follow the normal return edge.
+            match return_to_block {
+                StackPopCleanup::Goto { ret, .. } => self.return_to_block(ret),
+                StackPopCleanup::Root { .. } => {
+                    assert!(
+                        self.stack().is_empty(),
+                        "only the topmost frame can have StackPopCleanup::Root"
+                    );
+                    Ok(())
+                }
+            }
+        }
+    }
+
+    /// In the current stack frame, mark all locals as live that are not arguments and don't have
+    /// `Storage*` annotations (this includes the return place).
+    pub fn storage_live_for_always_live_locals(&mut self) -> InterpResult<'tcx> {
+        self.storage_live(mir::RETURN_PLACE)?;
+
+        let body = self.body();
+        let always_live = always_storage_live_locals(body);
+        for local in body.vars_and_temps_iter() {
+            if always_live.contains(local) {
+                self.storage_live(local)?;
+            }
+        }
+        Ok(())
+    }
+
+    pub fn storage_live_dyn(
+        &mut self,
+        local: mir::Local,
+        meta: MemPlaceMeta<M::Provenance>,
+    ) -> InterpResult<'tcx> {
+        trace!("{:?} is now live", local);
+
+        // We avoid `ty.is_trivially_sized` since that (a) cannot assume WF, so it recurses through
+        // all fields of a tuple, and (b) does something expensive for ADTs.
+        fn is_very_trivially_sized(ty: Ty<'_>) -> bool {
+            match ty.kind() {
+                ty::Infer(ty::IntVar(_) | ty::FloatVar(_))
+                | ty::Uint(_)
+                | ty::Int(_)
+                | ty::Bool
+                | ty::Float(_)
+                | ty::FnDef(..)
+                | ty::FnPtr(_)
+                | ty::RawPtr(..)
+                | ty::Char
+                | ty::Ref(..)
+                | ty::Coroutine(..)
+                | ty::CoroutineWitness(..)
+                | ty::Array(..)
+                | ty::Closure(..)
+                | ty::CoroutineClosure(..)
+                | ty::Never
+                | ty::Error(_) => true,
+
+                ty::Str | ty::Slice(_) | ty::Dynamic(..) | ty::Foreign(..) => false,
+
+                ty::Tuple(tys) => tys.last().iter().all(|ty| is_very_trivially_sized(**ty)),
+
+                // We don't want to do any queries, so there is not much we can do with ADTs.
+                ty::Adt(..) => false,
+
+                ty::Alias(..) | ty::Param(_) | ty::Placeholder(..) => false,
+
+                ty::Infer(ty::TyVar(_)) => false,
+
+                ty::Bound(..)
+                | ty::Infer(ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_)) => {
+                    bug!("`is_very_trivially_sized` applied to unexpected type: {}", ty)
+                }
+            }
+        }
+
+        // This is a hot function, we avoid computing the layout when possible.
+        // `unsized_` will be `None` for sized types and `Some(layout)` for unsized types.
+        let unsized_ = if is_very_trivially_sized(self.body().local_decls[local].ty) {
+            None
+        } else {
+            // We need the layout.
+            let layout = self.layout_of_local(self.frame(), local, None)?;
+            if layout.is_sized() { None } else { Some(layout) }
+        };
+
+        let local_val = LocalValue::Live(if let Some(layout) = unsized_ {
+            if !meta.has_meta() {
+                throw_unsup!(UnsizedLocal);
+            }
+            // Need to allocate some memory, since `Immediate::Uninit` cannot be unsized.
+            let dest_place = self.allocate_dyn(layout, MemoryKind::Stack, meta)?;
+            Operand::Indirect(*dest_place.mplace())
+        } else {
+            assert!(!meta.has_meta()); // we're dropping the metadata
+            // Just make this an efficient immediate.
+            // Note that not calling `layout_of` here does have one real consequence:
+            // if the type is too big, we'll only notice this when the local is actually initialized,
+            // which is a bit too late -- we should ideally notice this already here, when the memory
+            // is conceptually allocated. But given how rare that error is and that this is a hot function,
+            // we accept this downside for now.
+            Operand::Immediate(Immediate::Uninit)
+        });
+
+        // StorageLive expects the local to be dead, and marks it live.
+        let old = mem::replace(&mut self.frame_mut().locals[local].value, local_val);
+        if !matches!(old, LocalValue::Dead) {
+            throw_ub_custom!(fluent::const_eval_double_storage_live);
+        }
+        Ok(())
+    }
+
+    /// Mark a storage as live, killing the previous content.
+    #[inline(always)]
+    pub fn storage_live(&mut self, local: mir::Local) -> InterpResult<'tcx> {
+        self.storage_live_dyn(local, MemPlaceMeta::None)
+    }
+
+    pub fn storage_dead(&mut self, local: mir::Local) -> InterpResult<'tcx> {
+        assert!(local != mir::RETURN_PLACE, "Cannot make return place dead");
+        trace!("{:?} is now dead", local);
+
+        // It is entirely okay for this local to be already dead (at least that's how we currently generate MIR)
+        let old = mem::replace(&mut self.frame_mut().locals[local].value, LocalValue::Dead);
+        self.deallocate_local(old)?;
+        Ok(())
+    }
+
+    #[instrument(skip(self), level = "debug")]
+    fn deallocate_local(&mut self, local: LocalValue<M::Provenance>) -> InterpResult<'tcx> {
+        if let LocalValue::Live(Operand::Indirect(MemPlace { ptr, .. })) = local {
+            // All locals have a backing allocation, even if the allocation is empty
+            // due to the local having ZST type. Hence we can `unwrap`.
+            trace!(
+                "deallocating local {:?}: {:?}",
+                local,
+                // Locals always have an `alloc_id` (they are never the result of an int2ptr).
+                self.dump_alloc(ptr.provenance.unwrap().get_alloc_id().unwrap())
+            );
+            self.deallocate_ptr(ptr, None, MemoryKind::Stack)?;
+        };
+        Ok(())
+    }
+
+    /// Call a query that can return `ErrorHandled`. Should be used for statics and other globals.
+    /// (`mir::Const`/`ty::Const` have `eval` methods that can be used directly instead.)
+    pub fn ctfe_query<T>(
+        &self,
+        query: impl FnOnce(TyCtxtAt<'tcx>) -> Result<T, ErrorHandled>,
+    ) -> Result<T, ErrorHandled> {
+        // Use a precise span for better cycle errors.
+        query(self.tcx.at(self.cur_span())).map_err(|err| {
+            err.emit_note(*self.tcx);
+            err
+        })
+    }
+
+    pub fn eval_global(
+        &self,
+        instance: ty::Instance<'tcx>,
+    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
+        let gid = GlobalId { instance, promoted: None };
+        let val = if self.tcx.is_static(gid.instance.def_id()) {
+            let alloc_id = self.tcx.reserve_and_set_static_alloc(gid.instance.def_id());
+
+            let ty = instance.ty(self.tcx.tcx, self.param_env);
+            mir::ConstAlloc { alloc_id, ty }
+        } else {
+            self.ctfe_query(|tcx| tcx.eval_to_allocation_raw(self.param_env.and(gid)))?
+        };
+        self.raw_const_to_mplace(val)
+    }
+
+    pub fn eval_mir_constant(
+        &self,
+        val: &mir::Const<'tcx>,
+        span: Option<Span>,
+        layout: Option<TyAndLayout<'tcx>>,
+    ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+        M::eval_mir_constant(self, *val, span, layout, |ecx, val, span, layout| {
+            let const_val = val.eval(*ecx.tcx, ecx.param_env, span).map_err(|err| {
+                // FIXME: somehow this is reachable even when POST_MONO_CHECKS is on.
+                // Are we not always populating `required_consts`?
+                err.emit_note(*ecx.tcx);
+                err
+            })?;
+            ecx.const_val_to_op(const_val, val.ty(), layout)
+        })
+    }
+
+    #[must_use]
+    pub fn dump_place(
+        &self,
+        place: &PlaceTy<'tcx, M::Provenance>,
+    ) -> PlacePrinter<'_, 'mir, 'tcx, M> {
+        PlacePrinter { ecx: self, place: *place.place() }
+    }
+
+    #[must_use]
+    pub fn generate_stacktrace(&self) -> Vec<FrameInfo<'tcx>> {
+        Frame::generate_stacktrace_from_stack(self.stack())
+    }
+}
+
+#[doc(hidden)]
+/// Helper struct for the `dump_place` function.
+pub struct PlacePrinter<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> {
+    ecx: &'a InterpCx<'mir, 'tcx, M>,
+    place: Place<M::Provenance>,
+}
+
+impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> std::fmt::Debug
+    for PlacePrinter<'a, 'mir, 'tcx, M>
+{
+    fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match self.place {
+            Place::Local { local, offset, locals_addr } => {
+                debug_assert_eq!(locals_addr, self.ecx.frame().locals_addr());
+                let mut allocs = Vec::new();
+                write!(fmt, "{local:?}")?;
+                if let Some(offset) = offset {
+                    write!(fmt, "+{:#x}", offset.bytes())?;
+                }
+                write!(fmt, ":")?;
+
+                match self.ecx.frame().locals[local].value {
+                    LocalValue::Dead => write!(fmt, " is dead")?,
+                    LocalValue::Live(Operand::Immediate(Immediate::Uninit)) => {
+                        write!(fmt, " is uninitialized")?
+                    }
+                    LocalValue::Live(Operand::Indirect(mplace)) => {
+                        write!(
+                            fmt,
+                            " by {} ref {:?}:",
+                            match mplace.meta {
+                                MemPlaceMeta::Meta(meta) => format!(" meta({meta:?})"),
+                                MemPlaceMeta::None => String::new(),
+                            },
+                            mplace.ptr,
+                        )?;
+                        allocs.extend(mplace.ptr.provenance.map(Provenance::get_alloc_id));
+                    }
+                    LocalValue::Live(Operand::Immediate(Immediate::Scalar(val))) => {
+                        write!(fmt, " {val:?}")?;
+                        if let Scalar::Ptr(ptr, _size) = val {
+                            allocs.push(ptr.provenance.get_alloc_id());
+                        }
+                    }
+                    LocalValue::Live(Operand::Immediate(Immediate::ScalarPair(val1, val2))) => {
+                        write!(fmt, " ({val1:?}, {val2:?})")?;
+                        if let Scalar::Ptr(ptr, _size) = val1 {
+                            allocs.push(ptr.provenance.get_alloc_id());
+                        }
+                        if let Scalar::Ptr(ptr, _size) = val2 {
+                            allocs.push(ptr.provenance.get_alloc_id());
+                        }
+                    }
+                }
+
+                write!(fmt, ": {:?}", self.ecx.dump_allocs(allocs.into_iter().flatten().collect()))
+            }
+            Place::Ptr(mplace) => match mplace.ptr.provenance.and_then(Provenance::get_alloc_id) {
+                Some(alloc_id) => {
+                    write!(fmt, "by ref {:?}: {:?}", mplace.ptr, self.ecx.dump_alloc(alloc_id))
+                }
+                ptr => write!(fmt, " integral by ref: {ptr:?}"),
+            },
+        }
+    }
+}
diff --git a/compiler/rustc_const_eval/src/interpret/intern.rs b/compiler/rustc_const_eval/src/interpret/intern.rs
new file mode 100644
index 00000000000..17bb59aae8f
--- /dev/null
+++ b/compiler/rustc_const_eval/src/interpret/intern.rs
@@ -0,0 +1,331 @@
+//! This module specifies the type based interner for constants.
+//!
+//! After a const evaluation has computed a value, before we destroy the const evaluator's session
+//! memory, we need to extract all memory allocations to the global memory pool so they stay around.
+//!
+//! In principle, this is not very complicated: we recursively walk the final value, follow all the
+//! pointers, and move all reachable allocations to the global `tcx` memory. The only complication
+//! is picking the right mutability: the outermost allocation generally has a clear mutability, but
+//! what about the other allocations it points to that have also been created with this value? We
+//! don't want to do guesswork here. The rules are: `static`, `const`, and promoted can only create
+//! immutable allocations that way. `static mut` can be initialized with expressions like `&mut 42`,
+//! so all inner allocations are marked mutable. Some of them could potentially be made immutable,
+//! but that would require relying on type information, and given how many ways Rust has to lie
+//! about type information, we want to avoid doing that.
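+//!
+//! For example (as implemented in `intern_const_alloc_recursive` below): interning
+//! `static mut M: &mut i32 = &mut 42;` marks both the static's own allocation and the inner
+//! allocation holding the `42` as mutable, while `const C: &i32 = &42;` interns both
+//! allocations immutably.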
+
+use hir::def::DefKind;
+use rustc_ast::Mutability;
+use rustc_data_structures::fx::{FxHashSet, FxIndexMap};
+use rustc_errors::ErrorGuaranteed;
+use rustc_hir as hir;
+use rustc_middle::mir::interpret::{ConstAllocation, CtfeProvenance, InterpResult};
+use rustc_middle::query::TyCtxtAt;
+use rustc_middle::ty::layout::TyAndLayout;
+use rustc_session::lint;
+use rustc_span::def_id::LocalDefId;
+use rustc_span::sym;
+
+use super::{AllocId, Allocation, InterpCx, MPlaceTy, Machine, MemoryKind, PlaceTy};
+use crate::const_eval;
+use crate::errors::{DanglingPtrInFinal, MutablePtrInFinal};
+
+pub trait CompileTimeMachine<'mir, 'tcx: 'mir, T> = Machine<
+        'mir,
+        'tcx,
+        MemoryKind = T,
+        Provenance = CtfeProvenance,
+        ExtraFnVal = !,
+        FrameExtra = (),
+        AllocExtra = (),
+        MemoryMap = FxIndexMap<AllocId, (MemoryKind<T>, Allocation)>,
+    > + HasStaticRootDefId;
+
+pub trait HasStaticRootDefId {
+    /// Returns the `DefId` of the static item that is currently being evaluated.
+    /// Used for interning to be able to handle nested allocations.
+    fn static_def_id(&self) -> Option<LocalDefId>;
+}
+
+impl HasStaticRootDefId for const_eval::CompileTimeInterpreter<'_, '_> {
+    fn static_def_id(&self) -> Option<LocalDefId> {
+        Some(self.static_root_ids?.1)
+    }
+}
+
+/// Intern an allocation. Returns `Err` if the allocation does not exist in the local memory.
+///
+/// `mutability` can be used to force immutable interning: if it is `Mutability::Not`, the
+/// allocation is interned immutably; if it is `Mutability::Mut`, then the allocation *must be*
+/// already mutable (as a sanity check).
+///
+/// Returns an iterator over all relocations referred to by this allocation.
+fn intern_shallow<'rt, 'mir, 'tcx, T, M: CompileTimeMachine<'mir, 'tcx, T>>(
+    ecx: &'rt mut InterpCx<'mir, 'tcx, M>,
+    alloc_id: AllocId,
+    mutability: Mutability,
+) -> Result<impl Iterator<Item = CtfeProvenance> + 'tcx, ()> {
+    trace!("intern_shallow {:?}", alloc_id);
+    // remove allocation
+    // FIXME(#120456) - is `swap_remove` correct?
+    let Some((_kind, mut alloc)) = ecx.memory.alloc_map.swap_remove(&alloc_id) else {
+        return Err(());
+    };
+    // Set allocation mutability as appropriate. This is used by LLVM to put things into
+    // read-only memory, and also by Miri when evaluating other globals that
+    // access this one.
+    match mutability {
+        Mutability::Not => {
+            alloc.mutability = Mutability::Not;
+        }
+        Mutability::Mut => {
+            // This must already be mutable; we won't ever "un-freeze" allocations.
+            assert_eq!(alloc.mutability, Mutability::Mut);
+        }
+    }
+    // link the alloc id to the actual allocation
+    let alloc = ecx.tcx.mk_const_alloc(alloc);
+    if let Some(static_id) = ecx.machine.static_def_id() {
+        intern_as_new_static(ecx.tcx, static_id, alloc_id, alloc);
+    } else {
+        ecx.tcx.set_alloc_id_memory(alloc_id, alloc);
+    }
+    Ok(alloc.0.0.provenance().ptrs().iter().map(|&(_, prov)| prov))
+}
+
+/// Creates a new `DefId` and feeds all the right queries to make this `DefId`
+/// appear as if it were a user-written `static` (though it has no HIR).
+fn intern_as_new_static<'tcx>(
+    tcx: TyCtxtAt<'tcx>,
+    static_id: LocalDefId,
+    alloc_id: AllocId,
+    alloc: ConstAllocation<'tcx>,
+) {
+    let feed = tcx.create_def(
+        static_id,
+        sym::nested,
+        DefKind::Static { mutability: alloc.0.mutability, nested: true },
+    );
+    tcx.set_nested_alloc_id_static(alloc_id, feed.def_id());
+    feed.codegen_fn_attrs(tcx.codegen_fn_attrs(static_id).clone());
+    feed.eval_static_initializer(Ok(alloc));
+    feed.generics_of(tcx.generics_of(static_id).clone());
+    feed.def_ident_span(tcx.def_ident_span(static_id));
+    feed.explicit_predicates_of(tcx.explicit_predicates_of(static_id));
+}
+
+/// How a constant value should be interned.
+#[derive(Copy, Clone, Debug, PartialEq, Hash, Eq)]
+pub enum InternKind {
+    /// The `mutability` of the static, ignoring the type which may have interior mutability.
+    Static(hir::Mutability),
+    /// A `const` item
+    Constant,
+    Promoted,
+}
+
+/// Intern `ret` and everything it references.
+///
+/// This *cannot raise an interpreter error*. Doing so is left to validation, which
+/// tracks where in the value we are and thus can show much better error messages.
+///
+/// For `InternKind::Static` the root allocation will not be interned, but must be handled by the caller.
+#[instrument(level = "debug", skip(ecx))]
+pub fn intern_const_alloc_recursive<
+    'mir,
+    'tcx: 'mir,
+    M: CompileTimeMachine<'mir, 'tcx, const_eval::MemoryKind>,
+>(
+    ecx: &mut InterpCx<'mir, 'tcx, M>,
+    intern_kind: InternKind,
+    ret: &MPlaceTy<'tcx>,
+) -> Result<(), ErrorGuaranteed> {
+    // We are interning recursively, and for mutability we are distinguishing the "root" allocation
+    // that we are starting in, and all other allocations that we are encountering recursively.
+    let (base_mutability, inner_mutability, is_static) = match intern_kind {
+        InternKind::Constant | InternKind::Promoted => {
+            // Completely immutable. Interning anything mutably here can only lead to unsoundness,
+            // since all consts are conceptually independent values but share the same underlying
+            // memory.
+            (Mutability::Not, Mutability::Not, false)
+        }
+        InternKind::Static(Mutability::Not) => {
+            (
+                // Outermost allocation is mutable if `!Freeze`.
+                if ret.layout.ty.is_freeze(*ecx.tcx, ecx.param_env) {
+                    Mutability::Not
+                } else {
+                    Mutability::Mut
+                },
+                // Inner allocations are never mutable. They can only arise via the "tail
+                // expression" / "outer scope" rule, and we treat them consistently with `const`.
+                Mutability::Not,
+                true,
+            )
+        }
+        InternKind::Static(Mutability::Mut) => {
+            // Just make everything mutable. We accept code like
+            // `static mut X = &mut [42]`, so even inner allocations need to be mutable.
+            (Mutability::Mut, Mutability::Mut, true)
+        }
+    };
+
+    // Intern the base allocation, and initialize todo list for recursive interning.
+    let base_alloc_id = ret.ptr().provenance.unwrap().alloc_id();
+    trace!(?base_alloc_id, ?base_mutability);
+    // First we intern the base allocation, as it requires a different mutability.
+    // This gives us the initial set of nested allocations, which will then all be processed
+    // recursively in the loop below.
+    let mut todo: Vec<_> = if is_static {
+        // Do not steal the root allocation; we need it later to create the return value of `eval_static_initializer`.
+        // But still change its mutability to match the requested one.
+        let alloc = ecx.memory.alloc_map.get_mut(&base_alloc_id).unwrap();
+        alloc.1.mutability = base_mutability;
+        alloc.1.provenance().ptrs().iter().map(|&(_, prov)| prov).collect()
+    } else {
+        intern_shallow(ecx, base_alloc_id, base_mutability).unwrap().collect()
+    };
+    // We need to distinguish "has just been interned" from "was already in `tcx`",
+    // so we track this in a separate set.
+    let mut just_interned: FxHashSet<_> = std::iter::once(base_alloc_id).collect();
+    // Whether we encountered a bad mutable pointer.
+    // We want to first report "dangling" and then "mutable", so we need to delay reporting these
+    // errors.
+    let mut found_bad_mutable_pointer = false;
+
+    // Keep interning as long as there are things to intern.
+    // We show errors if there are dangling pointers, or mutable pointers in immutable contexts
+    // (i.e., everything except for `static mut`). When these errors affect references, it is
+    // unfortunate that we show these errors here and not during validation, since validation can
+    // show much nicer errors. However, we do need these checks to be run on all pointers, including
+    // raw pointers, so we cannot rely on validation to catch them -- and since interning runs
+    // before validation, and interning doesn't know the type of anything, this means we can't show
+    // better errors. Maybe we should consider doing validation before interning in the future.
+    while let Some(prov) = todo.pop() {
+        trace!(?prov);
+        let alloc_id = prov.alloc_id();
+
+        if base_alloc_id == alloc_id && is_static {
+            // This is a pointer to the static itself. It's ok for a static to refer to itself,
+            // even mutably. Whether that mutable pointer is legal at all is checked in validation.
+            // See tests/ui/statics/recursive_interior_mut.rs for how such a situation can occur.
+            // We also already collected all the nested allocations, so there's no need to do that again.
+            continue;
+        }
+
+        // Crucially, we check this *before* checking whether the `alloc_id`
+        // has already been interned. The point of this check is to ensure that when
+        // there are multiple pointers to the same allocation, they are *all* immutable.
+        // Therefore it would be bad if we only checked the first pointer to any given
+        // allocation.
+        // (It is likely not possible to actually have multiple pointers to the same allocation,
+        // so alternatively we could also check that and ICE if there are multiple such pointers.)
+        if intern_kind != InternKind::Promoted
+            && inner_mutability == Mutability::Not
+            && !prov.immutable()
+        {
+            if ecx.tcx.try_get_global_alloc(alloc_id).is_some()
+                && !just_interned.contains(&alloc_id)
+            {
+                // This is a pointer to some memory from another constant. We encounter mutable
+                // pointers to such memory since we do not always track immutability through
+                // these "global" pointers. Allowing them is harmless; the point of these checks
+                // during interning is to justify why we intern the *new* allocations immutably,
+                // so we can completely ignore existing allocations. We also don't need to add
+                // this to the todo list, since after all it is already interned.
+                continue;
+            }
+            // Found a mutable pointer inside a const where inner allocations should be
+            // immutable. We exclude promoteds from this, since things like `&mut []` and
+            // `&None::<Cell<i32>>` lead to promotion that can produce mutable pointers. We rely
+            // on the promotion analysis not screwing up to ensure that it is sound to intern
+            // promoteds as immutable.
+            trace!("found bad mutable pointer");
+            found_bad_mutable_pointer = true;
+        }
+        if ecx.tcx.try_get_global_alloc(alloc_id).is_some() {
+            // Already interned.
+            debug_assert!(!ecx.memory.alloc_map.contains_key(&alloc_id));
+            continue;
+        }
+        just_interned.insert(alloc_id);
+        // We always intern with `inner_mutability`, and furthermore we ensured above that if
+        // that is "immutable", then there are *no* mutable pointers anywhere in the newly
+        // interned memory -- justifying that we can indeed intern immutably. However this also
+        // means we can *not* easily intern immutably here if `prov.immutable()` is true and
+        // `inner_mutability` is `Mut`: there might be other pointers to that allocation, and
+        // we'd have to somehow check that they are *all* immutable before deciding that this
+        // allocation can be made immutable. In the future we could consider analyzing all
+        // pointers before deciding which allocations can be made immutable; but for now we are
+        // okay with losing some potential for immutability here. This can anyway only affect
+        // `static mut`.
+        todo.extend(intern_shallow(ecx, alloc_id, inner_mutability).map_err(|()| {
+            ecx.tcx.dcx().emit_err(DanglingPtrInFinal { span: ecx.tcx.span, kind: intern_kind })
+        })?);
+    }
+    if found_bad_mutable_pointer {
+        let err_diag = MutablePtrInFinal { span: ecx.tcx.span, kind: intern_kind };
+        ecx.tcx.emit_node_span_lint(
+            lint::builtin::CONST_EVAL_MUTABLE_PTR_IN_FINAL_VALUE,
+            ecx.best_lint_scope(),
+            err_diag.span,
+            err_diag,
+        )
+    }
+
+    Ok(())
+}
+
+/// Intern the allocation behind `alloc_id`. This function assumes that it references no other allocation.
+#[instrument(level = "debug", skip(ecx))]
+pub fn intern_const_alloc_for_constprop<
+    'mir,
+    'tcx: 'mir,
+    T,
+    M: CompileTimeMachine<'mir, 'tcx, T>,
+>(
+    ecx: &mut InterpCx<'mir, 'tcx, M>,
+    alloc_id: AllocId,
+) -> InterpResult<'tcx, ()> {
+    if ecx.tcx.try_get_global_alloc(alloc_id).is_some() {
+        // The constant is already in global memory. Do nothing.
+        return Ok(());
+    }
+    // Move allocation to `tcx`.
+    for _ in intern_shallow(ecx, alloc_id, Mutability::Not).map_err(|()| err_ub!(DeadLocal))? {
+        // We are not doing recursive interning, so we don't currently support provenance.
+        // (If this assertion ever triggers, we should just implement a
+        // proper recursive interning loop -- or just call `intern_const_alloc_recursive`.)
+        panic!("`intern_const_alloc_for_constprop` called on allocation with nested provenance")
+    }
+    Ok(())
+}
+
+impl<'mir, 'tcx: 'mir, M: super::intern::CompileTimeMachine<'mir, 'tcx, !>>
+    InterpCx<'mir, 'tcx, M>
+{
+    /// A helper function that allocates memory for the layout given and gives you access to mutate
+    /// it. Once your own mutation code is done, the backing `Allocation` is removed from the
+    /// current `Memory` and interned as read-only into the global memory.
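+    ///
+    /// Illustrative sketch (names and layout are assumptions, not taken from the surrounding code):
+    /// with a `u32` layout, `ecx.intern_with_temp_alloc(layout, |ecx, dest| ecx.write_scalar(Scalar::from_u32(0), dest))`
+    /// should yield the `AllocId` of a read-only allocation containing `0_u32`.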
+    pub fn intern_with_temp_alloc(
+        &mut self,
+        layout: TyAndLayout<'tcx>,
+        f: impl FnOnce(
+            &mut InterpCx<'mir, 'tcx, M>,
+            &PlaceTy<'tcx, M::Provenance>,
+        ) -> InterpResult<'tcx, ()>,
+    ) -> InterpResult<'tcx, AllocId> {
+        // `allocate` picks a fresh AllocId that we will associate with its data below.
+        let dest = self.allocate(layout, MemoryKind::Stack)?;
+        f(self, &dest.clone().into())?;
+        let alloc_id = dest.ptr().provenance.unwrap().alloc_id(); // this was just allocated, it must have provenance
+        for prov in intern_shallow(self, alloc_id, Mutability::Not).unwrap() {
+            // We are not doing recursive interning, so we don't currently support provenance.
+            // (If this assertion ever triggers, we should just implement a
+            // proper recursive interning loop -- or just call `intern_const_alloc_recursive`.)
+            if self.tcx.try_get_global_alloc(prov.alloc_id()).is_none() {
+                panic!("`intern_with_temp_alloc` with nested allocations");
+            }
+        }
+        Ok(alloc_id)
+    }
+}
diff --git a/compiler/rustc_const_eval/src/interpret/intrinsics.rs b/compiler/rustc_const_eval/src/interpret/intrinsics.rs
new file mode 100644
index 00000000000..b68bfb4211d
--- /dev/null
+++ b/compiler/rustc_const_eval/src/interpret/intrinsics.rs
@@ -0,0 +1,678 @@
+//! Intrinsics and other functions that the interpreter executes without
+//! looking at their MIR. Intrinsics/functions supported here are shared by CTFE
+//! and miri.
+
+use rustc_hir::def_id::DefId;
+use rustc_middle::ty;
+use rustc_middle::ty::layout::{LayoutOf as _, ValidityRequirement};
+use rustc_middle::ty::GenericArgsRef;
+use rustc_middle::ty::{Ty, TyCtxt};
+use rustc_middle::{
+    mir::{
+        self,
+        interpret::{
+            Allocation, ConstAllocation, GlobalId, InterpResult, PointerArithmetic, Scalar,
+        },
+        BinOp, ConstValue, NonDivergingIntrinsic,
+    },
+    ty::layout::TyAndLayout,
+};
+use rustc_span::symbol::{sym, Symbol};
+use rustc_target::abi::Size;
+
+use super::{
+    util::ensure_monomorphic_enough, CheckInAllocMsg, ImmTy, InterpCx, MPlaceTy, Machine, OpTy,
+    Pointer,
+};
+
+use crate::fluent_generated as fluent;
+
+/// Directly returns an `Allocation` containing an absolute path representation of the given type.
+pub(crate) fn alloc_type_name<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> ConstAllocation<'tcx> {
+    let path = crate::util::type_name(tcx, ty);
+    let alloc = Allocation::from_bytes_byte_aligned_immutable(path.into_bytes());
+    tcx.mk_const_alloc(alloc)
+}
+
+/// The logic for all nullary intrinsics is implemented here. These intrinsics don't get evaluated
+/// inside an `InterpCx` and instead have their value computed directly from rustc internal info.
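+/// Currently this covers `type_name`, `needs_drop`, `pref_align_of`, `type_id`, and
+/// `variant_count` (see the match below).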
+pub(crate) fn eval_nullary_intrinsic<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    param_env: ty::ParamEnv<'tcx>,
+    def_id: DefId,
+    args: GenericArgsRef<'tcx>,
+) -> InterpResult<'tcx, ConstValue<'tcx>> {
+    let tp_ty = args.type_at(0);
+    let name = tcx.item_name(def_id);
+    Ok(match name {
+        sym::type_name => {
+            ensure_monomorphic_enough(tcx, tp_ty)?;
+            let alloc = alloc_type_name(tcx, tp_ty);
+            ConstValue::Slice { data: alloc, meta: alloc.inner().size().bytes() }
+        }
+        sym::needs_drop => {
+            ensure_monomorphic_enough(tcx, tp_ty)?;
+            ConstValue::from_bool(tp_ty.needs_drop(tcx, param_env))
+        }
+        sym::pref_align_of => {
+            // Correctly handles non-monomorphic calls, so there is no need for ensure_monomorphic_enough.
+            let layout = tcx.layout_of(param_env.and(tp_ty)).map_err(|e| err_inval!(Layout(*e)))?;
+            ConstValue::from_target_usize(layout.align.pref.bytes(), &tcx)
+        }
+        sym::type_id => {
+            ensure_monomorphic_enough(tcx, tp_ty)?;
+            ConstValue::from_u128(tcx.type_id_hash(tp_ty).as_u128())
+        }
+        sym::variant_count => match tp_ty.kind() {
+            // Correctly handles non-monomorphic calls, so there is no need for ensure_monomorphic_enough.
+            ty::Adt(adt, _) => ConstValue::from_target_usize(adt.variants().len() as u64, &tcx),
+            ty::Alias(..) | ty::Param(_) | ty::Placeholder(_) | ty::Infer(_) => {
+                throw_inval!(TooGeneric)
+            }
+            ty::Bound(_, _) => bug!("bound ty during ctfe"),
+            ty::Bool
+            | ty::Char
+            | ty::Int(_)
+            | ty::Uint(_)
+            | ty::Float(_)
+            | ty::Foreign(_)
+            | ty::Str
+            | ty::Array(_, _)
+            | ty::Slice(_)
+            | ty::RawPtr(_)
+            | ty::Ref(_, _, _)
+            | ty::FnDef(_, _)
+            | ty::FnPtr(_)
+            | ty::Dynamic(_, _, _)
+            | ty::Closure(_, _)
+            | ty::CoroutineClosure(_, _)
+            | ty::Coroutine(_, _)
+            | ty::CoroutineWitness(..)
+            | ty::Never
+            | ty::Tuple(_)
+            | ty::Error(_) => ConstValue::from_target_usize(0u64, &tcx),
+        },
+        other => bug!("`{}` is not a zero arg intrinsic", other),
+    })
+}
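+
+// For illustration, library functions surface these nullary intrinsics; expected
+// results in plain Rust:
+//
+//     assert!(std::mem::needs_drop::<String>());
+//     assert!(!std::mem::needs_drop::<u32>());
+//     let _name: &'static str = std::any::type_name::<Option<i32>>();
+//     // `std::mem::variant_count::<Option<u8>>()` (unstable) would yield 2.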
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+    /// Returns `true` if emulation happened.
+    /// Here we implement the intrinsics that are common to all Miri instances; individual machines can add their own
+    /// intrinsic handling.
+    pub fn emulate_intrinsic(
+        &mut self,
+        instance: ty::Instance<'tcx>,
+        args: &[OpTy<'tcx, M::Provenance>],
+        dest: &MPlaceTy<'tcx, M::Provenance>,
+        ret: Option<mir::BasicBlock>,
+    ) -> InterpResult<'tcx, bool> {
+        let instance_args = instance.args;
+        let intrinsic_name = self.tcx.item_name(instance.def_id());
+        let Some(ret) = ret else {
+            // We don't support any intrinsic without a return place.
+            return Ok(false);
+        };
+
+        match intrinsic_name {
+            sym::caller_location => {
+                let span = self.find_closest_untracked_caller_location();
+                let val = self.tcx.span_as_caller_location(span);
+                let val =
+                    self.const_val_to_op(val, self.tcx.caller_location_ty(), Some(dest.layout))?;
+                self.copy_op(&val, dest)?;
+            }
+
+            sym::min_align_of_val | sym::size_of_val => {
+                // Avoid `deref_pointer` -- this is not a deref, the ptr does not have to be
+                // dereferenceable!
+                let place = self.ref_to_mplace(&self.read_immediate(&args[0])?)?;
+                let (size, align) = self
+                    .size_and_align_of_mplace(&place)?
+                    .ok_or_else(|| err_unsup_format!("`extern type` does not have known layout"))?;
+
+                let result = match intrinsic_name {
+                    sym::min_align_of_val => align.bytes(),
+                    sym::size_of_val => size.bytes(),
+                    _ => bug!(),
+                };
+
+                self.write_scalar(Scalar::from_target_usize(result, self), dest)?;
+            }
+
+            sym::pref_align_of
+            | sym::needs_drop
+            | sym::type_id
+            | sym::type_name
+            | sym::variant_count => {
+                let gid = GlobalId { instance, promoted: None };
+                let ty = match intrinsic_name {
+                    sym::pref_align_of | sym::variant_count => self.tcx.types.usize,
+                    sym::needs_drop => self.tcx.types.bool,
+                    sym::type_id => self.tcx.types.u128,
+                    sym::type_name => Ty::new_static_str(self.tcx.tcx),
+                    _ => bug!(),
+                };
+                let val = self.ctfe_query(|tcx| {
+                    tcx.const_eval_global_id(self.param_env, gid, Some(tcx.span))
+                })?;
+                let val = self.const_val_to_op(val, ty, Some(dest.layout))?;
+                self.copy_op(&val, dest)?;
+            }
+
+            sym::ctpop
+            | sym::cttz
+            | sym::cttz_nonzero
+            | sym::ctlz
+            | sym::ctlz_nonzero
+            | sym::bswap
+            | sym::bitreverse => {
+                let ty = instance_args.type_at(0);
+                let layout = self.layout_of(ty)?;
+                let val = self.read_scalar(&args[0])?;
+                let out_val = self.numeric_intrinsic(intrinsic_name, val, layout)?;
+                self.write_scalar(out_val, dest)?;
+            }
+            sym::saturating_add | sym::saturating_sub => {
+                let l = self.read_immediate(&args[0])?;
+                let r = self.read_immediate(&args[1])?;
+                let val = self.saturating_arith(
+                    if intrinsic_name == sym::saturating_add { BinOp::Add } else { BinOp::Sub },
+                    &l,
+                    &r,
+                )?;
+                self.write_scalar(val, dest)?;
+            }
+            sym::discriminant_value => {
+                let place = self.deref_pointer(&args[0])?;
+                let variant = self.read_discriminant(&place)?;
+                let discr = self.discriminant_for_variant(place.layout.ty, variant)?;
+                self.write_immediate(*discr, dest)?;
+            }
+            sym::exact_div => {
+                let l = self.read_immediate(&args[0])?;
+                let r = self.read_immediate(&args[1])?;
+                self.exact_div(&l, &r, dest)?;
+            }
+            sym::rotate_left | sym::rotate_right => {
+                // rotate_left: (X << (S % BW)) | (X >> ((BW - S) % BW))
+                // rotate_right: (X << ((BW - S) % BW)) | (X >> (S % BW))
+                let layout = self.layout_of(instance_args.type_at(0))?;
+                let val = self.read_scalar(&args[0])?;
+                let val_bits = val.to_bits(layout.size)?;
+                let raw_shift = self.read_scalar(&args[1])?;
+                let raw_shift_bits = raw_shift.to_bits(layout.size)?;
+                let width_bits = u128::from(layout.size.bits());
+                let shift_bits = raw_shift_bits % width_bits;
+                let inv_shift_bits = (width_bits - shift_bits) % width_bits;
+                let result_bits = if intrinsic_name == sym::rotate_left {
+                    (val_bits << shift_bits) | (val_bits >> inv_shift_bits)
+                } else {
+                    (val_bits >> shift_bits) | (val_bits << inv_shift_bits)
+                };
+                let truncated_bits = self.truncate(result_bits, layout);
+                let result = Scalar::from_uint(truncated_bits, layout.size);
+                self.write_scalar(result, dest)?;
+            }
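+            // For illustration, the rotate arm above matches the standard library
+            // semantics, where the shift is taken modulo the bit width:
+            //     assert_eq!(0b1000_0001u8.rotate_left(1), 0b0000_0011);
+            //     assert_eq!(0b1000_0001u8.rotate_left(9), 0b0000_0011); // 9 % 8 == 1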
+            sym::copy => {
+                self.copy_intrinsic(&args[0], &args[1], &args[2], /*nonoverlapping*/ false)?;
+            }
+            sym::write_bytes => {
+                self.write_bytes_intrinsic(&args[0], &args[1], &args[2])?;
+            }
+            sym::compare_bytes => {
+                let result = self.compare_bytes_intrinsic(&args[0], &args[1], &args[2])?;
+                self.write_scalar(result, dest)?;
+            }
+            sym::arith_offset => {
+                let ptr = self.read_pointer(&args[0])?;
+                let offset_count = self.read_target_isize(&args[1])?;
+                let pointee_ty = instance_args.type_at(0);
+
+                let pointee_size = i64::try_from(self.layout_of(pointee_ty)?.size.bytes()).unwrap();
+                let offset_bytes = offset_count.wrapping_mul(pointee_size);
+                let offset_ptr = ptr.wrapping_signed_offset(offset_bytes, self);
+                self.write_pointer(offset_ptr, dest)?;
+            }
+            sym::ptr_offset_from | sym::ptr_offset_from_unsigned => {
+                let a = self.read_pointer(&args[0])?;
+                let b = self.read_pointer(&args[1])?;
+
+                let usize_layout = self.layout_of(self.tcx.types.usize)?;
+                let isize_layout = self.layout_of(self.tcx.types.isize)?;
+
+                // Get offsets for both that are at least relative to the same base.
+                let (a_offset, b_offset) =
+                    match (self.ptr_try_get_alloc_id(a), self.ptr_try_get_alloc_id(b)) {
+                        (Err(a), Err(b)) => {
+                            // Neither pointer points to an allocation.
+                            // If these are unequal or null, this *will* fail the deref check below.
+                            (a, b)
+                        }
+                        (Err(_), _) | (_, Err(_)) => {
+                            // We managed to find a valid allocation for one pointer, but not the other.
+                            // That means they are definitely not pointing to the same allocation.
+                            throw_ub_custom!(
+                                fluent::const_eval_different_allocations,
+                                name = intrinsic_name,
+                            );
+                        }
+                        (Ok((a_alloc_id, a_offset, _)), Ok((b_alloc_id, b_offset, _))) => {
+                            // Found allocation for both. They must be into the same allocation.
+                            if a_alloc_id != b_alloc_id {
+                                throw_ub_custom!(
+                                    fluent::const_eval_different_allocations,
+                                    name = intrinsic_name,
+                                );
+                            }
+                            // Use these offsets for distance calculation.
+                            (a_offset.bytes(), b_offset.bytes())
+                        }
+                    };
+
+                // Compute distance.
+                let dist = {
+                    // Addresses are unsigned, so this is a `usize` computation. We have to do the
+                    // overflow check separately anyway.
+                    let (val, overflowed) = {
+                        let a_offset = ImmTy::from_uint(a_offset, usize_layout);
+                        let b_offset = ImmTy::from_uint(b_offset, usize_layout);
+                        self.overflowing_binary_op(BinOp::Sub, &a_offset, &b_offset)?
+                    };
+                    if overflowed {
+                        // a < b
+                        if intrinsic_name == sym::ptr_offset_from_unsigned {
+                            throw_ub_custom!(
+                                fluent::const_eval_unsigned_offset_from_overflow,
+                                a_offset = a_offset,
+                                b_offset = b_offset,
+                            );
+                        }
+                        // The signed form of the intrinsic allows this. If we interpret the
+                        // difference as isize, we'll get the proper signed difference. If that
+                        // seems *positive*, they were more than isize::MAX apart.
+                        let dist = val.to_scalar().to_target_isize(self)?;
+                        if dist >= 0 {
+                            throw_ub_custom!(
+                                fluent::const_eval_offset_from_underflow,
+                                name = intrinsic_name,
+                            );
+                        }
+                        dist
+                    } else {
+                        // b >= a
+                        let dist = val.to_scalar().to_target_isize(self)?;
+                        // If converting to isize produced a *negative* result, we had an overflow
+                        // because they were more than isize::MAX apart.
+                        if dist < 0 {
+                            throw_ub_custom!(
+                                fluent::const_eval_offset_from_overflow,
+                                name = intrinsic_name,
+                            );
+                        }
+                        dist
+                    }
+                };
+
+                // Check that the range between them is dereferenceable ("in-bounds or one past the
+                // end of the same allocation"). This is like the check in ptr_offset_inbounds.
+                let min_ptr = if dist >= 0 { b } else { a };
+                self.check_ptr_access(
+                    min_ptr,
+                    Size::from_bytes(dist.unsigned_abs()),
+                    CheckInAllocMsg::OffsetFromTest,
+                )?;
+
+                // Perform division by size to compute return value.
+                let ret_layout = if intrinsic_name == sym::ptr_offset_from_unsigned {
+                    assert!(0 <= dist && dist <= self.target_isize_max());
+                    usize_layout
+                } else {
+                    assert!(self.target_isize_min() <= dist && dist <= self.target_isize_max());
+                    isize_layout
+                };
+                let pointee_layout = self.layout_of(instance_args.type_at(0))?;
+                // If ret_layout is unsigned, we checked that so is the distance, so we are good.
+                let val = ImmTy::from_int(dist, ret_layout);
+                let size = ImmTy::from_int(pointee_layout.size.bytes(), ret_layout);
+                self.exact_div(&val, &size, dest)?;
+            }
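+            // For illustration, the `ptr_offset_from` arm above backs
+            // `<*const T>::offset_from`, which returns a distance in elements, not bytes:
+            //     let a = [0u16; 4];
+            //     let base = a.as_ptr();
+            //     assert_eq!(unsafe { base.add(3).offset_from(base) }, 3);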
+
+            sym::assert_inhabited
+            | sym::assert_zero_valid
+            | sym::assert_mem_uninitialized_valid => {
+                let ty = instance.args.type_at(0);
+                let requirement = ValidityRequirement::from_intrinsic(intrinsic_name).unwrap();
+
+                let should_panic = !self
+                    .tcx
+                    .check_validity_requirement((requirement, self.param_env.and(ty)))
+                    .map_err(|_| err_inval!(TooGeneric))?;
+
+                if should_panic {
+                    let layout = self.layout_of(ty)?;
+
+                    let msg = match requirement {
+                        // For *all* intrinsics we first check `is_uninhabited` to give a more specific
+                        // error message.
+                        _ if layout.abi.is_uninhabited() => format!(
+                            "aborted execution: attempted to instantiate uninhabited type `{ty}`"
+                        ),
+                        ValidityRequirement::Inhabited => bug!("handled earlier"),
+                        ValidityRequirement::Zero => format!(
+                            "aborted execution: attempted to zero-initialize type `{ty}`, which is invalid"
+                        ),
+                        ValidityRequirement::UninitMitigated0x01Fill => format!(
+                            "aborted execution: attempted to leave type `{ty}` uninitialized, which is invalid"
+                        ),
+                        ValidityRequirement::Uninit => bug!("assert_uninit_valid doesn't exist"),
+                    };
+
+                    M::panic_nounwind(self, &msg)?;
+                    // Skip the `go_to_block` at the end.
+                    return Ok(true);
+                }
+            }
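+            // For illustration: `unsafe { std::mem::zeroed::<&i32>() }` invokes
+            // `assert_zero_valid` and lands in the `ValidityRequirement::Zero` arm
+            // above, since a null reference is never a valid `&i32`.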
+            sym::simd_insert => {
+                let index = u64::from(self.read_scalar(&args[1])?.to_u32()?);
+                let elem = &args[2];
+                let (input, input_len) = self.operand_to_simd(&args[0])?;
+                let (dest, dest_len) = self.mplace_to_simd(dest)?;
+                assert_eq!(input_len, dest_len, "Return vector length must match input length");
+                // Bounds are not checked by typeck so we have to do it ourselves.
+                if index >= input_len {
+                    throw_ub_format!(
+                        "`simd_insert` index {index} is out-of-bounds of vector with length {input_len}"
+                    );
+                }
+
+                for i in 0..dest_len {
+                    let place = self.project_index(&dest, i)?;
+                    let value = if i == index {
+                        elem.clone()
+                    } else {
+                        self.project_index(&input, i)?.into()
+                    };
+                    self.copy_op(&value, &place)?;
+                }
+            }
+            sym::simd_extract => {
+                let index = u64::from(self.read_scalar(&args[1])?.to_u32()?);
+                let (input, input_len) = self.operand_to_simd(&args[0])?;
+                // Bounds are not checked by typeck so we have to do it ourselves.
+                if index >= input_len {
+                    throw_ub_format!(
+                        "`simd_extract` index {index} is out-of-bounds of vector with length {input_len}"
+                    );
+                }
+                self.copy_op(&self.project_index(&input, index)?, dest)?;
+            }
+            sym::likely | sym::unlikely | sym::black_box => {
+                // These just return their argument
+                self.copy_op(&args[0], dest)?;
+            }
+            sym::raw_eq => {
+                let result = self.raw_eq_intrinsic(&args[0], &args[1])?;
+                self.write_scalar(result, dest)?;
+            }
+
+            sym::vtable_size => {
+                let ptr = self.read_pointer(&args[0])?;
+                let (size, _align) = self.get_vtable_size_and_align(ptr)?;
+                self.write_scalar(Scalar::from_target_usize(size.bytes(), self), dest)?;
+            }
+            sym::vtable_align => {
+                let ptr = self.read_pointer(&args[0])?;
+                let (_size, align) = self.get_vtable_size_and_align(ptr)?;
+                self.write_scalar(Scalar::from_target_usize(align.bytes(), self), dest)?;
+            }
+
+            _ => return Ok(false),
+        }
+
+        trace!("{:?}", self.dump_place(&dest.clone().into()));
+        self.go_to_block(ret);
+        Ok(true)
+    }
+
+    pub(super) fn emulate_nondiverging_intrinsic(
+        &mut self,
+        intrinsic: &NonDivergingIntrinsic<'tcx>,
+    ) -> InterpResult<'tcx> {
+        match intrinsic {
+            NonDivergingIntrinsic::Assume(op) => {
+                let op = self.eval_operand(op, None)?;
+                let cond = self.read_scalar(&op)?.to_bool()?;
+                if !cond {
+                    throw_ub_custom!(fluent::const_eval_assume_false);
+                }
+                Ok(())
+            }
+            NonDivergingIntrinsic::CopyNonOverlapping(mir::CopyNonOverlapping {
+                count,
+                src,
+                dst,
+            }) => {
+                let src = self.eval_operand(src, None)?;
+                let dst = self.eval_operand(dst, None)?;
+                let count = self.eval_operand(count, None)?;
+                self.copy_intrinsic(&src, &dst, &count, /* nonoverlapping */ true)
+            }
+        }
+    }
+
+    pub fn numeric_intrinsic(
+        &self,
+        name: Symbol,
+        val: Scalar<M::Provenance>,
+        layout: TyAndLayout<'tcx>,
+    ) -> InterpResult<'tcx, Scalar<M::Provenance>> {
+        assert!(layout.ty.is_integral(), "invalid type for numeric intrinsic: {}", layout.ty);
+        let bits = val.to_bits(layout.size)?;
+        let extra = 128 - u128::from(layout.size.bits());
+        let bits_out = match name {
+            sym::ctpop => u128::from(bits.count_ones()),
+            sym::ctlz_nonzero | sym::cttz_nonzero if bits == 0 => {
+                throw_ub_custom!(fluent::const_eval_call_nonzero_intrinsic, name = name,);
+            }
+            sym::ctlz | sym::ctlz_nonzero => u128::from(bits.leading_zeros()) - extra,
+            sym::cttz | sym::cttz_nonzero => u128::from((bits << extra).trailing_zeros()) - extra,
+            sym::bswap => (bits << extra).swap_bytes(),
+            sym::bitreverse => (bits << extra).reverse_bits(),
+            _ => bug!("not a numeric intrinsic: {}", name),
+        };
+        Ok(Scalar::from_uint(bits_out, layout.size))
+    }
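+
+    // For illustration, the `extra` adjustment above compensates for widening the
+    // value to 128 bits; on the original width these agree with the standard library:
+    //     assert_eq!(0b0000_0110u8.count_ones(), 2);     // ctpop
+    //     assert_eq!(0b0000_0110u8.leading_zeros(), 5);  // ctlz
+    //     assert_eq!(0b0000_0110u8.trailing_zeros(), 1); // cttz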
+
+    pub fn exact_div(
+        &mut self,
+        a: &ImmTy<'tcx, M::Provenance>,
+        b: &ImmTy<'tcx, M::Provenance>,
+        dest: &MPlaceTy<'tcx, M::Provenance>,
+    ) -> InterpResult<'tcx> {
+        assert_eq!(a.layout.ty, b.layout.ty);
+        assert!(matches!(a.layout.ty.kind(), ty::Int(..) | ty::Uint(..)));
+
+        // Performs an exact division, resulting in undefined behavior where
+        // `x % y != 0` or `y == 0` or `x == T::MIN && y == -1`.
+        // First, check x % y != 0 (or if that computation overflows).
+        let (res, overflow) = self.overflowing_binary_op(BinOp::Rem, a, b)?;
+        assert!(!overflow); // All overflow is UB, so this should never return on overflow.
+        if res.to_scalar().assert_bits(a.layout.size) != 0 {
+            throw_ub_custom!(
+                fluent::const_eval_exact_div_has_remainder,
+                a = format!("{a}"),
+                b = format!("{b}")
+            )
+        }
+        // `Rem` says this is all right, so we can let `Div` do its job.
+        self.binop_ignore_overflow(BinOp::Div, a, b, &dest.clone().into())
+    }
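+
+    // Worked example: for `a = 10`, `b = 3`, the `Rem` above yields 1, so const
+    // evaluation reports the "exact_div has remainder" error; for `a = 10`, `b = 2`
+    // the remainder is 0 and `Div` produces 5.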
+
+    pub fn saturating_arith(
+        &self,
+        mir_op: BinOp,
+        l: &ImmTy<'tcx, M::Provenance>,
+        r: &ImmTy<'tcx, M::Provenance>,
+    ) -> InterpResult<'tcx, Scalar<M::Provenance>> {
+        assert_eq!(l.layout.ty, r.layout.ty);
+        assert!(matches!(l.layout.ty.kind(), ty::Int(..) | ty::Uint(..)));
+        assert!(matches!(mir_op, BinOp::Add | BinOp::Sub));
+
+        let (val, overflowed) = self.overflowing_binary_op(mir_op, l, r)?;
+        Ok(if overflowed {
+            let size = l.layout.size;
+            let num_bits = size.bits();
+            if l.layout.abi.is_signed() {
+                // For signed ints, the saturated value depends on the sign of the first
+                // term, since the sign of the second term can be inferred from this and
+                // the fact that the operation has overflowed (if either term is 0, no
+                // overflow can occur).
+                let first_term: u128 = l.to_scalar().to_bits(l.layout.size)?;
+                let first_term_positive = first_term & (1 << (num_bits - 1)) == 0;
+                if first_term_positive {
+                    // Negative overflow not possible since the positive first term
+                    // can only increase an (in range) negative term for addition
+                    // or corresponding negated positive term for subtraction
+                    Scalar::from_int(size.signed_int_max(), size)
+                } else {
+                    // Positive overflow not possible for similar reason
+                    // max negative
+                    Scalar::from_int(size.signed_int_min(), size)
+                }
+            } else {
+                // unsigned
+                if matches!(mir_op, BinOp::Add) {
+                    // max unsigned
+                    Scalar::from_uint(size.unsigned_int_max(), size)
+                } else {
+                    // underflow to 0
+                    Scalar::from_uint(0u128, size)
+                }
+            }
+        } else {
+            val.to_scalar()
+        })
+    }
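+
+    // For illustration, this mirrors the standard library's saturating operations:
+    //     assert_eq!(u8::MAX.saturating_add(1), u8::MAX);
+    //     assert_eq!(0u8.saturating_sub(1), 0);
+    //     assert_eq!(i8::MAX.saturating_add(1), i8::MAX);
+    //     assert_eq!(i8::MIN.saturating_sub(1), i8::MIN);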
+
+    /// Offsets a pointer by some multiple of its type, returning an error if the pointer leaves its
+    /// allocation. For integer pointers, we consider each of them their own tiny allocation of size
+    /// 0, so offset-by-0 (and only 0) is okay -- except that null cannot be offset by _any_ value.
+    pub fn ptr_offset_inbounds(
+        &self,
+        ptr: Pointer<Option<M::Provenance>>,
+        offset_bytes: i64,
+    ) -> InterpResult<'tcx, Pointer<Option<M::Provenance>>> {
+        // The offset being in bounds cannot rely on "wrapping around" the address space.
+        // So, first rule out overflows in the pointer arithmetic.
+        let offset_ptr = ptr.signed_offset(offset_bytes, self)?;
+        // ptr and offset_ptr must be in bounds of the same allocated object. This means all of the
+        // memory between these pointers must be accessible. Note that we do not require the
+        // pointers to be properly aligned (unlike a read/write operation).
+        let min_ptr = if offset_bytes >= 0 { ptr } else { offset_ptr };
+        // This call handles checking for integer/null pointers.
+        self.check_ptr_access(
+            min_ptr,
+            Size::from_bytes(offset_bytes.unsigned_abs()),
+            CheckInAllocMsg::PointerArithmeticTest,
+        )?;
+        Ok(offset_ptr)
+    }
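+
+    // For illustration, this enforces the in-bounds rule documented for
+    // `<*const T>::offset`/`add`: staying within, or one past the end of, the same
+    // allocation is allowed, anything further is UB:
+    //     let a = [1u8, 2, 3];
+    //     let p = a.as_ptr();
+    //     let _ok = unsafe { p.add(3) }; // one past the end: allowed
+    //     // unsafe { p.add(4) };        // out of bounds: UB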
+
+    /// Copy `count*size_of::<T>()` many bytes from `*src` to `*dst`.
+    pub(crate) fn copy_intrinsic(
+        &mut self,
+        src: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,
+        dst: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,
+        count: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,
+        nonoverlapping: bool,
+    ) -> InterpResult<'tcx> {
+        let count = self.read_target_usize(count)?;
+        let layout = self.layout_of(src.layout.ty.builtin_deref(true).unwrap().ty)?;
+        let (size, align) = (layout.size, layout.align.abi);
+        // `checked_mul` enforces too small a bound (the correct one would probably be `target_isize_max`),
+        // but no actual allocation can be big enough for the difference to be noticeable.
+        let size = size.checked_mul(count, self).ok_or_else(|| {
+            err_ub_custom!(
+                fluent::const_eval_size_overflow,
+                name = if nonoverlapping { "copy_nonoverlapping" } else { "copy" }
+            )
+        })?;
+
+        let src = self.read_pointer(src)?;
+        let dst = self.read_pointer(dst)?;
+
+        self.check_ptr_align(src, align)?;
+        self.check_ptr_align(dst, align)?;
+
+        self.mem_copy(src, dst, size, nonoverlapping)
+    }
+
+    pub(crate) fn write_bytes_intrinsic(
+        &mut self,
+        dst: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,
+        byte: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,
+        count: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,
+    ) -> InterpResult<'tcx> {
+        let layout = self.layout_of(dst.layout.ty.builtin_deref(true).unwrap().ty)?;
+
+        let dst = self.read_pointer(dst)?;
+        let byte = self.read_scalar(byte)?.to_u8()?;
+        let count = self.read_target_usize(count)?;
+
+        // `checked_mul` enforces too small a bound (the correct one would probably be `target_isize_max`),
+        // but no actual allocation can be big enough for the difference to be noticeable.
+        let len = layout.size.checked_mul(count, self).ok_or_else(|| {
+            err_ub_custom!(fluent::const_eval_size_overflow, name = "write_bytes")
+        })?;
+
+        let bytes = std::iter::repeat(byte).take(len.bytes_usize());
+        self.write_bytes_ptr(dst, bytes)
+    }
+
+    pub(crate) fn compare_bytes_intrinsic(
+        &mut self,
+        left: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,
+        right: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,
+        byte_count: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,
+    ) -> InterpResult<'tcx, Scalar<M::Provenance>> {
+        let left = self.read_pointer(left)?;
+        let right = self.read_pointer(right)?;
+        let n = Size::from_bytes(self.read_target_usize(byte_count)?);
+
+        let left_bytes = self.read_bytes_ptr_strip_provenance(left, n)?;
+        let right_bytes = self.read_bytes_ptr_strip_provenance(right, n)?;
+
+        // `Ordering`'s discriminants are -1/0/+1, so casting does the right thing.
+        let result = Ord::cmp(left_bytes, right_bytes) as i32;
+        Ok(Scalar::from_i32(result))
+    }
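+
+    // For illustration, the cast above relies on the fixed discriminants of
+    // `core::cmp::Ordering`:
+    //     use std::cmp::Ordering;
+    //     assert_eq!(Ordering::Less as i32, -1);
+    //     assert_eq!(Ordering::Equal as i32, 0);
+    //     assert_eq!(Ordering::Greater as i32, 1);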
+
+    pub(crate) fn raw_eq_intrinsic(
+        &mut self,
+        lhs: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,
+        rhs: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,
+    ) -> InterpResult<'tcx, Scalar<M::Provenance>> {
+        let layout = self.layout_of(lhs.layout.ty.builtin_deref(true).unwrap().ty)?;
+        assert!(layout.is_sized());
+
+        let get_bytes = |this: &InterpCx<'mir, 'tcx, M>,
+                         op: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,
+                         size|
+         -> InterpResult<'tcx, &[u8]> {
+            let ptr = this.read_pointer(op)?;
+            let Some(alloc_ref) = self.get_ptr_alloc(ptr, size)? else {
+                // zero-sized access
+                return Ok(&[]);
+            };
+            if alloc_ref.has_provenance() {
+                throw_ub_custom!(fluent::const_eval_raw_eq_with_provenance);
+            }
+            alloc_ref.get_bytes_strip_provenance()
+        };
+
+        let lhs_bytes = get_bytes(self, lhs, layout.size)?;
+        let rhs_bytes = get_bytes(self, rhs, layout.size)?;
+        Ok(Scalar::from_bool(lhs_bytes == rhs_bytes))
+    }
+}
diff --git a/compiler/rustc_const_eval/src/interpret/machine.rs b/compiler/rustc_const_eval/src/interpret/machine.rs
new file mode 100644
index 00000000000..afc4a80c283
--- /dev/null
+++ b/compiler/rustc_const_eval/src/interpret/machine.rs
@@ -0,0 +1,632 @@
+//! This module contains everything needed to instantiate an interpreter.
+//! This separation exists to ensure that no fancy miri features like
+//! interpreting common C functions leak into CTFE.
+
+use std::borrow::{Borrow, Cow};
+use std::fmt::Debug;
+use std::hash::Hash;
+
+use rustc_apfloat::{Float, FloatConvert};
+use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece};
+use rustc_middle::mir;
+use rustc_middle::query::TyCtxtAt;
+use rustc_middle::ty;
+use rustc_middle::ty::layout::TyAndLayout;
+use rustc_span::def_id::DefId;
+use rustc_span::Span;
+use rustc_target::abi::{Align, Size};
+use rustc_target::spec::abi::Abi as CallAbi;
+
+use super::{
+    AllocBytes, AllocId, AllocKind, AllocRange, Allocation, ConstAllocation, CtfeProvenance, FnArg,
+    Frame, ImmTy, InterpCx, InterpResult, MPlaceTy, MemoryKind, Misalignment, OpTy, PlaceTy,
+    Pointer, Provenance,
+};
+
+/// Data returned by `Machine::stack_pop` to provide further control
+/// over how the stack frame is popped.
+#[derive(Eq, PartialEq, Debug, Copy, Clone)]
+pub enum StackPopJump {
+    /// Indicates that no special handling should be
+    /// done - we'll either return normally or unwind
+    /// based on the terminator for the function
+    /// we're leaving.
+    Normal,
+
+    /// Indicates that we should *not* jump to the return/unwind address, as the callback already
+    /// took care of everything.
+    NoJump,
+}
+
+/// Whether this kind of memory is allowed to leak
+pub trait MayLeak: Copy {
+    fn may_leak(self) -> bool;
+}
+
+/// The functionality needed by memory to manage its allocations
+pub trait AllocMap<K: Hash + Eq, V> {
+    /// Tests if the map contains the given key.
+    /// Deliberately takes `&mut` because that is sufficient, and some implementations
+    /// can then be more efficient (e.g., by using `RefCell::get_mut`).
+    fn contains_key<Q: ?Sized + Hash + Eq>(&mut self, k: &Q) -> bool
+    where
+        K: Borrow<Q>;
+
+    /// Callers should prefer [`AllocMap::contains_key`] when it is possible to call it, because it may
+    /// be more efficient. This function exists for callers that only have a shared reference
+    /// (which might make it slightly less efficient than `contains_key`, e.g. if
+    /// the data is stored inside a `RefCell`).
+    fn contains_key_ref<Q: ?Sized + Hash + Eq>(&self, k: &Q) -> bool
+    where
+        K: Borrow<Q>;
+
+    /// Inserts a new entry into the map.
+    fn insert(&mut self, k: K, v: V) -> Option<V>;
+
+    /// Removes an entry from the map.
+    fn remove<Q: ?Sized + Hash + Eq>(&mut self, k: &Q) -> Option<V>
+    where
+        K: Borrow<Q>;
+
+    /// Returns data based on the keys and values in the map.
+    fn filter_map_collect<T>(&self, f: impl FnMut(&K, &V) -> Option<T>) -> Vec<T>;
+
+    /// Returns a reference to entry `k`. If no such entry exists, call
+    /// `vacant` and either forward its error, or add its result to the map
+    /// and return a reference to *that*.
+    fn get_or<E>(&self, k: K, vacant: impl FnOnce() -> Result<V, E>) -> Result<&V, E>;
+
+    /// Returns a mutable reference to entry `k`. If no such entry exists, call
+    /// `vacant` and either forward its error, or add its result to the map
+    /// and return a reference to *that*.
+    fn get_mut_or<E>(&mut self, k: K, vacant: impl FnOnce() -> Result<V, E>) -> Result<&mut V, E>;
+
+    /// Read-only lookup.
+    fn get(&self, k: K) -> Option<&V> {
+        self.get_or(k, || Err(())).ok()
+    }
+
+    /// Mutable lookup.
+    fn get_mut(&mut self, k: K) -> Option<&mut V> {
+        self.get_mut_or(k, || Err(())).ok()
+    }
+}
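+
+// Illustrative use of the `get_or` contract (hypothetical caller, not taken from
+// this trait): a lookup that lazily copies a global allocation into the map on
+// first access could look roughly like
+//
+//     let (_kind, alloc) = memory.alloc_map.get_or(id, || copy_global(id))?;
+//
+// where `memory` and `copy_global` are placeholders for the machine's memory and
+// whatever produces the owned value.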
+
+/// Methods of this trait signify a point where CTFE evaluation would fail,
+/// and some use-case-dependent behaviour can be applied instead.
+pub trait Machine<'mir, 'tcx: 'mir>: Sized {
+    /// Additional memory kinds a machine wishes to distinguish from the builtin ones
+    type MemoryKind: Debug + std::fmt::Display + MayLeak + Eq + 'static;
+
+    /// Pointers are "tagged" with provenance information; typically the `AllocId` they belong to.
+    type Provenance: Provenance + Eq + Hash + 'static;
+
+    /// When getting the AllocId of a pointer, some extra data is also obtained from the provenance
+    /// that is passed to memory access hooks so they can do things with it.
+    type ProvenanceExtra: Copy + 'static;
+
+    /// Machines can define extra (non-instance) things that represent values of function pointers.
+    /// For example, Miri uses this to return a function pointer from `dlsym`
+    /// that can later be called to execute the right thing.
+    type ExtraFnVal: Debug + Copy;
+
+    /// Extra data stored in every call frame.
+    type FrameExtra;
+
+    /// Extra data stored in every allocation.
+    type AllocExtra: Debug + Clone + 'tcx;
+
+    /// Type for the bytes of the allocation.
+    type Bytes: AllocBytes + 'static;
+
+    /// Memory's allocation map
+    type MemoryMap: AllocMap<
+            AllocId,
+            (
+                MemoryKind<Self::MemoryKind>,
+                Allocation<Self::Provenance, Self::AllocExtra, Self::Bytes>,
+            ),
+        > + Default
+        + Clone;
+
+    /// The memory kind to use for copied global memory (held in `tcx`) --
+    /// or None if such memory should not be mutated and thus any such attempt will cause
+    /// a `ModifiedStatic` error to be raised.
+    /// Statics are copied under two circumstances: When they are mutated, and when
+    /// `adjust_allocation` (see below) returns an owned allocation
+    /// that is added to the memory so that the work is not done twice.
+    const GLOBAL_KIND: Option<Self::MemoryKind>;
+
+    /// Should the machine panic on allocation failures?
+    const PANIC_ON_ALLOC_FAIL: bool;
+
+    /// Should post-monomorphization checks be run when a stack frame is pushed?
+    const POST_MONO_CHECKS: bool = true;
+
+    /// Whether memory accesses should be alignment-checked.
+    fn enforce_alignment(ecx: &InterpCx<'mir, 'tcx, Self>) -> bool;
+
+    /// Gives the machine a chance to detect more misalignment than the built-in checks would catch.
+    #[inline(always)]
+    fn alignment_check(
+        _ecx: &InterpCx<'mir, 'tcx, Self>,
+        _alloc_id: AllocId,
+        _alloc_align: Align,
+        _alloc_kind: AllocKind,
+        _offset: Size,
+        _align: Align,
+    ) -> Option<Misalignment> {
+        None
+    }
+
+    /// Whether to enforce the validity invariant for a specific layout.
+    fn enforce_validity(ecx: &InterpCx<'mir, 'tcx, Self>, layout: TyAndLayout<'tcx>) -> bool;
+
+    /// Whether function calls should be [ABI](CallAbi)-checked.
+    fn enforce_abi(_ecx: &InterpCx<'mir, 'tcx, Self>) -> bool {
+        true
+    }
+
+    /// Whether Assert(OverflowNeg) and Assert(Overflow) MIR terminators should actually
+    /// check for overflow.
+    fn ignore_optional_overflow_checks(_ecx: &InterpCx<'mir, 'tcx, Self>) -> bool;
+
+    /// Entry point for obtaining the MIR of anything that should get evaluated.
+    /// So not just functions and shims, but also const/static initializers, anonymous
+    /// constants, ...
+    fn load_mir(
+        ecx: &InterpCx<'mir, 'tcx, Self>,
+        instance: ty::InstanceDef<'tcx>,
+    ) -> InterpResult<'tcx, &'tcx mir::Body<'tcx>> {
+        Ok(ecx.tcx.instance_mir(instance))
+    }
+
+    /// Entry point to all function calls.
+    ///
+    /// Returns either the mir to use for the call, or `None` if execution should
+    /// just proceed (which usually means this hook did all the work that the
+    /// called function should usually have done). In the latter case, it is
+    /// this hook's responsibility to advance the instruction pointer!
+    /// (This is to support functions like `__rust_maybe_catch_panic` that neither find a MIR
+    /// nor just jump to `ret`, but instead push their own stack frame.)
+    /// Passing `dest` and `ret` in the same `Option` proved very annoying when only one of them
+    /// was used.
+    fn find_mir_or_eval_fn(
+        ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        instance: ty::Instance<'tcx>,
+        abi: CallAbi,
+        args: &[FnArg<'tcx, Self::Provenance>],
+        destination: &MPlaceTy<'tcx, Self::Provenance>,
+        target: Option<mir::BasicBlock>,
+        unwind: mir::UnwindAction,
+    ) -> InterpResult<'tcx, Option<(&'mir mir::Body<'tcx>, ty::Instance<'tcx>)>>;
+
+    /// Execute `fn_val`. It is the hook's responsibility to advance the instruction
+    /// pointer as appropriate.
+    fn call_extra_fn(
+        ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        fn_val: Self::ExtraFnVal,
+        abi: CallAbi,
+        args: &[FnArg<'tcx, Self::Provenance>],
+        destination: &MPlaceTy<'tcx, Self::Provenance>,
+        target: Option<mir::BasicBlock>,
+        unwind: mir::UnwindAction,
+    ) -> InterpResult<'tcx>;
+
+    /// Directly process an intrinsic without pushing a stack frame. It is the hook's
+    /// responsibility to advance the instruction pointer as appropriate.
+    fn call_intrinsic(
+        ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        instance: ty::Instance<'tcx>,
+        args: &[OpTy<'tcx, Self::Provenance>],
+        destination: &MPlaceTy<'tcx, Self::Provenance>,
+        target: Option<mir::BasicBlock>,
+        unwind: mir::UnwindAction,
+    ) -> InterpResult<'tcx>;
+
+    /// Called to evaluate `Assert` MIR terminators that trigger a panic.
+    fn assert_panic(
+        ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        msg: &mir::AssertMessage<'tcx>,
+        unwind: mir::UnwindAction,
+    ) -> InterpResult<'tcx>;
+
+    /// Called to trigger a non-unwinding panic.
+    fn panic_nounwind(_ecx: &mut InterpCx<'mir, 'tcx, Self>, msg: &str) -> InterpResult<'tcx>;
+
+    /// Called when unwinding reached a state where execution should be terminated.
+    fn unwind_terminate(
+        ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        reason: mir::UnwindTerminateReason,
+    ) -> InterpResult<'tcx>;
+
+    /// Called for all binary operations where the LHS has pointer type.
+    ///
+    /// Returns a (value, overflowed) pair if the operation succeeded
+    fn binary_ptr_op(
+        ecx: &InterpCx<'mir, 'tcx, Self>,
+        bin_op: mir::BinOp,
+        left: &ImmTy<'tcx, Self::Provenance>,
+        right: &ImmTy<'tcx, Self::Provenance>,
+    ) -> InterpResult<'tcx, (ImmTy<'tcx, Self::Provenance>, bool)>;
+
+    /// Generate the NaN returned by a float operation, given the list of inputs.
+    /// (This is all inputs, not just NaN inputs!)
+    fn generate_nan<F1: Float + FloatConvert<F2>, F2: Float>(
+        _ecx: &InterpCx<'mir, 'tcx, Self>,
+        _inputs: &[F1],
+    ) -> F2 {
+        // By default we always return the preferred NaN.
+        F2::NAN
+    }
+
+    /// Called before a basic block terminator is executed.
+    #[inline]
+    fn before_terminator(_ecx: &mut InterpCx<'mir, 'tcx, Self>) -> InterpResult<'tcx> {
+        Ok(())
+    }
+
+    /// Called when the interpreter encounters a `StatementKind::ConstEvalCounter` instruction.
+    /// You can use this to detect long or endlessly running programs.
+    #[inline]
+    fn increment_const_eval_counter(_ecx: &mut InterpCx<'mir, 'tcx, Self>) -> InterpResult<'tcx> {
+        Ok(())
+    }
+
+    /// Called before a global allocation is accessed.
+    /// `def_id` is `Some` if this is the "lazy" allocation of a static.
+    #[inline]
+    fn before_access_global(
+        _tcx: TyCtxtAt<'tcx>,
+        _machine: &Self,
+        _alloc_id: AllocId,
+        _allocation: ConstAllocation<'tcx>,
+        _static_def_id: Option<DefId>,
+        _is_write: bool,
+    ) -> InterpResult<'tcx> {
+        Ok(())
+    }
+
+    /// Return the `AllocId` for the given thread-local static in the current thread.
+    fn thread_local_static_base_pointer(
+        _ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        def_id: DefId,
+    ) -> InterpResult<'tcx, Pointer<Self::Provenance>> {
+        throw_unsup!(ThreadLocalStatic(def_id))
+    }
+
+    /// Return the root pointer for the given `extern static`.
+    fn extern_static_base_pointer(
+        ecx: &InterpCx<'mir, 'tcx, Self>,
+        def_id: DefId,
+    ) -> InterpResult<'tcx, Pointer<Self::Provenance>>;
+
+    /// Return a "base" pointer for the given allocation: the one that is used for direct
+    /// accesses to this static/const/fn allocation, or the one returned from the heap allocator.
+    ///
+    /// Not called on `extern` or thread-local statics (those use the methods above).
+    fn adjust_alloc_base_pointer(
+        ecx: &InterpCx<'mir, 'tcx, Self>,
+        ptr: Pointer,
+    ) -> InterpResult<'tcx, Pointer<Self::Provenance>>;
+
+    /// "Int-to-pointer cast"
+    fn ptr_from_addr_cast(
+        ecx: &InterpCx<'mir, 'tcx, Self>,
+        addr: u64,
+    ) -> InterpResult<'tcx, Pointer<Option<Self::Provenance>>>;
+
+    /// Marks a pointer as exposed, allowing its provenance
+    /// to be recovered. "Pointer-to-int cast"
+    fn expose_ptr(
+        ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        ptr: Pointer<Self::Provenance>,
+    ) -> InterpResult<'tcx>;
+
+    /// Convert a pointer with provenance into an allocation-offset pair
+    /// and extra provenance info.
+    ///
+    /// The returned `AllocId` must be the same as `ptr.provenance.get_alloc_id()`.
+    ///
+    /// When this fails, that means the pointer does not point to a live allocation.
+    fn ptr_get_alloc(
+        ecx: &InterpCx<'mir, 'tcx, Self>,
+        ptr: Pointer<Self::Provenance>,
+    ) -> Option<(AllocId, Size, Self::ProvenanceExtra)>;
+
+    /// Called to adjust allocations to the Provenance and AllocExtra of this machine.
+    ///
+    /// The way we construct allocations is to always first construct it without extra and then add
+    /// the extra. This keeps uniform code paths for handling both allocations created by CTFE for
+    /// globals, and allocations created by Miri during evaluation.
+    ///
+    /// `kind` is the kind of the allocation being adjusted; it can be `None` when
+    /// it's a global and `GLOBAL_KIND` is `None`.
+    ///
+    /// This should avoid copying if no work has to be done! If this returns an owned
+    /// allocation (because a copy had to be done to adjust things), machine memory will
+    /// cache the result. (This relies on `AllocMap::get_or` being able to add the
+    /// owned allocation to the map even when the map is shared.)
+    ///
+    /// This must only fail if `alloc` contains provenance.
+    fn adjust_allocation<'b>(
+        ecx: &InterpCx<'mir, 'tcx, Self>,
+        id: AllocId,
+        alloc: Cow<'b, Allocation>,
+        kind: Option<MemoryKind<Self::MemoryKind>>,
+    ) -> InterpResult<'tcx, Cow<'b, Allocation<Self::Provenance, Self::AllocExtra, Self::Bytes>>>;
+
+    /// Evaluate the inline assembly.
+    ///
+    /// This should take care of jumping to the next block (one of `targets`) when asm goto
+    /// is triggered, `targets[0]` when the assembly falls through, or diverge in case of
+    /// `InlineAsmOptions::NORETURN` being set.
+    fn eval_inline_asm(
+        _ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        _template: &'tcx [InlineAsmTemplatePiece],
+        _operands: &[mir::InlineAsmOperand<'tcx>],
+        _options: InlineAsmOptions,
+        _targets: &[mir::BasicBlock],
+    ) -> InterpResult<'tcx> {
+        throw_unsup_format!("inline assembly is not supported")
+    }
+
+    /// Hook for performing extra checks on a memory read access.
+    ///
+    /// This will *not* be called during validation!
+    ///
+    /// Takes read-only access to the allocation so that all the memory read
+    /// operations can take `&self`. Use a `RefCell` in `AllocExtra` if you
+    /// need to mutate.
+    ///
+    /// This is not invoked for ZST accesses, as no read actually happens.
+    #[inline(always)]
+    fn before_memory_read(
+        _tcx: TyCtxtAt<'tcx>,
+        _machine: &Self,
+        _alloc_extra: &Self::AllocExtra,
+        _prov: (AllocId, Self::ProvenanceExtra),
+        _range: AllocRange,
+    ) -> InterpResult<'tcx> {
+        Ok(())
+    }
+
+    /// Hook for performing extra checks on any memory read access
+    /// that involves an allocation, even ZST reads.
+    ///
+    /// This will *not* be called during validation!
+    ///
+    /// Used to prevent statics from self-initializing by reading from their own memory
+    /// as it is being initialized.
+    fn before_alloc_read(
+        _ecx: &InterpCx<'mir, 'tcx, Self>,
+        _alloc_id: AllocId,
+    ) -> InterpResult<'tcx> {
+        Ok(())
+    }
+
+    /// Hook for performing extra checks on a memory write access.
+    /// This is not invoked for ZST accesses, as no write actually happens.
+    #[inline(always)]
+    fn before_memory_write(
+        _tcx: TyCtxtAt<'tcx>,
+        _machine: &mut Self,
+        _alloc_extra: &mut Self::AllocExtra,
+        _prov: (AllocId, Self::ProvenanceExtra),
+        _range: AllocRange,
+    ) -> InterpResult<'tcx> {
+        Ok(())
+    }
+
+    /// Hook for performing extra operations on a memory deallocation.
+    #[inline(always)]
+    fn before_memory_deallocation(
+        _tcx: TyCtxtAt<'tcx>,
+        _machine: &mut Self,
+        _alloc_extra: &mut Self::AllocExtra,
+        _prov: (AllocId, Self::ProvenanceExtra),
+        _size: Size,
+        _align: Align,
+    ) -> InterpResult<'tcx> {
+        Ok(())
+    }
+
+    /// Executes a retagging operation for a single pointer.
+    /// Returns the possibly adjusted pointer.
+    #[inline]
+    fn retag_ptr_value(
+        _ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        _kind: mir::RetagKind,
+        val: &ImmTy<'tcx, Self::Provenance>,
+    ) -> InterpResult<'tcx, ImmTy<'tcx, Self::Provenance>> {
+        Ok(val.clone())
+    }
+
+    /// Executes a retagging operation on a compound value.
+    /// Replaces all pointers stored in the given place.
+    #[inline]
+    fn retag_place_contents(
+        _ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        _kind: mir::RetagKind,
+        _place: &PlaceTy<'tcx, Self::Provenance>,
+    ) -> InterpResult<'tcx> {
+        Ok(())
+    }
+
+    /// Called on places used for in-place function argument and return value handling.
+    ///
+    /// These places need to be protected to make sure the program cannot tell whether the
+    /// argument/return value was actually copied or passed in-place.
+    fn protect_in_place_function_argument(
+        ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        mplace: &MPlaceTy<'tcx, Self::Provenance>,
+    ) -> InterpResult<'tcx> {
+        // Without an aliasing model, all we can do is put `Uninit` into the place.
+        // Conveniently this also ensures that the place actually points to suitable memory.
+        ecx.write_uninit(mplace)
+    }
+
+    /// Called immediately before a new stack frame gets pushed.
+    fn init_frame_extra(
+        ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        frame: Frame<'mir, 'tcx, Self::Provenance>,
+    ) -> InterpResult<'tcx, Frame<'mir, 'tcx, Self::Provenance, Self::FrameExtra>>;
+
+    /// Borrow the current thread's stack.
+    fn stack<'a>(
+        ecx: &'a InterpCx<'mir, 'tcx, Self>,
+    ) -> &'a [Frame<'mir, 'tcx, Self::Provenance, Self::FrameExtra>];
+
+    /// Mutably borrow the current thread's stack.
+    fn stack_mut<'a>(
+        ecx: &'a mut InterpCx<'mir, 'tcx, Self>,
+    ) -> &'a mut Vec<Frame<'mir, 'tcx, Self::Provenance, Self::FrameExtra>>;
+
+    /// Called immediately after a stack frame got pushed and its locals got initialized.
+    fn after_stack_push(_ecx: &mut InterpCx<'mir, 'tcx, Self>) -> InterpResult<'tcx> {
+        Ok(())
+    }
+
+    /// Called just before the return value is copied to the caller-provided return place.
+    fn before_stack_pop(
+        _ecx: &InterpCx<'mir, 'tcx, Self>,
+        _frame: &Frame<'mir, 'tcx, Self::Provenance, Self::FrameExtra>,
+    ) -> InterpResult<'tcx> {
+        Ok(())
+    }
+
+    /// Called immediately after a stack frame got popped, but before jumping back to the caller.
+    /// The `locals` have already been destroyed!
+    #[inline(always)]
+    fn after_stack_pop(
+        _ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        _frame: Frame<'mir, 'tcx, Self::Provenance, Self::FrameExtra>,
+        unwinding: bool,
+    ) -> InterpResult<'tcx, StackPopJump> {
+        // By default, we do not support unwinding from panics
+        assert!(!unwinding);
+        Ok(StackPopJump::Normal)
+    }
+
+    /// Called immediately after actual memory was allocated for a local
+    /// but before the local's stack frame is updated to point to that memory.
+    #[inline(always)]
+    fn after_local_allocated(
+        _ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        _local: mir::Local,
+        _mplace: &MPlaceTy<'tcx, Self::Provenance>,
+    ) -> InterpResult<'tcx> {
+        Ok(())
+    }
+
+    /// Evaluate the given constant. The `eval` function will do all the required evaluation,
+    /// but this hook has the chance to do some pre/postprocessing.
+    #[inline(always)]
+    fn eval_mir_constant<F>(
+        ecx: &InterpCx<'mir, 'tcx, Self>,
+        val: mir::Const<'tcx>,
+        span: Option<Span>,
+        layout: Option<TyAndLayout<'tcx>>,
+        eval: F,
+    ) -> InterpResult<'tcx, OpTy<'tcx, Self::Provenance>>
+    where
+        F: Fn(
+            &InterpCx<'mir, 'tcx, Self>,
+            mir::Const<'tcx>,
+            Option<Span>,
+            Option<TyAndLayout<'tcx>>,
+        ) -> InterpResult<'tcx, OpTy<'tcx, Self::Provenance>>,
+    {
+        eval(ecx, val, span, layout)
+    }
+}
+
+/// A lot of the flexibility above is just needed for `Miri`, but all "compile-time" machines
+/// (CTFE and ConstProp) use the same instance. Here, we share that code.
+pub macro compile_time_machine(<$mir: lifetime, $tcx: lifetime>) {
+    type Provenance = CtfeProvenance;
+    type ProvenanceExtra = bool; // the "immutable" flag
+
+    type ExtraFnVal = !;
+
+    type MemoryMap =
+        rustc_data_structures::fx::FxIndexMap<AllocId, (MemoryKind<Self::MemoryKind>, Allocation)>;
+    const GLOBAL_KIND: Option<Self::MemoryKind> = None; // no copying of globals from `tcx` to machine memory
+
+    type AllocExtra = ();
+    type FrameExtra = ();
+    type Bytes = Box<[u8]>;
+
+    #[inline(always)]
+    fn ignore_optional_overflow_checks(_ecx: &InterpCx<$mir, $tcx, Self>) -> bool {
+        false
+    }
+
+    #[inline(always)]
+    fn unwind_terminate(
+        _ecx: &mut InterpCx<$mir, $tcx, Self>,
+        _reason: mir::UnwindTerminateReason,
+    ) -> InterpResult<$tcx> {
+        unreachable!("unwinding cannot happen during compile-time evaluation")
+    }
+
+    #[inline(always)]
+    fn call_extra_fn(
+        _ecx: &mut InterpCx<$mir, $tcx, Self>,
+        fn_val: !,
+        _abi: CallAbi,
+        _args: &[FnArg<$tcx>],
+        _destination: &MPlaceTy<$tcx, Self::Provenance>,
+        _target: Option<mir::BasicBlock>,
+        _unwind: mir::UnwindAction,
+    ) -> InterpResult<$tcx> {
+        match fn_val {}
+    }
+
+    #[inline(always)]
+    fn adjust_allocation<'b>(
+        _ecx: &InterpCx<$mir, $tcx, Self>,
+        _id: AllocId,
+        alloc: Cow<'b, Allocation>,
+        _kind: Option<MemoryKind<Self::MemoryKind>>,
+    ) -> InterpResult<$tcx, Cow<'b, Allocation<Self::Provenance>>> {
+        Ok(alloc)
+    }
+
+    fn extern_static_base_pointer(
+        ecx: &InterpCx<$mir, $tcx, Self>,
+        def_id: DefId,
+    ) -> InterpResult<$tcx, Pointer> {
+        // Use the `AllocId` associated with the `DefId`. Any actual *access* will fail.
+        Ok(Pointer::new(ecx.tcx.reserve_and_set_static_alloc(def_id).into(), Size::ZERO))
+    }
+
+    #[inline(always)]
+    fn adjust_alloc_base_pointer(
+        _ecx: &InterpCx<$mir, $tcx, Self>,
+        ptr: Pointer<CtfeProvenance>,
+    ) -> InterpResult<$tcx, Pointer<CtfeProvenance>> {
+        Ok(ptr)
+    }
+
+    #[inline(always)]
+    fn ptr_from_addr_cast(
+        _ecx: &InterpCx<$mir, $tcx, Self>,
+        addr: u64,
+    ) -> InterpResult<$tcx, Pointer<Option<CtfeProvenance>>> {
+        // Allow these casts, but make the pointer not dereferenceable.
+        // (I.e., they behave like transmutation.)
+        // This is correct because no pointers can ever be exposed in compile-time evaluation.
+        Ok(Pointer::from_addr_invalid(addr))
+    }
+
+    #[inline(always)]
+    fn ptr_get_alloc(
+        _ecx: &InterpCx<$mir, $tcx, Self>,
+        ptr: Pointer<CtfeProvenance>,
+    ) -> Option<(AllocId, Size, Self::ProvenanceExtra)> {
+        // We know `offset` is relative to the allocation, so we can use `into_parts`.
+        let (prov, offset) = ptr.into_parts();
+        Some((prov.alloc_id(), offset, prov.immutable()))
+    }
+}
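+
+// Illustrative sketch of how this macro is meant to be invoked (with a hypothetical
+// machine type):
+//
+//     impl<'mir, 'tcx> Machine<'mir, 'tcx> for MyCtfeMachine {
+//         compile_time_machine!(<'mir, 'tcx>);
+//         // The items the macro does not define (e.g. `MemoryKind`,
+//         // `find_mir_or_eval_fn`, `call_intrinsic`, `assert_panic`, the stack
+//         // accessors, ...) still have to be provided.
+//     }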
diff --git a/compiler/rustc_const_eval/src/interpret/memory.rs b/compiler/rustc_const_eval/src/interpret/memory.rs
new file mode 100644
index 00000000000..86aad2e1642
--- /dev/null
+++ b/compiler/rustc_const_eval/src/interpret/memory.rs
@@ -0,0 +1,1380 @@
+//! The memory subsystem.
+//!
+//! Generally, we use `Pointer` to denote memory addresses. However, some operations
+//! have a "size"-like parameter, and they take `Scalar` for the address because
+//! if the size is 0, then the pointer can also be a (properly aligned, non-null)
+//! integer. It is crucial that these operations call `check_align` *before*
+//! short-circuiting the empty case!
+
+use std::assert_matches::assert_matches;
+use std::borrow::Cow;
+use std::cell::Cell;
+use std::collections::VecDeque;
+use std::fmt;
+use std::ptr;
+
+use rustc_ast::Mutability;
+use rustc_data_structures::fx::{FxHashSet, FxIndexMap};
+use rustc_hir::def::DefKind;
+use rustc_middle::mir::display_allocation;
+use rustc_middle::ty::{self, Instance, ParamEnv, Ty, TyCtxt};
+use rustc_target::abi::{Align, HasDataLayout, Size};
+
+use crate::fluent_generated as fluent;
+
+use super::{
+    alloc_range, AllocBytes, AllocId, AllocMap, AllocRange, Allocation, CheckAlignMsg,
+    CheckInAllocMsg, CtfeProvenance, GlobalAlloc, InterpCx, InterpResult, Machine, MayLeak,
+    Misalignment, Pointer, PointerArithmetic, Provenance, Scalar,
+};
+
+#[derive(Debug, PartialEq, Copy, Clone)]
+pub enum MemoryKind<T> {
+    /// Stack memory. Error if deallocated except during a stack pop.
+    Stack,
+    /// Memory allocated by `caller_location` intrinsic. Error if ever deallocated.
+    CallerLocation,
+    /// Additional memory kinds a machine wishes to distinguish from the builtin ones.
+    Machine(T),
+}
+
+impl<T: MayLeak> MayLeak for MemoryKind<T> {
+    #[inline]
+    fn may_leak(self) -> bool {
+        match self {
+            MemoryKind::Stack => false,
+            MemoryKind::CallerLocation => true,
+            MemoryKind::Machine(k) => k.may_leak(),
+        }
+    }
+}
+
+impl<T: fmt::Display> fmt::Display for MemoryKind<T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            MemoryKind::Stack => write!(f, "stack variable"),
+            MemoryKind::CallerLocation => write!(f, "caller location"),
+            MemoryKind::Machine(m) => write!(f, "{m}"),
+        }
+    }
+}
+
+/// The return value of `get_alloc_info` indicates the "kind" of the allocation.
+#[derive(Copy, Clone, PartialEq, Debug)]
+pub enum AllocKind {
+    /// A regular live data allocation.
+    LiveData,
+    /// A function allocation (that fn ptrs point to).
+    Function,
+    /// A (symbolic) vtable allocation.
+    VTable,
+    /// A dead allocation.
+    Dead,
+}
+
+/// The value of a function pointer.
+#[derive(Debug, Copy, Clone)]
+pub enum FnVal<'tcx, Other> {
+    Instance(Instance<'tcx>),
+    Other(Other),
+}
+
+impl<'tcx, Other> FnVal<'tcx, Other> {
+    pub fn as_instance(self) -> InterpResult<'tcx, Instance<'tcx>> {
+        match self {
+            FnVal::Instance(instance) => Ok(instance),
+            FnVal::Other(_) => {
+                throw_unsup_format!("'foreign' function pointers are not supported in this context")
+            }
+        }
+    }
+}
+
+// `Memory` has to depend on the `Machine` because some of its operations
+// (e.g., `get`) call a `Machine` hook.
+pub struct Memory<'mir, 'tcx, M: Machine<'mir, 'tcx>> {
+    /// Allocations local to this instance of the interpreter. The kind
+    /// helps ensure that the same mechanism is used for allocation and
+    /// deallocation. When an allocation is not found here, it is a
+    /// global and looked up in the `tcx` for read access. Some machines may
+    /// have to mutate this map even on a read-only access to a global (because
+    /// they do pointer provenance tracking and the allocations in `tcx` have
+    /// the wrong type), so we let the machine override this type.
+    /// Either way, if the machine allows writing to a global, doing so will
+    /// create a copy of the global allocation here.
+    // FIXME: this should not be public, but interning currently needs access to it
+    pub(super) alloc_map: M::MemoryMap,
+
+    /// Map for "extra" function pointers.
+    extra_fn_ptr_map: FxIndexMap<AllocId, M::ExtraFnVal>,
+
+    /// To be able to compare pointers with null, and to check alignment for accesses
+    /// to ZSTs (where pointers may dangle), we keep track of the size even for allocations
+    /// that do not exist any more.
+    // FIXME: this should not be public, but interning currently needs access to it
+    pub(super) dead_alloc_map: FxIndexMap<AllocId, (Size, Align)>,
+
+    /// This stores whether we are currently doing reads purely for the purpose of validation.
+    /// Those reads do not trigger the machine's hooks for memory reads.
+    /// Needless to say, this must only be set with great care!
+    validation_in_progress: Cell<bool>,
+}
+
+/// A reference to some allocation that was already bounds-checked for the given region
+/// and had the on-access machine hooks run.
+#[derive(Copy, Clone)]
+pub struct AllocRef<'a, 'tcx, Prov: Provenance, Extra, Bytes: AllocBytes = Box<[u8]>> {
+    alloc: &'a Allocation<Prov, Extra, Bytes>,
+    range: AllocRange,
+    tcx: TyCtxt<'tcx>,
+    alloc_id: AllocId,
+}
+/// A reference to some allocation that was already bounds-checked for the given region
+/// and had the on-access machine hooks run.
+pub struct AllocRefMut<'a, 'tcx, Prov: Provenance, Extra, Bytes: AllocBytes = Box<[u8]>> {
+    alloc: &'a mut Allocation<Prov, Extra, Bytes>,
+    range: AllocRange,
+    tcx: TyCtxt<'tcx>,
+    alloc_id: AllocId,
+}
+
+impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
+    pub fn new() -> Self {
+        Memory {
+            alloc_map: M::MemoryMap::default(),
+            extra_fn_ptr_map: FxIndexMap::default(),
+            dead_alloc_map: FxIndexMap::default(),
+            validation_in_progress: Cell::new(false),
+        }
+    }
+
+    /// This is used by [priroda](https://github.com/oli-obk/priroda)
+    pub fn alloc_map(&self) -> &M::MemoryMap {
+        &self.alloc_map
+    }
+}
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+    /// Call this to turn untagged "global" pointers (obtained via `tcx`) into
+    /// the machine pointer to the allocation. Must never be used
+    /// for any other pointers, nor for TLS statics.
+    ///
+    /// Using the resulting pointer represents a *direct* access to that memory
+    /// (e.g. by directly using a `static`),
+    /// as opposed to access through a pointer that was created by the program.
+    ///
+    /// This function can fail only if `ptr` points to an `extern static`.
+    #[inline]
+    pub fn global_base_pointer(
+        &self,
+        ptr: Pointer<CtfeProvenance>,
+    ) -> InterpResult<'tcx, Pointer<M::Provenance>> {
+        let alloc_id = ptr.provenance.alloc_id();
+        // We need to handle `extern static`.
+        match self.tcx.try_get_global_alloc(alloc_id) {
+            Some(GlobalAlloc::Static(def_id)) if self.tcx.is_thread_local_static(def_id) => {
+                // Thread-local statics do not have a constant address. They *must* be accessed via
+                // `ThreadLocalRef`; we can never have a pointer to them as a regular constant value.
+                bug!("global memory cannot point to thread-local static")
+            }
+            Some(GlobalAlloc::Static(def_id)) if self.tcx.is_foreign_item(def_id) => {
+                return M::extern_static_base_pointer(self, def_id);
+            }
+            _ => {}
+        }
+        // And we need to get the provenance.
+        M::adjust_alloc_base_pointer(self, ptr)
+    }
+
+    pub fn fn_ptr(&mut self, fn_val: FnVal<'tcx, M::ExtraFnVal>) -> Pointer<M::Provenance> {
+        let id = match fn_val {
+            FnVal::Instance(instance) => self.tcx.reserve_and_set_fn_alloc(instance),
+            FnVal::Other(extra) => {
+                // FIXME(RalfJung): Should we have a cache here?
+                let id = self.tcx.reserve_alloc_id();
+                let old = self.memory.extra_fn_ptr_map.insert(id, extra);
+                assert!(old.is_none());
+                id
+            }
+        };
+        // Functions are global allocations, so make sure we get the right base pointer.
+        // We know this is not an `extern static` so this cannot fail.
+        self.global_base_pointer(Pointer::from(id)).unwrap()
+    }
+
+    pub fn allocate_ptr(
+        &mut self,
+        size: Size,
+        align: Align,
+        kind: MemoryKind<M::MemoryKind>,
+    ) -> InterpResult<'tcx, Pointer<M::Provenance>> {
+        let alloc = if M::PANIC_ON_ALLOC_FAIL {
+            Allocation::uninit(size, align)
+        } else {
+            Allocation::try_uninit(size, align)?
+        };
+        self.allocate_raw_ptr(alloc, kind)
+    }
+
+    pub fn allocate_bytes_ptr(
+        &mut self,
+        bytes: &[u8],
+        align: Align,
+        kind: MemoryKind<M::MemoryKind>,
+        mutability: Mutability,
+    ) -> InterpResult<'tcx, Pointer<M::Provenance>> {
+        let alloc = Allocation::from_bytes(bytes, align, mutability);
+        self.allocate_raw_ptr(alloc, kind)
+    }
+
+    /// This can fail only if `alloc` contains provenance.
+    pub fn allocate_raw_ptr(
+        &mut self,
+        alloc: Allocation,
+        kind: MemoryKind<M::MemoryKind>,
+    ) -> InterpResult<'tcx, Pointer<M::Provenance>> {
+        let id = self.tcx.reserve_alloc_id();
+        debug_assert_ne!(
+            Some(kind),
+            M::GLOBAL_KIND.map(MemoryKind::Machine),
+            "dynamically allocating global memory"
+        );
+        let alloc = M::adjust_allocation(self, id, Cow::Owned(alloc), Some(kind))?;
+        self.memory.alloc_map.insert(id, (kind, alloc.into_owned()));
+        M::adjust_alloc_base_pointer(self, Pointer::from(id))
+    }
+
+    pub fn reallocate_ptr(
+        &mut self,
+        ptr: Pointer<Option<M::Provenance>>,
+        old_size_and_align: Option<(Size, Align)>,
+        new_size: Size,
+        new_align: Align,
+        kind: MemoryKind<M::MemoryKind>,
+    ) -> InterpResult<'tcx, Pointer<M::Provenance>> {
+        let (alloc_id, offset, _prov) = self.ptr_get_alloc_id(ptr)?;
+        if offset.bytes() != 0 {
+            throw_ub_custom!(
+                fluent::const_eval_realloc_or_alloc_with_offset,
+                ptr = format!("{ptr:?}"),
+                kind = "realloc"
+            );
+        }
+
+        // For simplicity's sake, we implement reallocate as "alloc, copy, dealloc".
+        // This happens so rarely, the perf advantage is outweighed by the maintenance cost.
+        let new_ptr = self.allocate_ptr(new_size, new_align, kind)?;
+        let old_size = match old_size_and_align {
+            Some((size, _align)) => size,
+            None => self.get_alloc_raw(alloc_id)?.size(),
+        };
+        // This will also call the access hooks.
+        self.mem_copy(ptr, new_ptr.into(), old_size.min(new_size), /*nonoverlapping*/ true)?;
+        self.deallocate_ptr(ptr, old_size_and_align, kind)?;
+
+        Ok(new_ptr)
+    }
+
+    #[instrument(skip(self), level = "debug")]
+    pub fn deallocate_ptr(
+        &mut self,
+        ptr: Pointer<Option<M::Provenance>>,
+        old_size_and_align: Option<(Size, Align)>,
+        kind: MemoryKind<M::MemoryKind>,
+    ) -> InterpResult<'tcx> {
+        let (alloc_id, offset, prov) = self.ptr_get_alloc_id(ptr)?;
+        trace!("deallocating: {alloc_id:?}");
+
+        if offset.bytes() != 0 {
+            throw_ub_custom!(
+                fluent::const_eval_realloc_or_alloc_with_offset,
+                ptr = format!("{ptr:?}"),
+                kind = "dealloc",
+            );
+        }
+
+        let Some((alloc_kind, mut alloc)) = self.memory.alloc_map.remove(&alloc_id) else {
+            // Deallocating global memory -- always an error
+            return Err(match self.tcx.try_get_global_alloc(alloc_id) {
+                Some(GlobalAlloc::Function(..)) => {
+                    err_ub_custom!(
+                        fluent::const_eval_invalid_dealloc,
+                        alloc_id = alloc_id,
+                        kind = "fn",
+                    )
+                }
+                Some(GlobalAlloc::VTable(..)) => {
+                    err_ub_custom!(
+                        fluent::const_eval_invalid_dealloc,
+                        alloc_id = alloc_id,
+                        kind = "vtable",
+                    )
+                }
+                Some(GlobalAlloc::Static(..) | GlobalAlloc::Memory(..)) => {
+                    err_ub_custom!(
+                        fluent::const_eval_invalid_dealloc,
+                        alloc_id = alloc_id,
+                        kind = "static_mem"
+                    )
+                }
+                None => err_ub!(PointerUseAfterFree(alloc_id, CheckInAllocMsg::MemoryAccessTest)),
+            }
+            .into());
+        };
+
+        if alloc.mutability.is_not() {
+            throw_ub_custom!(fluent::const_eval_dealloc_immutable, alloc = alloc_id,);
+        }
+        if alloc_kind != kind {
+            throw_ub_custom!(
+                fluent::const_eval_dealloc_kind_mismatch,
+                alloc = alloc_id,
+                alloc_kind = format!("{alloc_kind}"),
+                kind = format!("{kind}"),
+            );
+        }
+        if let Some((size, align)) = old_size_and_align {
+            if size != alloc.size() || align != alloc.align {
+                throw_ub_custom!(
+                    fluent::const_eval_dealloc_incorrect_layout,
+                    alloc = alloc_id,
+                    size = alloc.size().bytes(),
+                    align = alloc.align.bytes(),
+                    size_found = size.bytes(),
+                    align_found = align.bytes(),
+                )
+            }
+        }
+
+        // Let the machine take some extra action
+        let size = alloc.size();
+        M::before_memory_deallocation(
+            self.tcx,
+            &mut self.machine,
+            &mut alloc.extra,
+            (alloc_id, prov),
+            size,
+            alloc.align,
+        )?;
+
+        // Don't forget to remember size and align of this now-dead allocation
+        let old = self.memory.dead_alloc_map.insert(alloc_id, (size, alloc.align));
+        if old.is_some() {
+            bug!("Nothing can be deallocated twice");
+        }
+
+        Ok(())
+    }
+
+    /// Internal helper function to determine the allocation and offset of a pointer (if any).
+    #[inline(always)]
+    fn get_ptr_access(
+        &self,
+        ptr: Pointer<Option<M::Provenance>>,
+        size: Size,
+    ) -> InterpResult<'tcx, Option<(AllocId, Size, M::ProvenanceExtra)>> {
+        self.check_and_deref_ptr(
+            ptr,
+            size,
+            CheckInAllocMsg::MemoryAccessTest,
+            |alloc_id, offset, prov| {
+                let (size, align) = self
+                    .get_live_alloc_size_and_align(alloc_id, CheckInAllocMsg::MemoryAccessTest)?;
+                Ok((size, align, (alloc_id, offset, prov)))
+            },
+        )
+    }
+
+    /// Check if the given pointer points to live memory of the given `size`.
+    /// The caller can control the error message for the out-of-bounds case.
+    #[inline(always)]
+    pub fn check_ptr_access(
+        &self,
+        ptr: Pointer<Option<M::Provenance>>,
+        size: Size,
+        msg: CheckInAllocMsg,
+    ) -> InterpResult<'tcx> {
+        self.check_and_deref_ptr(ptr, size, msg, |alloc_id, _, _| {
+            let (size, align) = self.get_live_alloc_size_and_align(alloc_id, msg)?;
+            Ok((size, align, ()))
+        })?;
+        Ok(())
+    }
+
+    /// Low-level helper function to check if a ptr is in-bounds and potentially return a reference
+    /// to the allocation it points to. Supports both shared and mutable references, as the actual
+    /// checking is offloaded to a helper closure.
+    ///
+    /// Returns `None` if and only if the size is 0.
+    fn check_and_deref_ptr<T>(
+        &self,
+        ptr: Pointer<Option<M::Provenance>>,
+        size: Size,
+        msg: CheckInAllocMsg,
+        alloc_size: impl FnOnce(
+            AllocId,
+            Size,
+            M::ProvenanceExtra,
+        ) -> InterpResult<'tcx, (Size, Align, T)>,
+    ) -> InterpResult<'tcx, Option<T>> {
+        Ok(match self.ptr_try_get_alloc_id(ptr) {
+            Err(addr) => {
+                // We couldn't get a proper allocation. This is only okay if the access size is 0,
+                // and the address is not null.
+                if size.bytes() > 0 || addr == 0 {
+                    throw_ub!(DanglingIntPointer(addr, msg));
+                }
+                None
+            }
+            Ok((alloc_id, offset, prov)) => {
+                let (alloc_size, _alloc_align, ret_val) = alloc_size(alloc_id, offset, prov)?;
+                // Test bounds. This also ensures non-null.
+                // It is sufficient to check this for the end pointer. Also check for overflow!
+                if offset.checked_add(size, &self.tcx).map_or(true, |end| end > alloc_size) {
+                    throw_ub!(PointerOutOfBounds {
+                        alloc_id,
+                        alloc_size,
+                        ptr_offset: self.target_usize_to_isize(offset.bytes()),
+                        ptr_size: size,
+                        msg,
+                    })
+                }
+                // Ensure we never consider the null pointer dereferenceable.
+                if M::Provenance::OFFSET_IS_ADDR {
+                    assert_ne!(ptr.addr(), Size::ZERO);
+                }
+
+                // We can still be zero-sized in this branch, in which case we have to
+                // return `None`.
+                if size.bytes() == 0 { None } else { Some(ret_val) }
+            }
+        })
+    }
+
+    pub(super) fn check_misalign(
+        &self,
+        misaligned: Option<Misalignment>,
+        msg: CheckAlignMsg,
+    ) -> InterpResult<'tcx> {
+        if let Some(misaligned) = misaligned {
+            throw_ub!(AlignmentCheckFailed(misaligned, msg))
+        }
+        Ok(())
+    }
+
+    pub(super) fn is_ptr_misaligned(
+        &self,
+        ptr: Pointer<Option<M::Provenance>>,
+        align: Align,
+    ) -> Option<Misalignment> {
+        if !M::enforce_alignment(self) || align.bytes() == 1 {
+            return None;
+        }
+
+        #[inline]
+        fn offset_misalignment(offset: u64, align: Align) -> Option<Misalignment> {
+            if offset % align.bytes() == 0 {
+                None
+            } else {
+                // The biggest power of two through which `offset` is divisible.
+                let offset_pow2 = 1 << offset.trailing_zeros();
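+                // E.g., an offset of 12 (0b1100) has two trailing zeros, so `offset_pow2` is 4.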
+                Some(Misalignment { has: Align::from_bytes(offset_pow2).unwrap(), required: align })
+            }
+        }
+
+        match self.ptr_try_get_alloc_id(ptr) {
+            Err(addr) => offset_misalignment(addr, align),
+            Ok((alloc_id, offset, _prov)) => {
+                let (_size, alloc_align, kind) = self.get_alloc_info(alloc_id);
+                if let Some(misalign) =
+                    M::alignment_check(self, alloc_id, alloc_align, kind, offset, align)
+                {
+                    Some(misalign)
+                } else if M::Provenance::OFFSET_IS_ADDR {
+                    // `use_addr_for_alignment_check` can only be true if `OFFSET_IS_ADDR` is true.
+                    offset_misalignment(ptr.addr().bytes(), align)
+                } else {
+                    // Check allocation alignment and offset alignment.
+                    if alloc_align.bytes() < align.bytes() {
+                        Some(Misalignment { has: alloc_align, required: align })
+                    } else {
+                        offset_misalignment(offset.bytes(), align)
+                    }
+                }
+            }
+        }
+    }
+
+    /// Checks a pointer for misalignment.
+    ///
+    /// The error assumes this is checking the pointer used directly for an access.
+    pub fn check_ptr_align(
+        &self,
+        ptr: Pointer<Option<M::Provenance>>,
+        align: Align,
+    ) -> InterpResult<'tcx> {
+        self.check_misalign(self.is_ptr_misaligned(ptr, align), CheckAlignMsg::AccessedPtr)
+    }
+}
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+    /// This function is used by Miri's provenance GC to remove unreachable entries from the dead_alloc_map.
+    pub fn remove_unreachable_allocs(&mut self, reachable_allocs: &FxHashSet<AllocId>) {
+        // Unlike the other GC helpers, which check whether an `AllocId` is found in the interpreter
+        // or is still live, every ID in this map refers to a dead allocation, so there is no
+        // liveness check to do here.
+        #[allow(rustc::potential_query_instability)] // Only used from Miri, not queries.
+        self.memory.dead_alloc_map.retain(|id, _| reachable_allocs.contains(id));
+    }
+}
+
+/// Allocation accessors
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+    /// Helper function to obtain a global (tcx) allocation.
+    /// This attempts to return a reference to an existing allocation if
+    /// one can be found in `tcx`. That, however, is only possible if `tcx` and
+    /// this machine use the same pointer provenance, so it is indirected through
+    /// `M::adjust_allocation`.
+    fn get_global_alloc(
+        &self,
+        id: AllocId,
+        is_write: bool,
+    ) -> InterpResult<'tcx, Cow<'tcx, Allocation<M::Provenance, M::AllocExtra, M::Bytes>>> {
+        let (alloc, def_id) = match self.tcx.try_get_global_alloc(id) {
+            Some(GlobalAlloc::Memory(mem)) => {
+                // Memory of a constant or promoted or anonymous memory referenced by a static.
+                (mem, None)
+            }
+            Some(GlobalAlloc::Function(..)) => throw_ub!(DerefFunctionPointer(id)),
+            Some(GlobalAlloc::VTable(..)) => throw_ub!(DerefVTablePointer(id)),
+            None => throw_ub!(PointerUseAfterFree(id, CheckInAllocMsg::MemoryAccessTest)),
+            Some(GlobalAlloc::Static(def_id)) => {
+                assert!(self.tcx.is_static(def_id));
+                // Thread-local statics do not have a constant address. They *must* be accessed via
+                // `ThreadLocalRef`; we can never have a pointer to them as a regular constant value.
+                assert!(!self.tcx.is_thread_local_static(def_id));
+                // Notice that every static has two `AllocId`s that resolve to the same thing
+                // here: one maps to `GlobalAlloc::Static` (the "lazy" ID), and the other maps
+                // to `GlobalAlloc::Memory` (the "resolved" ID, returned by
+                // `eval_static_initializer`).
+                // The resolved ID is never used by the interpreted program, it is hidden.
+                // This is relied upon for soundness of const-patterns; a pointer to the resolved
+                // ID would "sidestep" the checks that make sure consts do not point to statics!
+                // The `GlobalAlloc::Memory` branch here is still reachable though; when a static
+                // contains a reference to memory that was created during its evaluation (i.e., not
+                // to another static), those inner references only exist in "resolved" form.
+                if self.tcx.is_foreign_item(def_id) {
+                    // This is unreachable in Miri, but can happen in CTFE where we actually *do* support
+                    // referencing arbitrary (declared) extern statics.
+                    throw_unsup!(ExternStatic(def_id));
+                }
+
+                // We don't give a span -- statics don't need that, they cannot be generic or associated.
+                let val = self.ctfe_query(|tcx| tcx.eval_static_initializer(def_id))?;
+                (val, Some(def_id))
+            }
+        };
+        M::before_access_global(self.tcx, &self.machine, id, alloc, def_id, is_write)?;
+        // We got tcx memory. Let the machine initialize its "extra" stuff.
+        M::adjust_allocation(
+            self,
+            id, // always use the ID we got as input, not the "hidden" one.
+            Cow::Borrowed(alloc.inner()),
+            M::GLOBAL_KIND.map(MemoryKind::Machine),
+        )
+    }
+
+    /// Gives raw access to the `Allocation`, without bounds or alignment checks.
+    /// The caller is responsible for calling the access hooks!
+    ///
+    /// You almost certainly want to use `get_ptr_alloc`/`get_ptr_alloc_mut` instead.
+    fn get_alloc_raw(
+        &self,
+        id: AllocId,
+    ) -> InterpResult<'tcx, &Allocation<M::Provenance, M::AllocExtra, M::Bytes>> {
+        // The error type of the inner closure here is somewhat funny. We have two
+        // ways of "erroring": An actual error, or because we got a reference from
+        // `get_global_alloc` that we can actually use directly without inserting anything anywhere.
+        // So the error type is `InterpResult<'tcx, &Allocation<M::Provenance>>`.
+        let a = self.memory.alloc_map.get_or(id, || {
+            let alloc = self.get_global_alloc(id, /*is_write*/ false).map_err(Err)?;
+            match alloc {
+                Cow::Borrowed(alloc) => {
+                    // We got a ref, cheaply return that as an "error" so that the
+                    // map does not get mutated.
+                    Err(Ok(alloc))
+                }
+                Cow::Owned(alloc) => {
+                    // Need to put it into the map and return a ref to that
+                    let kind = M::GLOBAL_KIND.expect(
+                        "I got a global allocation that I have to copy but the machine does \
+                            not expect that to happen",
+                    );
+                    Ok((MemoryKind::Machine(kind), alloc))
+                }
+            }
+        });
+        // Now unpack that funny error type
+        match a {
+            Ok(a) => Ok(&a.1),
+            Err(a) => a,
+        }
+    }
+
+    /// Bounds-checked *but not align-checked* allocation access.
+    pub fn get_ptr_alloc<'a>(
+        &'a self,
+        ptr: Pointer<Option<M::Provenance>>,
+        size: Size,
+    ) -> InterpResult<'tcx, Option<AllocRef<'a, 'tcx, M::Provenance, M::AllocExtra, M::Bytes>>>
+    {
+        let ptr_and_alloc = self.check_and_deref_ptr(
+            ptr,
+            size,
+            CheckInAllocMsg::MemoryAccessTest,
+            |alloc_id, offset, prov| {
+                if !self.memory.validation_in_progress.get() {
+                    // We want to call the hook on *all* accesses that involve an AllocId,
+                    // including zero-sized accesses. That means we have to do it here
+                    // rather than below in the `Some` branch.
+                    M::before_alloc_read(self, alloc_id)?;
+                }
+                let alloc = self.get_alloc_raw(alloc_id)?;
+                Ok((alloc.size(), alloc.align, (alloc_id, offset, prov, alloc)))
+            },
+        )?;
+
+        if let Some((alloc_id, offset, prov, alloc)) = ptr_and_alloc {
+            let range = alloc_range(offset, size);
+            if !self.memory.validation_in_progress.get() {
+                M::before_memory_read(
+                    self.tcx,
+                    &self.machine,
+                    &alloc.extra,
+                    (alloc_id, prov),
+                    range,
+                )?;
+            }
+            Ok(Some(AllocRef { alloc, range, tcx: *self.tcx, alloc_id }))
+        } else {
+            Ok(None)
+        }
+    }
+
+    /// Return the `extra` field of the given allocation.
+    pub fn get_alloc_extra<'a>(&'a self, id: AllocId) -> InterpResult<'tcx, &'a M::AllocExtra> {
+        Ok(&self.get_alloc_raw(id)?.extra)
+    }
+
+    /// Return the `mutability` field of the given allocation.
+    pub fn get_alloc_mutability<'a>(&'a self, id: AllocId) -> InterpResult<'tcx, Mutability> {
+        Ok(self.get_alloc_raw(id)?.mutability)
+    }
+
+    /// Gives raw mutable access to the `Allocation`, without bounds or alignment checks.
+    /// The caller is responsible for calling the access hooks!
+    ///
+    /// Also returns mutable access to the machine so that the caller can use it in parallel with
+    /// the allocation.
+    fn get_alloc_raw_mut(
+        &mut self,
+        id: AllocId,
+    ) -> InterpResult<'tcx, (&mut Allocation<M::Provenance, M::AllocExtra, M::Bytes>, &mut M)> {
+        // We have "NLL problem case #3" here, which cannot be worked around without loss of
+        // efficiency even for the common case where the key is in the map.
+        // <https://rust-lang.github.io/rfcs/2094-nll.html#problem-case-3-conditional-control-flow-across-functions>
+        // (Cannot use `get_mut_or` since `get_global_alloc` needs `&self`.)
+        if self.memory.alloc_map.get_mut(id).is_none() {
+            // Slow path.
+            // The allocation was not found locally, so look it up as a global.
+            let alloc = self.get_global_alloc(id, /*is_write*/ true)?;
+            let kind = M::GLOBAL_KIND.expect(
+                "I got a global allocation that I have to copy but the machine does \
+                    not expect that to happen",
+            );
+            self.memory.alloc_map.insert(id, (MemoryKind::Machine(kind), alloc.into_owned()));
+        }
+
+        let (_kind, alloc) = self.memory.alloc_map.get_mut(id).unwrap();
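+        // Writing to an immutable allocation (e.g. a read-only global) is undefined behavior.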
+        if alloc.mutability.is_not() {
+            throw_ub!(WriteToReadOnly(id))
+        }
+        Ok((alloc, &mut self.machine))
+    }
+
+    /// Bounds-checked *but not align-checked* allocation access.
+    pub fn get_ptr_alloc_mut<'a>(
+        &'a mut self,
+        ptr: Pointer<Option<M::Provenance>>,
+        size: Size,
+    ) -> InterpResult<'tcx, Option<AllocRefMut<'a, 'tcx, M::Provenance, M::AllocExtra, M::Bytes>>>
+    {
+        let parts = self.get_ptr_access(ptr, size)?;
+        if let Some((alloc_id, offset, prov)) = parts {
+            let tcx = self.tcx;
+            // FIXME: can we somehow avoid looking up the allocation twice here?
+            // We cannot call `get_raw_mut` inside `check_and_deref_ptr` as that would duplicate `&mut self`.
+            let (alloc, machine) = self.get_alloc_raw_mut(alloc_id)?;
+            let range = alloc_range(offset, size);
+            M::before_memory_write(tcx, machine, &mut alloc.extra, (alloc_id, prov), range)?;
+            Ok(Some(AllocRefMut { alloc, range, tcx: *tcx, alloc_id }))
+        } else {
+            Ok(None)
+        }
+    }
+
+    /// Return the `extra` field of the given allocation, along with mutable access to the machine.
+    pub fn get_alloc_extra_mut<'a>(
+        &'a mut self,
+        id: AllocId,
+    ) -> InterpResult<'tcx, (&'a mut M::AllocExtra, &'a mut M)> {
+        let (alloc, machine) = self.get_alloc_raw_mut(id)?;
+        Ok((&mut alloc.extra, machine))
+    }
+
+    /// Check whether an allocation is live. This is faster than calling
+    /// [`InterpCx::get_alloc_info`] if all you need to check is whether the kind is
+    /// [`AllocKind::Dead`] because it doesn't have to look up the type and layout of statics.
+    pub fn is_alloc_live(&self, id: AllocId) -> bool {
+        self.tcx.try_get_global_alloc(id).is_some()
+            || self.memory.alloc_map.contains_key_ref(&id)
+            || self.memory.extra_fn_ptr_map.contains_key(&id)
+    }
+
+    /// Obtain the size and alignment of an allocation, even if that allocation has
+    /// been deallocated.
+    pub fn get_alloc_info(&self, id: AllocId) -> (Size, Align, AllocKind) {
+        // # Regular allocations
+        // Don't use `self.get_raw` here as that will
+        // a) cause cycles in case `id` refers to a static
+        // b) duplicate a global's allocation in Miri
+        if let Some((_, alloc)) = self.memory.alloc_map.get(id) {
+            return (alloc.size(), alloc.align, AllocKind::LiveData);
+        }
+
+        // # Function pointers
+        // (both global from `alloc_map` and local from `extra_fn_ptr_map`)
+        if self.get_fn_alloc(id).is_some() {
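+            // Function "allocations" carry no data, so report size 0 and alignment 1.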
+            return (Size::ZERO, Align::ONE, AllocKind::Function);
+        }
+
+        // # Statics
+        // Can't do this in the match argument, we may get cycle errors since the lock would
+        // be held throughout the match.
+        match self.tcx.try_get_global_alloc(id) {
+            Some(GlobalAlloc::Static(def_id)) => {
+                // Thread-local statics do not have a constant address. They *must* be accessed via
+                // `ThreadLocalRef`; we can never have a pointer to them as a regular constant value.
+                assert!(!self.tcx.is_thread_local_static(def_id));
+
+                let DefKind::Static { nested, .. } = self.tcx.def_kind(def_id) else {
+                    bug!("GlobalAlloc::Static is not a static")
+                };
+
+                let (size, align) = if nested {
+                    // Nested anonymous statics are untyped, so let's get their
+                    // size and alignment from the allocation itself. This always
+                    // succeeds, as the query is fed at DefId creation time, so no
+                    // evaluation actually occurs.
+                    let alloc = self.tcx.eval_static_initializer(def_id).unwrap();
+                    (alloc.0.size(), alloc.0.align)
+                } else {
+                    // Use size and align of the type for everything else. We need
+                    // to do that to
+                    // * avoid cycle errors in case of self-referential statics,
+                    // * be able to get information on extern statics.
+                    let ty = self
+                        .tcx
+                        .type_of(def_id)
+                        .no_bound_vars()
+                        .expect("statics should not have generic parameters");
+                    let layout = self.tcx.layout_of(ParamEnv::empty().and(ty)).unwrap();
+                    assert!(layout.is_sized());
+                    (layout.size, layout.align.abi)
+                };
+                (size, align, AllocKind::LiveData)
+            }
+            Some(GlobalAlloc::Memory(alloc)) => {
+                // Need to duplicate the logic here, because the global allocations have
+                // different associated types than the interpreter-local ones.
+                let alloc = alloc.inner();
+                (alloc.size(), alloc.align, AllocKind::LiveData)
+            }
+            Some(GlobalAlloc::Function(_)) => bug!("We already checked function pointers above"),
+            Some(GlobalAlloc::VTable(..)) => {
+                // No data to be accessed here. But vtables are pointer-aligned.
+                return (Size::ZERO, self.tcx.data_layout.pointer_align.abi, AllocKind::VTable);
+            }
+            // The rest must be dead.
+            None => {
+                // Deallocated pointers are allowed, we should be able to find
+                // them in the map.
+                let (size, align) = *self
+                    .memory
+                    .dead_alloc_map
+                    .get(&id)
+                    .expect("deallocated pointers should all be recorded in `dead_alloc_map`");
+                (size, align, AllocKind::Dead)
+            }
+        }
+    }
+
+    /// Obtain the size and alignment of a *live* allocation.
+    fn get_live_alloc_size_and_align(
+        &self,
+        id: AllocId,
+        msg: CheckInAllocMsg,
+    ) -> InterpResult<'tcx, (Size, Align)> {
+        let (size, align, kind) = self.get_alloc_info(id);
+        if matches!(kind, AllocKind::Dead) {
+            throw_ub!(PointerUseAfterFree(id, msg))
+        }
+        Ok((size, align))
+    }
+
+    fn get_fn_alloc(&self, id: AllocId) -> Option<FnVal<'tcx, M::ExtraFnVal>> {
+        if let Some(extra) = self.memory.extra_fn_ptr_map.get(&id) {
+            Some(FnVal::Other(*extra))
+        } else {
+            match self.tcx.try_get_global_alloc(id) {
+                Some(GlobalAlloc::Function(instance)) => Some(FnVal::Instance(instance)),
+                _ => None,
+            }
+        }
+    }
+
+    pub fn get_ptr_fn(
+        &self,
+        ptr: Pointer<Option<M::Provenance>>,
+    ) -> InterpResult<'tcx, FnVal<'tcx, M::ExtraFnVal>> {
+        trace!("get_ptr_fn({:?})", ptr);
+        let (alloc_id, offset, _prov) = self.ptr_get_alloc_id(ptr)?;
+        if offset.bytes() != 0 {
+            throw_ub!(InvalidFunctionPointer(Pointer::new(alloc_id, offset)))
+        }
+        self.get_fn_alloc(alloc_id)
+            .ok_or_else(|| err_ub!(InvalidFunctionPointer(Pointer::new(alloc_id, offset))).into())
+    }
+
+    pub fn get_ptr_vtable(
+        &self,
+        ptr: Pointer<Option<M::Provenance>>,
+    ) -> InterpResult<'tcx, (Ty<'tcx>, Option<ty::PolyExistentialTraitRef<'tcx>>)> {
+        trace!("get_ptr_vtable({:?})", ptr);
+        let (alloc_id, offset, _tag) = self.ptr_get_alloc_id(ptr)?;
+        if offset.bytes() != 0 {
+            throw_ub!(InvalidVTablePointer(Pointer::new(alloc_id, offset)))
+        }
+        match self.tcx.try_get_global_alloc(alloc_id) {
+            Some(GlobalAlloc::VTable(ty, trait_ref)) => Ok((ty, trait_ref)),
+            _ => throw_ub!(InvalidVTablePointer(Pointer::new(alloc_id, offset))),
+        }
+    }
+
+    pub fn alloc_mark_immutable(&mut self, id: AllocId) -> InterpResult<'tcx> {
+        self.get_alloc_raw_mut(id)?.0.mutability = Mutability::Not;
+        Ok(())
+    }
+
+    /// Create a lazy debug printer that prints the given allocation and all allocations it points
+    /// to, recursively.
+    #[must_use]
+    pub fn dump_alloc<'a>(&'a self, id: AllocId) -> DumpAllocs<'a, 'mir, 'tcx, M> {
+        self.dump_allocs(vec![id])
+    }
+
+    /// Create a lazy debug printer for a list of allocations and all allocations they point to,
+    /// recursively.
+    #[must_use]
+    pub fn dump_allocs<'a>(&'a self, mut allocs: Vec<AllocId>) -> DumpAllocs<'a, 'mir, 'tcx, M> {
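+        // Sort and deduplicate so each requested allocation is printed at most once.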
+        allocs.sort();
+        allocs.dedup();
+        DumpAllocs { ecx: self, allocs }
+    }
+
+    /// Print the allocation's bytes, without any nested allocations.
+    pub fn print_alloc_bytes_for_diagnostics(&self, id: AllocId) -> String {
+        // Using the "raw" access to avoid the `before_alloc_read` hook, we specifically
+        // want to be able to read all memory for diagnostics, even if that is cyclic.
+        let alloc = self.get_alloc_raw(id).unwrap();
+        let mut bytes = String::new();
+        if alloc.size() != Size::ZERO {
+            bytes = "\n".into();
+            // FIXME(translation) there might be pieces that are translatable.
+            rustc_middle::mir::pretty::write_allocation_bytes(*self.tcx, alloc, &mut bytes, "    ")
+                .unwrap();
+        }
+        bytes
+    }
+
+    /// Find leaked allocations. Allocations reachable from `static_roots` or a `Global` allocation
+    /// are not considered leaked, nor are allocations whose kind's `may_leak()` returns true.
+    pub fn find_leaked_allocations(
+        &self,
+        static_roots: &[AllocId],
+    ) -> Vec<(AllocId, MemoryKind<M::MemoryKind>, Allocation<M::Provenance, M::AllocExtra, M::Bytes>)>
+    {
+        // Collect the set of allocations that are *reachable* from `Global` allocations.
+        let reachable = {
+            let mut reachable = FxHashSet::default();
+            let global_kind = M::GLOBAL_KIND.map(MemoryKind::Machine);
+            let mut todo: Vec<_> =
+                self.memory.alloc_map.filter_map_collect(move |&id, &(kind, _)| {
+                    if Some(kind) == global_kind { Some(id) } else { None }
+                });
+            todo.extend(static_roots);
+            while let Some(id) = todo.pop() {
+                if reachable.insert(id) {
+                    // This is a new allocation; push the allocations it points to onto `todo`.
+                    if let Some((_, alloc)) = self.memory.alloc_map.get(id) {
+                        todo.extend(
+                            alloc.provenance().provenances().filter_map(|prov| prov.get_alloc_id()),
+                        );
+                    }
+                }
+            }
+            reachable
+        };
+
+        // All allocations that are *not* `reachable` and *not* `may_leak` are considered leaking.
+        self.memory.alloc_map.filter_map_collect(|id, (kind, alloc)| {
+            if kind.may_leak() || reachable.contains(id) {
+                None
+            } else {
+                Some((*id, *kind, alloc.clone()))
+            }
+        })
+    }
+
+    /// Runs the close in "validation" mode, which means the machine's memory read hooks will be
+    /// suppressed. Needless to say, this must only be set with great care! Cannot be nested.
+    pub(super) fn run_for_validation<R>(&self, f: impl FnOnce() -> R) -> R {
+        assert!(
+            self.memory.validation_in_progress.replace(true) == false,
+            "`validation_in_progress` was already set"
+        );
+        let res = f();
+        assert!(
+            self.memory.validation_in_progress.replace(false) == true,
+            "`validation_in_progress` was unset by someone else"
+        );
+        res
+    }
+}
+
+#[doc(hidden)]
+/// There's no way to use this directly; it's just a helper struct for the `dump_alloc(s)` methods.
+pub struct DumpAllocs<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> {
+    ecx: &'a InterpCx<'mir, 'tcx, M>,
+    allocs: Vec<AllocId>,
+}
+
+impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> std::fmt::Debug for DumpAllocs<'a, 'mir, 'tcx, M> {
+    fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        // Cannot be a closure because it is generic in `Prov`, `Extra`.
+        fn write_allocation_track_relocs<'tcx, Prov: Provenance, Extra, Bytes: AllocBytes>(
+            fmt: &mut std::fmt::Formatter<'_>,
+            tcx: TyCtxt<'tcx>,
+            allocs_to_print: &mut VecDeque<AllocId>,
+            alloc: &Allocation<Prov, Extra, Bytes>,
+        ) -> std::fmt::Result {
+            for alloc_id in alloc.provenance().provenances().filter_map(|prov| prov.get_alloc_id())
+            {
+                allocs_to_print.push_back(alloc_id);
+            }
+            write!(fmt, "{}", display_allocation(tcx, alloc))
+        }
+
+        let mut allocs_to_print: VecDeque<_> = self.allocs.iter().copied().collect();
+        // `allocs_printed` contains all allocations that we have already printed.
+        let mut allocs_printed = FxHashSet::default();
+
+        while let Some(id) = allocs_to_print.pop_front() {
+            if !allocs_printed.insert(id) {
+                // Already printed, so skip this.
+                continue;
+            }
+
+            write!(fmt, "{id:?}")?;
+            match self.ecx.memory.alloc_map.get(id) {
+                Some((kind, alloc)) => {
+                    // normal alloc
+                    write!(fmt, " ({kind}, ")?;
+                    write_allocation_track_relocs(
+                        &mut *fmt,
+                        *self.ecx.tcx,
+                        &mut allocs_to_print,
+                        alloc,
+                    )?;
+                }
+                None => {
+                    // global alloc
+                    match self.ecx.tcx.try_get_global_alloc(id) {
+                        Some(GlobalAlloc::Memory(alloc)) => {
+                            write!(fmt, " (unchanged global, ")?;
+                            write_allocation_track_relocs(
+                                &mut *fmt,
+                                *self.ecx.tcx,
+                                &mut allocs_to_print,
+                                alloc.inner(),
+                            )?;
+                        }
+                        Some(GlobalAlloc::Function(func)) => {
+                            write!(fmt, " (fn: {func})")?;
+                        }
+                        Some(GlobalAlloc::VTable(ty, Some(trait_ref))) => {
+                            write!(fmt, " (vtable: impl {trait_ref} for {ty})")?;
+                        }
+                        Some(GlobalAlloc::VTable(ty, None)) => {
+                            write!(fmt, " (vtable: impl <auto trait> for {ty})")?;
+                        }
+                        Some(GlobalAlloc::Static(did)) => {
+                            write!(fmt, " (static: {})", self.ecx.tcx.def_path_str(did))?;
+                        }
+                        None => {
+                            write!(fmt, " (deallocated)")?;
+                        }
+                    }
+                }
+            }
+            writeln!(fmt)?;
+        }
+        Ok(())
+    }
+}
+
+/// Reading and writing.
+impl<'tcx, 'a, Prov: Provenance, Extra, Bytes: AllocBytes>
+    AllocRefMut<'a, 'tcx, Prov, Extra, Bytes>
+{
+    /// `range` is relative to this allocation reference, not the base of the allocation.
+    pub fn write_scalar(&mut self, range: AllocRange, val: Scalar<Prov>) -> InterpResult<'tcx> {
+        let range = self.range.subrange(range);
+        debug!("write_scalar at {:?}{range:?}: {val:?}", self.alloc_id);
+        Ok(self
+            .alloc
+            .write_scalar(&self.tcx, range, val)
+            .map_err(|e| e.to_interp_error(self.alloc_id))?)
+    }
+
+    /// `offset` is relative to this allocation reference, not the base of the allocation.
+    pub fn write_ptr_sized(&mut self, offset: Size, val: Scalar<Prov>) -> InterpResult<'tcx> {
+        self.write_scalar(alloc_range(offset, self.tcx.data_layout().pointer_size), val)
+    }
+
+    /// Mark the entire referenced range as uninitialized
+    pub fn write_uninit(&mut self) -> InterpResult<'tcx> {
+        Ok(self
+            .alloc
+            .write_uninit(&self.tcx, self.range)
+            .map_err(|e| e.to_interp_error(self.alloc_id))?)
+    }
+}
+
+impl<'tcx, 'a, Prov: Provenance, Extra, Bytes: AllocBytes> AllocRef<'a, 'tcx, Prov, Extra, Bytes> {
+    /// `range` is relative to this allocation reference, not the base of the allocation.
+    pub fn read_scalar(
+        &self,
+        range: AllocRange,
+        read_provenance: bool,
+    ) -> InterpResult<'tcx, Scalar<Prov>> {
+        let range = self.range.subrange(range);
+        let res = self
+            .alloc
+            .read_scalar(&self.tcx, range, read_provenance)
+            .map_err(|e| e.to_interp_error(self.alloc_id))?;
+        debug!("read_scalar at {:?}{range:?}: {res:?}", self.alloc_id);
+        Ok(res)
+    }
+
+    /// `range` is relative to this allocation reference, not the base of the allocation.
+    pub fn read_integer(&self, range: AllocRange) -> InterpResult<'tcx, Scalar<Prov>> {
+        self.read_scalar(range, /*read_provenance*/ false)
+    }
+
+    /// `offset` is relative to this allocation reference, not the base of the allocation.
+    pub fn read_pointer(&self, offset: Size) -> InterpResult<'tcx, Scalar<Prov>> {
+        self.read_scalar(
+            alloc_range(offset, self.tcx.data_layout().pointer_size),
+            /*read_provenance*/ true,
+        )
+    }
+
+    /// Returns the bytes covered by this allocation reference, with provenance stripped.
+    pub fn get_bytes_strip_provenance<'b>(&'b self) -> InterpResult<'tcx, &'a [u8]> {
+        Ok(self
+            .alloc
+            .get_bytes_strip_provenance(&self.tcx, self.range)
+            .map_err(|e| e.to_interp_error(self.alloc_id))?)
+    }
+
+    /// Returns whether the allocation has provenance anywhere in the range of the `AllocRef`.
+    pub fn has_provenance(&self) -> bool {
+        !self.alloc.provenance().range_empty(self.range, &self.tcx)
+    }
+}
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+    /// Reads the given number of bytes from memory, and strips their provenance if possible.
+    /// Returns them as a slice.
+    ///
+    /// Performs appropriate bounds checks.
+    pub fn read_bytes_ptr_strip_provenance(
+        &self,
+        ptr: Pointer<Option<M::Provenance>>,
+        size: Size,
+    ) -> InterpResult<'tcx, &[u8]> {
+        let Some(alloc_ref) = self.get_ptr_alloc(ptr, size)? else {
+            // zero-sized access
+            return Ok(&[]);
+        };
+        // Side-step AllocRef and directly access the underlying bytes more efficiently.
+        // (We are staying inside the bounds here so all is good.)
+        Ok(alloc_ref
+            .alloc
+            .get_bytes_strip_provenance(&alloc_ref.tcx, alloc_ref.range)
+            .map_err(|e| e.to_interp_error(alloc_ref.alloc_id))?)
+    }
+
+    /// Writes the given stream of bytes into memory.
+    ///
+    /// Performs appropriate bounds checks.
+    pub fn write_bytes_ptr(
+        &mut self,
+        ptr: Pointer<Option<M::Provenance>>,
+        src: impl IntoIterator<Item = u8>,
+    ) -> InterpResult<'tcx> {
+        let mut src = src.into_iter();
+        let (lower, upper) = src.size_hint();
+        let len = upper.expect("can only write bounded iterators");
+        assert_eq!(lower, len, "can only write iterators with a precise length");
+
+        let size = Size::from_bytes(len);
+        let Some(alloc_ref) = self.get_ptr_alloc_mut(ptr, size)? else {
+            // zero-sized access
+            assert_matches!(src.next(), None, "iterator said it was empty but returned an element");
+            return Ok(());
+        };
+
+        // Side-step AllocRef and directly access the underlying bytes more efficiently.
+        // (We are staying inside the bounds here so all is good.)
+        let alloc_id = alloc_ref.alloc_id;
+        let bytes = alloc_ref
+            .alloc
+            .get_bytes_mut(&alloc_ref.tcx, alloc_ref.range)
+            .map_err(move |e| e.to_interp_error(alloc_id))?;
+        // `zip` would stop when the first iterator ends; we want to definitely
+        // cover all of `bytes`.
+        for dest in bytes {
+            *dest = src.next().expect("iterator was shorter than it said it would be");
+        }
+        assert_matches!(src.next(), None, "iterator was longer than it said it would be");
+        Ok(())
+    }
+
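+    /// Copies `size` bytes from `src` to `dest`.
+    /// This is a thin wrapper around `mem_copy_repeatedly` with a single copy.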
+    pub fn mem_copy(
+        &mut self,
+        src: Pointer<Option<M::Provenance>>,
+        dest: Pointer<Option<M::Provenance>>,
+        size: Size,
+        nonoverlapping: bool,
+    ) -> InterpResult<'tcx> {
+        self.mem_copy_repeatedly(src, dest, size, 1, nonoverlapping)
+    }
+
+    pub fn mem_copy_repeatedly(
+        &mut self,
+        src: Pointer<Option<M::Provenance>>,
+        dest: Pointer<Option<M::Provenance>>,
+        size: Size,
+        num_copies: u64,
+        nonoverlapping: bool,
+    ) -> InterpResult<'tcx> {
+        let tcx = self.tcx;
+        // We need to do our own bounds-checks.
+        let src_parts = self.get_ptr_access(src, size)?;
+        let dest_parts = self.get_ptr_access(dest, size * num_copies)?; // `Size` multiplication
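+        // The destination must be large enough to hold `num_copies` copies laid out back-to-back.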
+
+        // FIXME: we look up both allocations twice here, once before for the `check_ptr_access`
+        // and once below to get the underlying `&[mut] Allocation`.
+
+        // Source alloc preparations and access hooks.
+        let Some((src_alloc_id, src_offset, src_prov)) = src_parts else {
+            // Zero-sized *source*, that means dest is also zero-sized and we have nothing to do.
+            return Ok(());
+        };
+        let src_alloc = self.get_alloc_raw(src_alloc_id)?;
+        let src_range = alloc_range(src_offset, size);
+        assert!(!self.memory.validation_in_progress.get(), "we can't be copying during validation");
+        M::before_memory_read(
+            tcx,
+            &self.machine,
+            &src_alloc.extra,
+            (src_alloc_id, src_prov),
+            src_range,
+        )?;
+        // We need the `dest` ptr for the next operation, so we get it now.
+        // We already did the source checks and called the hooks so we are good to return early.
+        let Some((dest_alloc_id, dest_offset, dest_prov)) = dest_parts else {
+            // Zero-sized *destination*.
+            return Ok(());
+        };
+
+        // Prepare getting source provenance.
+        let src_bytes = src_alloc.get_bytes_unchecked(src_range).as_ptr(); // raw ptr, so we can also get a ptr to the destination allocation
+        // first copy the provenance to a temporary buffer, because
+        // `get_bytes_mut` will clear the provenance, which is correct,
+        // since we don't want to keep any provenance at the target.
+        // This will also error if copying partial provenance is not supported.
+        let provenance = src_alloc
+            .provenance()
+            .prepare_copy(src_range, dest_offset, num_copies, self)
+            .map_err(|e| e.to_interp_error(dest_alloc_id))?;
+        // Prepare a copy of the initialization mask.
+        let init = src_alloc.init_mask().prepare_copy(src_range);
+
+        // Destination alloc preparations and access hooks.
+        let (dest_alloc, extra) = self.get_alloc_raw_mut(dest_alloc_id)?;
+        let dest_range = alloc_range(dest_offset, size * num_copies);
+        M::before_memory_write(
+            tcx,
+            extra,
+            &mut dest_alloc.extra,
+            (dest_alloc_id, dest_prov),
+            dest_range,
+        )?;
+        let dest_bytes = dest_alloc
+            .get_bytes_mut_ptr(&tcx, dest_range)
+            .map_err(|e| e.to_interp_error(dest_alloc_id))?
+            .as_mut_ptr();
+
+        if init.no_bytes_init() {
+            // Fast path: If all bytes are `uninit` then there is nothing to copy. The target range
+            // is marked as uninitialized but we otherwise omit changing the byte representation which may
+            // be arbitrary for uninitialized bytes.
+            // This also avoids writing to the target bytes so that the backing allocation is never
+            // touched if the bytes stay uninitialized for the whole interpreter execution. On contemporary
+            // operating systems this can avoid physically allocating the page.
+            dest_alloc
+                .write_uninit(&tcx, dest_range)
+                .map_err(|e| e.to_interp_error(dest_alloc_id))?;
+            // We can forget about the provenance, this is all not initialized anyway.
+            return Ok(());
+        }
+
+        // SAFETY: The above indexing would have panicked if there weren't at least `size` bytes
+        // behind `src` and `dest`. Also, we use the overlapping-safe `ptr::copy` if `src` and
+        // `dest` could possibly overlap.
+        // The pointers above remain valid even if the `HashMap` table is moved around because they
+        // point into the `Vec` storing the bytes.
+        unsafe {
+            if src_alloc_id == dest_alloc_id {
+                if nonoverlapping {
+                    // `Size` additions
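+                    // Two `size`-byte ranges in the same allocation overlap iff one starts inside the other.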
+                    if (src_offset <= dest_offset && src_offset + size > dest_offset)
+                        || (dest_offset <= src_offset && dest_offset + size > src_offset)
+                    {
+                        throw_ub_custom!(fluent::const_eval_copy_nonoverlapping_overlapping);
+                    }
+                }
+            }
+
+            let size_in_bytes = size.bytes_usize();
+            // For particularly large arrays (where this is perf-sensitive) it's common that
+            // we're writing a single byte repeatedly. So, optimize that case to a memset.
+            if size_in_bytes == 1 {
+                debug_assert!(num_copies >= 1); // we already handled the zero-sized cases above.
+                // SAFETY: `src_bytes` would be read from anyway by `copy` below (num_copies >= 1).
+                let value = *src_bytes;
+                dest_bytes.write_bytes(value, (size * num_copies).bytes_usize());
+            } else if src_alloc_id == dest_alloc_id {
+                let mut dest_ptr = dest_bytes;
+                for _ in 0..num_copies {
+                    ptr::copy(src_bytes, dest_ptr, size_in_bytes);
+                    dest_ptr = dest_ptr.add(size_in_bytes);
+                }
+            } else {
+                let mut dest_ptr = dest_bytes;
+                for _ in 0..num_copies {
+                    ptr::copy_nonoverlapping(src_bytes, dest_ptr, size_in_bytes);
+                    dest_ptr = dest_ptr.add(size_in_bytes);
+                }
+            }
+        }
+
+        // now fill in all the "init" data
+        dest_alloc.init_mask_apply_copy(
+            init,
+            alloc_range(dest_offset, size), // just a single copy (i.e., not full `dest_range`)
+            num_copies,
+        );
+        // copy the provenance to the destination
+        dest_alloc.provenance_apply_copy(provenance);
+
+        Ok(())
+    }
+}
+
+/// Machine pointer introspection.
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+    /// Test if this value might be null.
+    /// If the machine does not support ptr-to-int casts, this is conservative.
+    pub fn scalar_may_be_null(&self, scalar: Scalar<M::Provenance>) -> InterpResult<'tcx, bool> {
+        Ok(match scalar.try_to_int() {
+            Ok(int) => int.is_null(),
+            Err(_) => {
+                // Can only happen during CTFE.
+                let ptr = scalar.to_pointer(self)?;
+                match self.ptr_try_get_alloc_id(ptr) {
+                    Ok((alloc_id, offset, _)) => {
+                        let (size, _align, _kind) = self.get_alloc_info(alloc_id);
+                        // If the pointer is out-of-bounds, it may be null.
+                        // Note that one-past-the-end (offset == size) is still inbounds, and never null.
+                        offset > size
+                    }
+                    Err(_offset) => bug!("a non-int scalar is always a pointer"),
+                }
+            }
+        })
+    }
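+
+    // Illustrative sketch of the conservative logic above, for a pointer into some
+    // allocation of size 4 during CTFE (hypothetical `ecx` and `ptr_at_offset` helper):
+    //
+    //     let one_past_end = Scalar::from_pointer(ptr_at_offset(4), &ecx);
+    //     assert!(!ecx.scalar_may_be_null(one_past_end)?); // offset == size: inbounds, never null
+    //     let out_of_bounds = Scalar::from_pointer(ptr_at_offset(5), &ecx);
+    //     assert!(ecx.scalar_may_be_null(out_of_bounds)?); // offset > size: might be null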
+
+    /// Turning a "maybe pointer" into a proper pointer (and some information
+    /// about where it points), or an absolute address.
+    ///
+    /// The result must be used immediately; it is not allowed to convert
+    /// the returned data back into a `Pointer` and store that in machine state.
+    /// (In fact that's not even possible since `M::ProvenanceExtra` is generic and
+    /// we don't have an operation to turn it back into `M::Provenance`.)
+    pub fn ptr_try_get_alloc_id(
+        &self,
+        ptr: Pointer<Option<M::Provenance>>,
+    ) -> Result<(AllocId, Size, M::ProvenanceExtra), u64> {
+        match ptr.into_pointer_or_addr() {
+            Ok(ptr) => match M::ptr_get_alloc(self, ptr) {
+                Some((alloc_id, offset, extra)) => Ok((alloc_id, offset, extra)),
+                None => {
+                    assert!(M::Provenance::OFFSET_IS_ADDR);
+                    let (_, addr) = ptr.into_parts();
+                    Err(addr.bytes())
+                }
+            },
+            Err(addr) => Err(addr.bytes()),
+        }
+    }
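+
+    // Illustrative sketch of how callers consume this (hypothetical `ecx` and `ptr`):
+    //
+    //     match ecx.ptr_try_get_alloc_id(ptr) {
+    //         Ok((alloc_id, offset, _extra)) => {
+    //             // `ptr` points `offset` bytes into the allocation `alloc_id`.
+    //         }
+    //         Err(addr) => {
+    //             // `ptr` carried no provenance; all we have is the absolute address `addr`.
+    //         }
+    //     }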
+
+    /// Turning a "maybe pointer" into a proper pointer (and some information about where it points).
+    ///
+    /// The result must be used immediately; it is not allowed to convert
+    /// the returned data back into a `Pointer` and store that in machine state.
+    /// (In fact that's not even possible since `M::ProvenanceExtra` is generic and
+    /// we don't have an operation to turn it back into `M::Provenance`.)
+    #[inline(always)]
+    pub fn ptr_get_alloc_id(
+        &self,
+        ptr: Pointer<Option<M::Provenance>>,
+    ) -> InterpResult<'tcx, (AllocId, Size, M::ProvenanceExtra)> {
+        self.ptr_try_get_alloc_id(ptr).map_err(|offset| {
+            err_ub!(DanglingIntPointer(offset, CheckInAllocMsg::InboundsTest)).into()
+        })
+    }
+}
diff --git a/compiler/rustc_const_eval/src/interpret/mod.rs b/compiler/rustc_const_eval/src/interpret/mod.rs
new file mode 100644
index 00000000000..474d35b2aa3
--- /dev/null
+++ b/compiler/rustc_const_eval/src/interpret/mod.rs
@@ -0,0 +1,43 @@
+//! An interpreter for MIR used in CTFE and by miri
+
+mod cast;
+mod discriminant;
+mod eval_context;
+mod intern;
+mod intrinsics;
+mod machine;
+mod memory;
+mod operand;
+mod operator;
+mod place;
+mod projection;
+mod step;
+mod terminator;
+mod traits;
+mod util;
+mod validity;
+mod visitor;
+
+pub use rustc_middle::mir::interpret::*; // have all the `interpret` symbols in one place: here
+
+pub use self::eval_context::{format_interp_error, Frame, FrameInfo, InterpCx, StackPopCleanup};
+pub use self::intern::{
+    intern_const_alloc_for_constprop, intern_const_alloc_recursive, HasStaticRootDefId, InternKind,
+};
+pub use self::machine::{compile_time_machine, AllocMap, Machine, MayLeak, StackPopJump};
+pub use self::memory::{AllocKind, AllocRef, AllocRefMut, FnVal, Memory, MemoryKind};
+pub use self::operand::{ImmTy, Immediate, OpTy, Readable};
+pub use self::place::{MPlaceTy, MemPlaceMeta, PlaceTy, Writeable};
+pub use self::projection::{OffsetMode, Projectable};
+pub use self::terminator::FnArg;
+pub use self::validity::{CtfeValidationMode, RefTracking};
+pub use self::visitor::ValueVisitor;
+
+use self::{
+    operand::Operand,
+    place::{MemPlace, Place},
+};
+
+pub(crate) use self::intrinsics::eval_nullary_intrinsic;
+pub(crate) use self::util::create_static_alloc;
+use eval_context::{from_known_layout, mir_assign_valid_types};
diff --git a/compiler/rustc_const_eval/src/interpret/operand.rs b/compiler/rustc_const_eval/src/interpret/operand.rs
new file mode 100644
index 00000000000..e49067a2f00
--- /dev/null
+++ b/compiler/rustc_const_eval/src/interpret/operand.rs
@@ -0,0 +1,798 @@
+//! Functions concerning immediate values and operands, and reading from operands.
+//! All high-level functions to read from memory work on operands as sources.
+
+use std::assert_matches::assert_matches;
+
+use either::{Either, Left, Right};
+
+use rustc_hir::def::Namespace;
+use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
+use rustc_middle::ty::print::{FmtPrinter, PrettyPrinter};
+use rustc_middle::ty::{ConstInt, Ty, TyCtxt};
+use rustc_middle::{mir, ty};
+use rustc_target::abi::{self, Abi, HasDataLayout, Size};
+
+use super::{
+    alloc_range, from_known_layout, mir_assign_valid_types, CtfeProvenance, InterpCx, InterpResult,
+    MPlaceTy, Machine, MemPlace, MemPlaceMeta, OffsetMode, PlaceTy, Pointer, Projectable,
+    Provenance, Scalar,
+};
+
+/// An `Immediate` represents a single immediate self-contained Rust value.
+///
+/// For optimization of a few very common cases, there is also a representation for a pair of
+/// primitive values (`ScalarPair`). It allows Miri to avoid making allocations for checked binary
+/// operations and wide pointers. This idea was taken from rustc's codegen.
+/// In particular, thanks to `ScalarPair`, arithmetic operations and casts can be entirely
+/// defined on `Immediate`, and do not have to work with a `Place`.
+#[derive(Copy, Clone, Debug)]
+pub enum Immediate<Prov: Provenance = CtfeProvenance> {
+    /// A single scalar value (must have *initialized* `Scalar` ABI).
+    Scalar(Scalar<Prov>),
+    /// A pair of two scalar values (must have `ScalarPair` ABI where both fields are
+    /// `Scalar::Initialized`).
+    ScalarPair(Scalar<Prov>, Scalar<Prov>),
+    /// A value of fully uninitialized memory. Can have arbitrary size and layout, but must be sized.
+    Uninit,
+}
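+
+// Illustrative sketch of which variant a value ends up in (hypothetical `data_ptr` and
+// `ecx`): a plain integer is a single `Scalar`, a wide pointer such as `&[u8]` is a
+// `ScalarPair` of data pointer and length, and fully uninitialized data stays `Uninit`.
+//
+//     let int: Immediate = Scalar::from_u32(42).into();    // Immediate::Scalar(..)
+//     let slice = Immediate::new_slice(data_ptr, 3, &ecx);  // Immediate::ScalarPair(ptr, 3)
+//     let uninit = Immediate::<CtfeProvenance>::Uninit;     // e.g. a local that was never written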
+
+impl<Prov: Provenance> From<Scalar<Prov>> for Immediate<Prov> {
+    #[inline(always)]
+    fn from(val: Scalar<Prov>) -> Self {
+        Immediate::Scalar(val)
+    }
+}
+
+impl<Prov: Provenance> Immediate<Prov> {
+    pub fn new_pointer_with_meta(
+        ptr: Pointer<Option<Prov>>,
+        meta: MemPlaceMeta<Prov>,
+        cx: &impl HasDataLayout,
+    ) -> Self {
+        let ptr = Scalar::from_maybe_pointer(ptr, cx);
+        match meta {
+            MemPlaceMeta::None => Immediate::from(ptr),
+            MemPlaceMeta::Meta(meta) => Immediate::ScalarPair(ptr, meta),
+        }
+    }
+
+    pub fn new_slice(ptr: Pointer<Option<Prov>>, len: u64, cx: &impl HasDataLayout) -> Self {
+        Immediate::ScalarPair(
+            Scalar::from_maybe_pointer(ptr, cx),
+            Scalar::from_target_usize(len, cx),
+        )
+    }
+
+    pub fn new_dyn_trait(
+        val: Pointer<Option<Prov>>,
+        vtable: Pointer<Option<Prov>>,
+        cx: &impl HasDataLayout,
+    ) -> Self {
+        Immediate::ScalarPair(
+            Scalar::from_maybe_pointer(val, cx),
+            Scalar::from_maybe_pointer(vtable, cx),
+        )
+    }
+
+    #[inline]
+    #[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980)
+    pub fn to_scalar(self) -> Scalar<Prov> {
+        match self {
+            Immediate::Scalar(val) => val,
+            Immediate::ScalarPair(..) => bug!("Got a scalar pair where a scalar was expected"),
+            Immediate::Uninit => bug!("Got uninit where a scalar was expected"),
+        }
+    }
+
+    #[inline]
+    #[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980)
+    pub fn to_scalar_pair(self) -> (Scalar<Prov>, Scalar<Prov>) {
+        match self {
+            Immediate::ScalarPair(val1, val2) => (val1, val2),
+            Immediate::Scalar(..) => bug!("Got a scalar where a scalar pair was expected"),
+            Immediate::Uninit => bug!("Got uninit where a scalar pair was expected"),
+        }
+    }
+
+    /// Returns the scalar from the first component and optionally the 2nd component as metadata.
+    #[inline]
+    #[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980)
+    pub fn to_scalar_and_meta(self) -> (Scalar<Prov>, MemPlaceMeta<Prov>) {
+        match self {
+            Immediate::ScalarPair(val1, val2) => (val1, MemPlaceMeta::Meta(val2)),
+            Immediate::Scalar(val) => (val, MemPlaceMeta::None),
+            Immediate::Uninit => bug!("Got uninit where a scalar or scalar pair was expected"),
+        }
+    }
+}
+
+// ScalarPair needs a type to interpret, so we often have an immediate and a type together
+// as input for binary and cast operations.
+#[derive(Clone)]
+pub struct ImmTy<'tcx, Prov: Provenance = CtfeProvenance> {
+    imm: Immediate<Prov>,
+    pub layout: TyAndLayout<'tcx>,
+}
+
+impl<Prov: Provenance> std::fmt::Display for ImmTy<'_, Prov> {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        /// Helper function for printing a scalar to a FmtPrinter
+        fn p<'a, 'tcx, Prov: Provenance>(
+            cx: &mut FmtPrinter<'a, 'tcx>,
+            s: Scalar<Prov>,
+            ty: Ty<'tcx>,
+        ) -> Result<(), std::fmt::Error> {
+            match s {
+                Scalar::Int(int) => cx.pretty_print_const_scalar_int(int, ty, true),
+                Scalar::Ptr(ptr, _sz) => {
+                    // Just print the ptr value. `pretty_print_const_scalar_ptr` would also try to
+                    // print what it points to, which would fail since it has no access to the local
+                    // memory.
+                    cx.pretty_print_const_pointer(ptr, ty)
+                }
+            }
+        }
+        ty::tls::with(|tcx| {
+            match self.imm {
+                Immediate::Scalar(s) => {
+                    if let Some(ty) = tcx.lift(self.layout.ty) {
+                        let s =
+                            FmtPrinter::print_string(tcx, Namespace::ValueNS, |cx| p(cx, s, ty))?;
+                        f.write_str(&s)?;
+                        return Ok(());
+                    }
+                    write!(f, "{:x}: {}", s, self.layout.ty)
+                }
+                Immediate::ScalarPair(a, b) => {
+                    // FIXME(oli-obk): at least print tuples and slices nicely
+                    write!(f, "({:x}, {:x}): {}", a, b, self.layout.ty)
+                }
+                Immediate::Uninit => {
+                    write!(f, "uninit: {}", self.layout.ty)
+                }
+            }
+        })
+    }
+}
+
+impl<Prov: Provenance> std::fmt::Debug for ImmTy<'_, Prov> {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        // Printing `layout` results in too much noise; just print a nice version of the type.
+        f.debug_struct("ImmTy")
+            .field("imm", &self.imm)
+            .field("ty", &format_args!("{}", self.layout.ty))
+            .finish()
+    }
+}
+
+impl<'tcx, Prov: Provenance> std::ops::Deref for ImmTy<'tcx, Prov> {
+    type Target = Immediate<Prov>;
+    #[inline(always)]
+    fn deref(&self) -> &Immediate<Prov> {
+        &self.imm
+    }
+}
+
+impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
+    #[inline]
+    pub fn from_scalar(val: Scalar<Prov>, layout: TyAndLayout<'tcx>) -> Self {
+        debug_assert!(layout.abi.is_scalar(), "`ImmTy::from_scalar` on non-scalar layout");
+        ImmTy { imm: val.into(), layout }
+    }
+
+    #[inline]
+    pub fn from_scalar_pair(a: Scalar<Prov>, b: Scalar<Prov>, layout: TyAndLayout<'tcx>) -> Self {
+        debug_assert!(
+            matches!(layout.abi, Abi::ScalarPair(..)),
+            "`ImmTy::from_scalar_pair` on non-scalar-pair layout"
+        );
+        let imm = Immediate::ScalarPair(a, b);
+        ImmTy { imm, layout }
+    }
+
+    #[inline(always)]
+    pub fn from_immediate(imm: Immediate<Prov>, layout: TyAndLayout<'tcx>) -> Self {
+        debug_assert!(
+            match (imm, layout.abi) {
+                (Immediate::Scalar(..), Abi::Scalar(..)) => true,
+                (Immediate::ScalarPair(..), Abi::ScalarPair(..)) => true,
+                (Immediate::Uninit, _) if layout.is_sized() => true,
+                _ => false,
+            },
+            "immediate {imm:?} does not fit to layout {layout:?}",
+        );
+        ImmTy { imm, layout }
+    }
+
+    #[inline]
+    pub fn uninit(layout: TyAndLayout<'tcx>) -> Self {
+        debug_assert!(layout.is_sized(), "immediates must be sized");
+        ImmTy { imm: Immediate::Uninit, layout }
+    }
+
+    #[inline]
+    pub fn try_from_uint(i: impl Into<u128>, layout: TyAndLayout<'tcx>) -> Option<Self> {
+        Some(Self::from_scalar(Scalar::try_from_uint(i, layout.size)?, layout))
+    }
+    #[inline]
+    pub fn from_uint(i: impl Into<u128>, layout: TyAndLayout<'tcx>) -> Self {
+        Self::from_scalar(Scalar::from_uint(i, layout.size), layout)
+    }
+
+    #[inline]
+    pub fn try_from_int(i: impl Into<i128>, layout: TyAndLayout<'tcx>) -> Option<Self> {
+        Some(Self::from_scalar(Scalar::try_from_int(i, layout.size)?, layout))
+    }
+
+    #[inline]
+    pub fn from_int(i: impl Into<i128>, layout: TyAndLayout<'tcx>) -> Self {
+        Self::from_scalar(Scalar::from_int(i, layout.size), layout)
+    }
+
+    #[inline]
+    pub fn from_bool(b: bool, tcx: TyCtxt<'tcx>) -> Self {
+        let layout = tcx.layout_of(ty::ParamEnv::reveal_all().and(tcx.types.bool)).unwrap();
+        Self::from_scalar(Scalar::from_bool(b), layout)
+    }
+
+    #[inline]
+    pub fn to_const_int(self) -> ConstInt {
+        assert!(self.layout.ty.is_integral());
+        let int = self.to_scalar().assert_int();
+        ConstInt::new(int, self.layout.ty.is_signed(), self.layout.ty.is_ptr_sized_integral())
+    }
+
+    /// Compute the "sub-immediate" that is located within the `base` at the given offset with the
+    /// given layout.
+    // Not called `offset` to avoid confusion with the trait method.
+    fn offset_(&self, offset: Size, layout: TyAndLayout<'tcx>, cx: &impl HasDataLayout) -> Self {
+        debug_assert!(layout.is_sized(), "unsized immediates are not a thing");
+        // `ImmTy` has already been checked to be in-bounds, so we can just check directly if this
+        // remains in-bounds. This cannot actually be violated since projections are type-checked
+        // and bounds-checked.
+        assert!(
+            offset + layout.size <= self.layout.size,
+            "attempting to project to field at offset {} with size {} into immediate with layout {:#?}",
+            offset.bytes(),
+            layout.size.bytes(),
+            self.layout,
+        );
+        // This makes several assumptions about what layouts we will encounter; we match what
+        // codegen does as well as we can (see `extract_field` in `rustc_codegen_ssa/src/mir/operand.rs`).
+        let inner_val: Immediate<_> = match (**self, self.layout.abi) {
+            // If the entire value is uninit, then so is the field (can happen in ConstProp).
+            (Immediate::Uninit, _) => Immediate::Uninit,
+            // If the field is uninhabited, we can forget the data (can happen in ConstProp).
+            // `enum S { A(!), B, C }` is an example of an enum with Scalar layout that
+            // has an `Uninhabited` variant, which means this case is possible.
+            _ if layout.abi.is_uninhabited() => Immediate::Uninit,
+            // the field contains no information, can be left uninit
+            // (Scalar/ScalarPair can contain even aligned ZST, not just 1-ZST)
+            _ if layout.is_zst() => Immediate::Uninit,
+            // some fieldless enum variants can have non-zero size but still `Aggregate` ABI... try
+            // to detect those here and also give them no data
+            _ if matches!(layout.abi, Abi::Aggregate { .. })
+                && matches!(&layout.fields, abi::FieldsShape::Arbitrary { offsets, .. } if offsets.len() == 0) =>
+            {
+                Immediate::Uninit
+            }
+            // the field covers the entire type
+            _ if layout.size == self.layout.size => {
+                assert_eq!(offset.bytes(), 0);
+                assert!(
+                    match (self.layout.abi, layout.abi) {
+                        (Abi::Scalar(..), Abi::Scalar(..)) => true,
+                        (Abi::ScalarPair(..), Abi::ScalarPair(..)) => true,
+                        _ => false,
+                    },
+                    "cannot project into {} immediate with equally-sized field {}\nouter ABI: {:#?}\nfield ABI: {:#?}",
+                    self.layout.ty,
+                    layout.ty,
+                    self.layout.abi,
+                    layout.abi,
+                );
+                **self
+            }
+            // extract fields from types with `ScalarPair` ABI
+            (Immediate::ScalarPair(a_val, b_val), Abi::ScalarPair(a, b)) => {
+                assert!(matches!(layout.abi, Abi::Scalar(..)));
+                Immediate::from(if offset.bytes() == 0 {
+                    debug_assert_eq!(layout.size, a.size(cx));
+                    a_val
+                } else {
+                    debug_assert_eq!(offset, a.size(cx).align_to(b.align(cx).abi));
+                    debug_assert_eq!(layout.size, b.size(cx));
+                    b_val
+                })
+            }
+            // everything else is a bug
+            _ => bug!("invalid field access on immediate {}, layout {:#?}", self, self.layout),
+        };
+
+        ImmTy::from_immediate(inner_val, layout)
+    }
+}
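+
+// Illustrative sketch of the projection performed by `offset_` above, for a `(u32, bool)`
+// pair with `ScalarPair` ABI (hypothetical `ecx` and layouts):
+//
+//     let pair = ImmTy::from_scalar_pair(Scalar::from_u32(7), Scalar::from_bool(true), pair_layout);
+//     let a = pair.offset_(Size::ZERO, u32_layout, &ecx);           // ImmTy holding 7_u32
+//     let b = pair.offset_(Size::from_bytes(4), bool_layout, &ecx); // ImmTy holding true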
+
+impl<'tcx, Prov: Provenance> Projectable<'tcx, Prov> for ImmTy<'tcx, Prov> {
+    #[inline(always)]
+    fn layout(&self) -> TyAndLayout<'tcx> {
+        self.layout
+    }
+
+    #[inline(always)]
+    fn meta(&self) -> MemPlaceMeta<Prov> {
+        debug_assert!(self.layout.is_sized()); // unsized ImmTy can only exist temporarily and should never reach this here
+        MemPlaceMeta::None
+    }
+
+    fn offset_with_meta<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
+        &self,
+        offset: Size,
+        _mode: OffsetMode,
+        meta: MemPlaceMeta<Prov>,
+        layout: TyAndLayout<'tcx>,
+        ecx: &InterpCx<'mir, 'tcx, M>,
+    ) -> InterpResult<'tcx, Self> {
+        assert_matches!(meta, MemPlaceMeta::None); // we can't store this anywhere anyway
+        Ok(self.offset_(offset, layout, ecx))
+    }
+
+    fn to_op<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
+        &self,
+        _ecx: &InterpCx<'mir, 'tcx, M>,
+    ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+        Ok(self.clone().into())
+    }
+}
+
+/// An `Operand` is the result of computing a `mir::Operand`. It can be immediate,
+/// or still in memory. The latter is an optimization, to delay reading that chunk of
+/// memory and to avoid having to store arbitrary-sized data here.
+#[derive(Copy, Clone, Debug)]
+pub(super) enum Operand<Prov: Provenance = CtfeProvenance> {
+    Immediate(Immediate<Prov>),
+    Indirect(MemPlace<Prov>),
+}
+
+#[derive(Clone)]
+pub struct OpTy<'tcx, Prov: Provenance = CtfeProvenance> {
+    op: Operand<Prov>, // Keep this private; it helps enforce invariants.
+    pub layout: TyAndLayout<'tcx>,
+}
+
+impl<Prov: Provenance> std::fmt::Debug for OpTy<'_, Prov> {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        // Printing `layout` results in too much noise; just print a nice version of the type.
+        f.debug_struct("OpTy")
+            .field("op", &self.op)
+            .field("ty", &format_args!("{}", self.layout.ty))
+            .finish()
+    }
+}
+
+impl<'tcx, Prov: Provenance> From<ImmTy<'tcx, Prov>> for OpTy<'tcx, Prov> {
+    #[inline(always)]
+    fn from(val: ImmTy<'tcx, Prov>) -> Self {
+        OpTy { op: Operand::Immediate(val.imm), layout: val.layout }
+    }
+}
+
+impl<'tcx, Prov: Provenance> From<MPlaceTy<'tcx, Prov>> for OpTy<'tcx, Prov> {
+    #[inline(always)]
+    fn from(mplace: MPlaceTy<'tcx, Prov>) -> Self {
+        OpTy { op: Operand::Indirect(*mplace.mplace()), layout: mplace.layout }
+    }
+}
+
+impl<'tcx, Prov: Provenance> OpTy<'tcx, Prov> {
+    #[inline(always)]
+    pub(super) fn op(&self) -> &Operand<Prov> {
+        &self.op
+    }
+}
+
+impl<'tcx, Prov: Provenance> Projectable<'tcx, Prov> for OpTy<'tcx, Prov> {
+    #[inline(always)]
+    fn layout(&self) -> TyAndLayout<'tcx> {
+        self.layout
+    }
+
+    #[inline]
+    fn meta(&self) -> MemPlaceMeta<Prov> {
+        match self.as_mplace_or_imm() {
+            Left(mplace) => mplace.meta(),
+            Right(_) => {
+                debug_assert!(self.layout.is_sized(), "unsized immediates are not a thing");
+                MemPlaceMeta::None
+            }
+        }
+    }
+
+    fn offset_with_meta<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
+        &self,
+        offset: Size,
+        mode: OffsetMode,
+        meta: MemPlaceMeta<Prov>,
+        layout: TyAndLayout<'tcx>,
+        ecx: &InterpCx<'mir, 'tcx, M>,
+    ) -> InterpResult<'tcx, Self> {
+        match self.as_mplace_or_imm() {
+            Left(mplace) => Ok(mplace.offset_with_meta(offset, mode, meta, layout, ecx)?.into()),
+            Right(imm) => {
+                assert_matches!(meta, MemPlaceMeta::None); // no place to store metadata here
+                // Every part of an uninit is uninit.
+                Ok(imm.offset_(offset, layout, ecx).into())
+            }
+        }
+    }
+
+    fn to_op<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
+        &self,
+        _ecx: &InterpCx<'mir, 'tcx, M>,
+    ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+        Ok(self.clone())
+    }
+}
+
+/// The `Readable` trait describes interpreter values that one can read from.
+pub trait Readable<'tcx, Prov: Provenance>: Projectable<'tcx, Prov> {
+    fn as_mplace_or_imm(&self) -> Either<MPlaceTy<'tcx, Prov>, ImmTy<'tcx, Prov>>;
+}
+
+impl<'tcx, Prov: Provenance> Readable<'tcx, Prov> for OpTy<'tcx, Prov> {
+    #[inline(always)]
+    fn as_mplace_or_imm(&self) -> Either<MPlaceTy<'tcx, Prov>, ImmTy<'tcx, Prov>> {
+        self.as_mplace_or_imm()
+    }
+}
+
+impl<'tcx, Prov: Provenance> Readable<'tcx, Prov> for MPlaceTy<'tcx, Prov> {
+    #[inline(always)]
+    fn as_mplace_or_imm(&self) -> Either<MPlaceTy<'tcx, Prov>, ImmTy<'tcx, Prov>> {
+        Left(self.clone())
+    }
+}
+
+impl<'tcx, Prov: Provenance> Readable<'tcx, Prov> for ImmTy<'tcx, Prov> {
+    #[inline(always)]
+    fn as_mplace_or_imm(&self) -> Either<MPlaceTy<'tcx, Prov>, ImmTy<'tcx, Prov>> {
+        Right(self.clone())
+    }
+}
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+    /// Try reading an immediate in memory; this is interesting particularly for `ScalarPair`.
+    /// Returns `None` if the layout does not permit loading this as a value.
+    ///
+    /// This is an internal function; call `read_immediate` instead.
+    fn read_immediate_from_mplace_raw(
+        &self,
+        mplace: &MPlaceTy<'tcx, M::Provenance>,
+    ) -> InterpResult<'tcx, Option<ImmTy<'tcx, M::Provenance>>> {
+        if mplace.layout.is_unsized() {
+            // Don't touch unsized
+            return Ok(None);
+        }
+
+        let Some(alloc) = self.get_place_alloc(mplace)? else {
+            // zero-sized type can be left uninit
+            return Ok(Some(ImmTy::uninit(mplace.layout)));
+        };
+
+        // It may seem like all types with `Scalar` or `ScalarPair` ABI are fair game at this point.
+        // However, `MaybeUninit<u64>` is considered a `Scalar` as far as its layout is concerned --
+        // and yet cannot be represented by an interpreter `Scalar`, since we have to handle the
+        // case where some of the bytes are initialized and others are not. So, we need an extra
+        // check that walks over the type of `mplace` to make sure it is truly correct to treat this
+        // like a `Scalar` (or `ScalarPair`).
+        Ok(match mplace.layout.abi {
+            Abi::Scalar(abi::Scalar::Initialized { value: s, .. }) => {
+                let size = s.size(self);
+                assert_eq!(size, mplace.layout.size, "abi::Scalar size does not match layout size");
+                let scalar = alloc.read_scalar(
+                    alloc_range(Size::ZERO, size),
+                    /*read_provenance*/ matches!(s, abi::Pointer(_)),
+                )?;
+                Some(ImmTy::from_scalar(scalar, mplace.layout))
+            }
+            Abi::ScalarPair(
+                abi::Scalar::Initialized { value: a, .. },
+                abi::Scalar::Initialized { value: b, .. },
+            ) => {
+                // We checked `ptr_align` above, so all fields will have the alignment they need.
+                // We would anyway check against `ptr_align.restrict_for_offset(b_offset)`,
+                // which `ptr.offset(b_offset)` cannot possibly fail to satisfy.
+                let (a_size, b_size) = (a.size(self), b.size(self));
+                let b_offset = a_size.align_to(b.align(self).abi);
+                assert!(b_offset.bytes() > 0); // in `operand_field` we use the offset to tell apart the fields
+                let a_val = alloc.read_scalar(
+                    alloc_range(Size::ZERO, a_size),
+                    /*read_provenance*/ matches!(a, abi::Pointer(_)),
+                )?;
+                let b_val = alloc.read_scalar(
+                    alloc_range(b_offset, b_size),
+                    /*read_provenance*/ matches!(b, abi::Pointer(_)),
+                )?;
+                Some(ImmTy::from_immediate(Immediate::ScalarPair(a_val, b_val), mplace.layout))
+            }
+            _ => {
+                // Neither a scalar nor scalar pair.
+                None
+            }
+        })
+    }
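+
+    // Illustrative note on the caveat above: a `u64` has `Scalar(Initialized)` ABI and takes
+    // the first arm, so it comes back as `Some(ImmTy)`. A `MaybeUninit<u64>` is also
+    // scalar-sized, but its ABI is not the `Initialized` flavor, so it falls through to the
+    // `None` arm and stays in memory, where partially initialized bytes are representable.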
+
+    /// Try returning an immediate for the operand. If the layout does not permit loading this as an
+    /// immediate, return where in memory we can find the data.
+    /// Note that for a given layout, this operation will either always return Left or
+    /// always return Right! Whether it returns Left depends on whether the layout can be
+    /// represented in an `Immediate`, not on which data is stored there currently.
+    ///
+    /// This is an internal function that should not usually be used; call `read_immediate` instead.
+    /// ConstProp needs it, though.
+    pub fn read_immediate_raw(
+        &self,
+        src: &impl Readable<'tcx, M::Provenance>,
+    ) -> InterpResult<'tcx, Either<MPlaceTy<'tcx, M::Provenance>, ImmTy<'tcx, M::Provenance>>> {
+        Ok(match src.as_mplace_or_imm() {
+            Left(ref mplace) => {
+                if let Some(val) = self.read_immediate_from_mplace_raw(mplace)? {
+                    Right(val)
+                } else {
+                    Left(mplace.clone())
+                }
+            }
+            Right(val) => Right(val),
+        })
+    }
+
+    /// Read an immediate from a place, asserting that this is possible with the given layout.
+    ///
+    /// If this succeeds, the `ImmTy` is never `Uninit`.
+    #[inline(always)]
+    pub fn read_immediate(
+        &self,
+        op: &impl Readable<'tcx, M::Provenance>,
+    ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
+        if !matches!(
+            op.layout().abi,
+            Abi::Scalar(abi::Scalar::Initialized { .. })
+                | Abi::ScalarPair(abi::Scalar::Initialized { .. }, abi::Scalar::Initialized { .. })
+        ) {
+            span_bug!(self.cur_span(), "primitive read not possible for type: {}", op.layout().ty);
+        }
+        let imm = self.read_immediate_raw(op)?.right().unwrap();
+        if matches!(*imm, Immediate::Uninit) {
+            throw_ub!(InvalidUninitBytes(None));
+        }
+        Ok(imm)
+    }
+
+    /// Read a scalar from a place
+    pub fn read_scalar(
+        &self,
+        op: &impl Readable<'tcx, M::Provenance>,
+    ) -> InterpResult<'tcx, Scalar<M::Provenance>> {
+        Ok(self.read_immediate(op)?.to_scalar())
+    }
+
+    // Pointer-sized reads are fairly common and need target layout access, so we wrap them in
+    // convenience functions.
+
+    /// Read a pointer from a place.
+    pub fn read_pointer(
+        &self,
+        op: &impl Readable<'tcx, M::Provenance>,
+    ) -> InterpResult<'tcx, Pointer<Option<M::Provenance>>> {
+        self.read_scalar(op)?.to_pointer(self)
+    }
+    /// Read a pointer-sized unsigned integer from a place.
+    pub fn read_target_usize(
+        &self,
+        op: &impl Readable<'tcx, M::Provenance>,
+    ) -> InterpResult<'tcx, u64> {
+        self.read_scalar(op)?.to_target_usize(self)
+    }
+    /// Read a pointer-sized signed integer from a place.
+    pub fn read_target_isize(
+        &self,
+        op: &impl Readable<'tcx, M::Provenance>,
+    ) -> InterpResult<'tcx, i64> {
+        self.read_scalar(op)?.to_target_isize(self)
+    }
+
+    /// Turn the wide MPlace into a string (must already be dereferenced!)
+    pub fn read_str(&self, mplace: &MPlaceTy<'tcx, M::Provenance>) -> InterpResult<'tcx, &str> {
+        let len = mplace.len(self)?;
+        let bytes = self.read_bytes_ptr_strip_provenance(mplace.ptr(), Size::from_bytes(len))?;
+        let str = std::str::from_utf8(bytes).map_err(|err| err_ub!(InvalidStr(err)))?;
+        Ok(str)
+    }
+
+    /// Converts a repr(simd) operand into an operand where `place_index` accesses the SIMD elements.
+    /// Also returns the number of elements.
+    ///
+    /// Can (but does not always) trigger UB if `op` is uninitialized.
+    pub fn operand_to_simd(
+        &self,
+        op: &OpTy<'tcx, M::Provenance>,
+    ) -> InterpResult<'tcx, (MPlaceTy<'tcx, M::Provenance>, u64)> {
+        // Basically we just transmute this place into an array following simd_size_and_type.
+        // This only works in memory, but repr(simd) types should never be immediates anyway.
+        assert!(op.layout.ty.is_simd());
+        match op.as_mplace_or_imm() {
+            Left(mplace) => self.mplace_to_simd(&mplace),
+            Right(imm) => match *imm {
+                Immediate::Uninit => {
+                    throw_ub!(InvalidUninitBytes(None))
+                }
+                Immediate::Scalar(..) | Immediate::ScalarPair(..) => {
+                    bug!("arrays/slices can never have Scalar/ScalarPair layout")
+                }
+            },
+        }
+    }
+
+    /// Read from a local of the current frame.
+    /// Will not access memory, instead an indirect `Operand` is returned.
+    ///
+    /// This is public because it is used by [priroda](https://github.com/oli-obk/priroda) to get an
+    /// OpTy from a local.
+    pub fn local_to_op(
+        &self,
+        local: mir::Local,
+        layout: Option<TyAndLayout<'tcx>>,
+    ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+        let frame = self.frame();
+        let layout = self.layout_of_local(frame, local, layout)?;
+        let op = *frame.locals[local].access()?;
+        if matches!(op, Operand::Immediate(_)) {
+            assert!(!layout.is_unsized());
+        }
+        Ok(OpTy { op, layout })
+    }
+
+    /// Every place can be read from, so we can turn any place into an operand.
+    /// This will definitely return `Indirect` if the place is a `Ptr`, i.e., this
+    /// will never actually read from memory.
+    pub fn place_to_op(
+        &self,
+        place: &PlaceTy<'tcx, M::Provenance>,
+    ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+        match place.as_mplace_or_local() {
+            Left(mplace) => Ok(mplace.into()),
+            Right((local, offset, locals_addr)) => {
+                debug_assert!(place.layout.is_sized()); // only sized locals can ever be `Place::Local`.
+                debug_assert_eq!(locals_addr, self.frame().locals_addr());
+                let base = self.local_to_op(local, None)?;
+                Ok(match offset {
+                    Some(offset) => base.offset(offset, place.layout, self)?,
+                    None => {
+                        // In the common case this hasn't been projected.
+                        debug_assert_eq!(place.layout, base.layout);
+                        base
+                    }
+                })
+            }
+        }
+    }
+
+    /// Evaluate a place with the goal of reading from it. This lets us sometimes
+    /// avoid allocations.
+    pub fn eval_place_to_op(
+        &self,
+        mir_place: mir::Place<'tcx>,
+        layout: Option<TyAndLayout<'tcx>>,
+    ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+        // Do not use the layout passed in as argument if the base we are looking at
+        // here is not the entire place.
+        let layout = if mir_place.projection.is_empty() { layout } else { None };
+
+        let mut op = self.local_to_op(mir_place.local, layout)?;
+        // Using `try_fold` turned out to be bad for performance, hence the loop.
+        for elem in mir_place.projection.iter() {
+            op = self.project(&op, elem)?
+        }
+
+        trace!("eval_place_to_op: got {:?}", op);
+        // Sanity-check the type we ended up with.
+        if cfg!(debug_assertions) {
+            let normalized_place_ty = self
+                .instantiate_from_current_frame_and_normalize_erasing_regions(
+                    mir_place.ty(&self.frame().body.local_decls, *self.tcx).ty,
+                )?;
+            if !mir_assign_valid_types(
+                *self.tcx,
+                self.param_env,
+                self.layout_of(normalized_place_ty)?,
+                op.layout,
+            ) {
+                span_bug!(
+                    self.cur_span(),
+                    "eval_place of a MIR place with type {} produced an interpreter operand with type {}",
+                    normalized_place_ty,
+                    op.layout.ty,
+                )
+            }
+        }
+        Ok(op)
+    }
+
+    /// Evaluate the operand, returning a place where you can then find the data.
+    /// If you already know the layout, you can save two table lookups
+    /// by passing it in here.
+    #[inline]
+    pub fn eval_operand(
+        &self,
+        mir_op: &mir::Operand<'tcx>,
+        layout: Option<TyAndLayout<'tcx>>,
+    ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+        use rustc_middle::mir::Operand::*;
+        let op = match mir_op {
+            // FIXME: do some more logic on `move` to invalidate the old location
+            &Copy(place) | &Move(place) => self.eval_place_to_op(place, layout)?,
+
+            Constant(constant) => {
+                let c = self.instantiate_from_current_frame_and_normalize_erasing_regions(
+                    constant.const_,
+                )?;
+
+                // This can still fail:
+                // * During ConstProp, with `TooGeneric` or since the `required_consts` were not all
+                //   checked yet.
+                // * During CTFE, since promoteds in `const`/`static` initializer bodies can fail.
+                self.eval_mir_constant(&c, Some(constant.span), layout)?
+            }
+        };
+        trace!("{:?}: {:?}", mir_op, op);
+        Ok(op)
+    }
+
+    pub(crate) fn const_val_to_op(
+        &self,
+        val_val: mir::ConstValue<'tcx>,
+        ty: Ty<'tcx>,
+        layout: Option<TyAndLayout<'tcx>>,
+    ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+        // Other cases need layout.
+        let adjust_scalar = |scalar| -> InterpResult<'tcx, _> {
+            Ok(match scalar {
+                Scalar::Ptr(ptr, size) => Scalar::Ptr(self.global_base_pointer(ptr)?, size),
+                Scalar::Int(int) => Scalar::Int(int),
+            })
+        };
+        let layout = from_known_layout(self.tcx, self.param_env, layout, || self.layout_of(ty))?;
+        let imm = match val_val {
+            mir::ConstValue::Indirect { alloc_id, offset } => {
+                // This is const data, no mutation allowed.
+                let ptr = self.global_base_pointer(Pointer::new(
+                    CtfeProvenance::from(alloc_id).as_immutable(),
+                    offset,
+                ))?;
+                return Ok(self.ptr_to_mplace(ptr.into(), layout).into());
+            }
+            mir::ConstValue::Scalar(x) => adjust_scalar(x)?.into(),
+            mir::ConstValue::ZeroSized => Immediate::Uninit,
+            mir::ConstValue::Slice { data, meta } => {
+                // This is const data, no mutation allowed.
+                let alloc_id = self.tcx.reserve_and_set_memory_alloc(data);
+                let ptr = Pointer::new(CtfeProvenance::from(alloc_id).as_immutable(), Size::ZERO);
+                Immediate::new_slice(self.global_base_pointer(ptr)?.into(), meta, self)
+            }
+        };
+        Ok(OpTy { op: Operand::Immediate(imm), layout })
+    }
+}
+
+// Some nodes are used a lot. Make sure they don't unintentionally get bigger.
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+mod size_asserts {
+    use super::*;
+    use rustc_data_structures::static_assert_size;
+    // tidy-alphabetical-start
+    static_assert_size!(Immediate, 48);
+    static_assert_size!(ImmTy<'_>, 64);
+    static_assert_size!(Operand, 56);
+    static_assert_size!(OpTy<'_>, 72);
+    // tidy-alphabetical-end
+}
diff --git a/compiler/rustc_const_eval/src/interpret/operator.rs b/compiler/rustc_const_eval/src/interpret/operator.rs
new file mode 100644
index 00000000000..475c533077a
--- /dev/null
+++ b/compiler/rustc_const_eval/src/interpret/operator.rs
@@ -0,0 +1,510 @@
+use rustc_apfloat::{Float, FloatConvert};
+use rustc_middle::mir;
+use rustc_middle::mir::interpret::{InterpResult, Scalar};
+use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
+use rustc_middle::ty::{self, FloatTy, Ty};
+use rustc_span::symbol::sym;
+use rustc_target::abi::Abi;
+
+use super::{ImmTy, Immediate, InterpCx, Machine, PlaceTy};
+
+use crate::fluent_generated as fluent;
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+    /// Applies the binary operation `op` to the two operands and writes a tuple of the result
+    /// and a boolean signifying the potential overflow to the destination.
+    pub fn binop_with_overflow(
+        &mut self,
+        op: mir::BinOp,
+        left: &ImmTy<'tcx, M::Provenance>,
+        right: &ImmTy<'tcx, M::Provenance>,
+        dest: &PlaceTy<'tcx, M::Provenance>,
+    ) -> InterpResult<'tcx> {
+        let (val, overflowed) = self.overflowing_binary_op(op, left, right)?;
+        debug_assert_eq!(
+            Ty::new_tup(self.tcx.tcx, &[val.layout.ty, self.tcx.types.bool]),
+            dest.layout.ty,
+            "type mismatch for result of {op:?}",
+        );
+        // Write the result to `dest`.
+        if let Abi::ScalarPair(..) = dest.layout.abi {
+            // We can use the optimized path and avoid `place_field` (which might do
+            // `force_allocation`).
+            let pair = Immediate::ScalarPair(val.to_scalar(), Scalar::from_bool(overflowed));
+            self.write_immediate(pair, dest)?;
+        } else {
+            assert!(self.tcx.sess.opts.unstable_opts.randomize_layout);
+            // With randomized layout, `(int, bool)` might cease to be a `ScalarPair`, so we have to
+            // do a component-wise write here. This code path is slower than the above because
+            // `place_field` will have to `force_allocate` locals here.
+            let val_field = self.project_field(dest, 0)?;
+            self.write_scalar(val.to_scalar(), &val_field)?;
+            let overflowed_field = self.project_field(dest, 1)?;
+            self.write_scalar(Scalar::from_bool(overflowed), &overflowed_field)?;
+        }
+        Ok(())
+    }
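+
+    // Illustrative sketch (hypothetical `ecx`, `u8_layout` and a `(u8, bool)` destination):
+    // evaluating `255_u8 + 1` with overflow tracking writes the wrapped value and the flag.
+    //
+    //     let l = ImmTy::from_uint(255_u8, u8_layout);
+    //     let r = ImmTy::from_uint(1_u8, u8_layout);
+    //     ecx.binop_with_overflow(mir::BinOp::Add, &l, &r, &dest)?;
+    //     // `dest` now contains the pair (0_u8, true).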
+
+    /// Applies the binary operation `op` to the arguments and writes the result to the
+    /// destination.
+    pub fn binop_ignore_overflow(
+        &mut self,
+        op: mir::BinOp,
+        left: &ImmTy<'tcx, M::Provenance>,
+        right: &ImmTy<'tcx, M::Provenance>,
+        dest: &PlaceTy<'tcx, M::Provenance>,
+    ) -> InterpResult<'tcx> {
+        let val = self.wrapping_binary_op(op, left, right)?;
+        assert_eq!(val.layout.ty, dest.layout.ty, "type mismatch for result of {op:?}");
+        self.write_immediate(*val, dest)
+    }
+}
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+    fn binary_char_op(
+        &self,
+        bin_op: mir::BinOp,
+        l: char,
+        r: char,
+    ) -> (ImmTy<'tcx, M::Provenance>, bool) {
+        use rustc_middle::mir::BinOp::*;
+
+        let res = match bin_op {
+            Eq => l == r,
+            Ne => l != r,
+            Lt => l < r,
+            Le => l <= r,
+            Gt => l > r,
+            Ge => l >= r,
+            _ => span_bug!(self.cur_span(), "Invalid operation on char: {:?}", bin_op),
+        };
+        (ImmTy::from_bool(res, *self.tcx), false)
+    }
+
+    fn binary_bool_op(
+        &self,
+        bin_op: mir::BinOp,
+        l: bool,
+        r: bool,
+    ) -> (ImmTy<'tcx, M::Provenance>, bool) {
+        use rustc_middle::mir::BinOp::*;
+
+        let res = match bin_op {
+            Eq => l == r,
+            Ne => l != r,
+            Lt => l < r,
+            Le => l <= r,
+            Gt => l > r,
+            Ge => l >= r,
+            BitAnd => l & r,
+            BitOr => l | r,
+            BitXor => l ^ r,
+            _ => span_bug!(self.cur_span(), "Invalid operation on bool: {:?}", bin_op),
+        };
+        (ImmTy::from_bool(res, *self.tcx), false)
+    }
+
+    fn binary_float_op<F: Float + FloatConvert<F> + Into<Scalar<M::Provenance>>>(
+        &self,
+        bin_op: mir::BinOp,
+        layout: TyAndLayout<'tcx>,
+        l: F,
+        r: F,
+    ) -> (ImmTy<'tcx, M::Provenance>, bool) {
+        use rustc_middle::mir::BinOp::*;
+
+        // Performs appropriate non-deterministic adjustments of NaN results.
+        let adjust_nan =
+            |f: F| -> F { if f.is_nan() { M::generate_nan(self, &[l, r]) } else { f } };
+
+        let val = match bin_op {
+            Eq => ImmTy::from_bool(l == r, *self.tcx),
+            Ne => ImmTy::from_bool(l != r, *self.tcx),
+            Lt => ImmTy::from_bool(l < r, *self.tcx),
+            Le => ImmTy::from_bool(l <= r, *self.tcx),
+            Gt => ImmTy::from_bool(l > r, *self.tcx),
+            Ge => ImmTy::from_bool(l >= r, *self.tcx),
+            Add => ImmTy::from_scalar(adjust_nan((l + r).value).into(), layout),
+            Sub => ImmTy::from_scalar(adjust_nan((l - r).value).into(), layout),
+            Mul => ImmTy::from_scalar(adjust_nan((l * r).value).into(), layout),
+            Div => ImmTy::from_scalar(adjust_nan((l / r).value).into(), layout),
+            Rem => ImmTy::from_scalar(adjust_nan((l % r).value).into(), layout),
+            _ => span_bug!(self.cur_span(), "invalid float op: `{:?}`", bin_op),
+        };
+        (val, false)
+    }
+
+    fn binary_int_op(
+        &self,
+        bin_op: mir::BinOp,
+        // passing in raw bits
+        l: u128,
+        left_layout: TyAndLayout<'tcx>,
+        r: u128,
+        right_layout: TyAndLayout<'tcx>,
+    ) -> InterpResult<'tcx, (ImmTy<'tcx, M::Provenance>, bool)> {
+        use rustc_middle::mir::BinOp::*;
+
+        let throw_ub_on_overflow = match bin_op {
+            AddUnchecked => Some(sym::unchecked_add),
+            SubUnchecked => Some(sym::unchecked_sub),
+            MulUnchecked => Some(sym::unchecked_mul),
+            ShlUnchecked => Some(sym::unchecked_shl),
+            ShrUnchecked => Some(sym::unchecked_shr),
+            _ => None,
+        };
+
+        // Shift ops can have an RHS with a different numeric type.
+        if matches!(bin_op, Shl | ShlUnchecked | Shr | ShrUnchecked) {
+            let size = left_layout.size.bits();
+            // The shift offset is implicitly masked to the type size. (This is the one MIR operator
+            // that does *not* directly map to a single LLVM operation.) Compute how much we
+            // actually shift and whether there was an overflow due to shifting too much.
+            let (shift_amount, overflow) = if right_layout.abi.is_signed() {
+                let shift_amount = self.sign_extend(r, right_layout) as i128;
+                let overflow = shift_amount < 0 || shift_amount >= i128::from(size);
+                let masked_amount = (shift_amount as u128) % u128::from(size);
+                debug_assert_eq!(overflow, shift_amount != (masked_amount as i128));
+                (masked_amount, overflow)
+            } else {
+                let shift_amount = r;
+                let masked_amount = shift_amount % u128::from(size);
+                (masked_amount, shift_amount != masked_amount)
+            };
+            let shift_amount = u32::try_from(shift_amount).unwrap(); // we masked so this will always fit
+            // Compute the shifted result.
+            let result = if left_layout.abi.is_signed() {
+                let l = self.sign_extend(l, left_layout) as i128;
+                let result = match bin_op {
+                    Shl | ShlUnchecked => l.checked_shl(shift_amount).unwrap(),
+                    Shr | ShrUnchecked => l.checked_shr(shift_amount).unwrap(),
+                    _ => bug!(),
+                };
+                result as u128
+            } else {
+                match bin_op {
+                    Shl | ShlUnchecked => l.checked_shl(shift_amount).unwrap(),
+                    Shr | ShrUnchecked => l.checked_shr(shift_amount).unwrap(),
+                    _ => bug!(),
+                }
+            };
+            let truncated = self.truncate(result, left_layout);
+
+            if overflow && let Some(intrinsic_name) = throw_ub_on_overflow {
+                throw_ub_custom!(
+                    fluent::const_eval_overflow_shift,
+                    val = if right_layout.abi.is_signed() {
+                        (self.sign_extend(r, right_layout) as i128).to_string()
+                    } else {
+                        r.to_string()
+                    },
+                    name = intrinsic_name
+                );
+            }
+
+            return Ok((ImmTy::from_uint(truncated, left_layout), overflow));
+        }
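+
+        // Illustrative note on the masking above: for `1_u8 << 9`, the shift amount is
+        // masked to `9 % 8 == 1`, so the produced value is `2` and `overflow` is `true`
+        // (because `9 != 1`). Plain `Shl` just reports that flag; `ShlUnchecked` turns it
+        // into the UB error thrown right before the `return` above.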
+
+        // For the remaining ops, the types must be the same on both sides
+        if left_layout.ty != right_layout.ty {
+            span_bug!(
+                self.cur_span(),
+                "invalid asymmetric binary op {bin_op:?}: {l:?} ({l_ty}), {r:?} ({r_ty})",
+                l_ty = left_layout.ty,
+                r_ty = right_layout.ty,
+            )
+        }
+
+        let size = left_layout.size;
+
+        // Operations that need special treatment for signed integers
+        if left_layout.abi.is_signed() {
+            let op: Option<fn(&i128, &i128) -> bool> = match bin_op {
+                Lt => Some(i128::lt),
+                Le => Some(i128::le),
+                Gt => Some(i128::gt),
+                Ge => Some(i128::ge),
+                _ => None,
+            };
+            if let Some(op) = op {
+                let l = self.sign_extend(l, left_layout) as i128;
+                let r = self.sign_extend(r, right_layout) as i128;
+                return Ok((ImmTy::from_bool(op(&l, &r), *self.tcx), false));
+            }
+            let op: Option<fn(i128, i128) -> (i128, bool)> = match bin_op {
+                Div if r == 0 => throw_ub!(DivisionByZero),
+                Rem if r == 0 => throw_ub!(RemainderByZero),
+                Div => Some(i128::overflowing_div),
+                Rem => Some(i128::overflowing_rem),
+                Add | AddUnchecked => Some(i128::overflowing_add),
+                Sub | SubUnchecked => Some(i128::overflowing_sub),
+                Mul | MulUnchecked => Some(i128::overflowing_mul),
+                _ => None,
+            };
+            if let Some(op) = op {
+                let l = self.sign_extend(l, left_layout) as i128;
+                let r = self.sign_extend(r, right_layout) as i128;
+
+                // We need a special check for overflowing Rem and Div since they are *UB*
+                // on overflow, which can happen with "int_min $OP -1".
+                if matches!(bin_op, Rem | Div) {
+                    if l == size.signed_int_min() && r == -1 {
+                        if bin_op == Rem {
+                            throw_ub!(RemainderOverflow)
+                        } else {
+                            throw_ub!(DivisionOverflow)
+                        }
+                    }
+                }
+
+                let (result, oflo) = op(l, r);
+                // This may be out-of-bounds for the result type, so we have to truncate ourselves.
+                // If that truncation loses any information, we have an overflow.
+                let result = result as u128;
+                let truncated = self.truncate(result, left_layout);
+                let overflow = oflo || self.sign_extend(truncated, left_layout) != result;
+                if overflow && let Some(intrinsic_name) = throw_ub_on_overflow {
+                    throw_ub_custom!(fluent::const_eval_overflow, name = intrinsic_name);
+                }
+                return Ok((ImmTy::from_uint(truncated, left_layout), overflow));
+            }
+        }
+
+        let val = match bin_op {
+            Eq => ImmTy::from_bool(l == r, *self.tcx),
+            Ne => ImmTy::from_bool(l != r, *self.tcx),
+
+            Lt => ImmTy::from_bool(l < r, *self.tcx),
+            Le => ImmTy::from_bool(l <= r, *self.tcx),
+            Gt => ImmTy::from_bool(l > r, *self.tcx),
+            Ge => ImmTy::from_bool(l >= r, *self.tcx),
+
+            BitOr => ImmTy::from_uint(l | r, left_layout),
+            BitAnd => ImmTy::from_uint(l & r, left_layout),
+            BitXor => ImmTy::from_uint(l ^ r, left_layout),
+
+            Add | AddUnchecked | Sub | SubUnchecked | Mul | MulUnchecked | Rem | Div => {
+                assert!(!left_layout.abi.is_signed());
+                let op: fn(u128, u128) -> (u128, bool) = match bin_op {
+                    Add | AddUnchecked => u128::overflowing_add,
+                    Sub | SubUnchecked => u128::overflowing_sub,
+                    Mul | MulUnchecked => u128::overflowing_mul,
+                    Div if r == 0 => throw_ub!(DivisionByZero),
+                    Rem if r == 0 => throw_ub!(RemainderByZero),
+                    Div => u128::overflowing_div,
+                    Rem => u128::overflowing_rem,
+                    _ => bug!(),
+                };
+                let (result, oflo) = op(l, r);
+                // Truncate to target type.
+                // If that truncation loses any information, we have an overflow.
+                let truncated = self.truncate(result, left_layout);
+                let overflow = oflo || truncated != result;
+                if overflow && let Some(intrinsic_name) = throw_ub_on_overflow {
+                    throw_ub_custom!(fluent::const_eval_overflow, name = intrinsic_name);
+                }
+                return Ok((ImmTy::from_uint(truncated, left_layout), overflow));
+            }
+
+            _ => span_bug!(
+                self.cur_span(),
+                "invalid binary op {:?}: {:?}, {:?} (both {})",
+                bin_op,
+                l,
+                r,
+                right_layout.ty,
+            ),
+        };
+
+        Ok((val, false))
+    }
+
+    fn binary_ptr_op(
+        &self,
+        bin_op: mir::BinOp,
+        left: &ImmTy<'tcx, M::Provenance>,
+        right: &ImmTy<'tcx, M::Provenance>,
+    ) -> InterpResult<'tcx, (ImmTy<'tcx, M::Provenance>, bool)> {
+        use rustc_middle::mir::BinOp::*;
+
+        match bin_op {
+            // Pointer ops that are always supported.
+            Offset => {
+                let ptr = left.to_scalar().to_pointer(self)?;
+                let offset_count = right.to_scalar().to_target_isize(self)?;
+                let pointee_ty = left.layout.ty.builtin_deref(true).unwrap().ty;
+
+                // We cannot overflow i64 as a type's size must be <= isize::MAX.
+                let pointee_size = i64::try_from(self.layout_of(pointee_ty)?.size.bytes()).unwrap();
+                // The computed offset, in bytes, must not overflow an isize.
+                // `checked_mul` enforces a too small bound, but no actual allocation can be big enough for
+                // the difference to be noticeable.
+                let offset_bytes =
+                    offset_count.checked_mul(pointee_size).ok_or(err_ub!(PointerArithOverflow))?;
+
+                let offset_ptr = self.ptr_offset_inbounds(ptr, offset_bytes)?;
+                Ok((
+                    ImmTy::from_scalar(Scalar::from_maybe_pointer(offset_ptr, self), left.layout),
+                    false,
+                ))
+            }
+
+            // Fall back to machine hook so Miri can support more pointer ops.
+            _ => M::binary_ptr_op(self, bin_op, left, right),
+        }
+    }
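+
+    // Illustrative sketch of the `Offset` arm above (hypothetical `ptr_imm: ImmTy` for a
+    // `*const u32` and `count_imm` holding 3): the byte offset is `3 * 4 = 12`, and
+    // `ptr_offset_inbounds` checks that the result stays within the pointer's allocation.
+    //
+    //     let (res, _overflow) = ecx.binary_ptr_op(mir::BinOp::Offset, &ptr_imm, &count_imm)?;
+    //     // `res` is the same pointer advanced by 12 bytes, with `ptr_imm`'s layout.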
+
+    /// Returns the result of the specified operation, and whether it overflowed.
+    pub fn overflowing_binary_op(
+        &self,
+        bin_op: mir::BinOp,
+        left: &ImmTy<'tcx, M::Provenance>,
+        right: &ImmTy<'tcx, M::Provenance>,
+    ) -> InterpResult<'tcx, (ImmTy<'tcx, M::Provenance>, bool)> {
+        trace!(
+            "Running binary op {:?}: {:?} ({}), {:?} ({})",
+            bin_op,
+            *left,
+            left.layout.ty,
+            *right,
+            right.layout.ty
+        );
+
+        match left.layout.ty.kind() {
+            ty::Char => {
+                assert_eq!(left.layout.ty, right.layout.ty);
+                let left = left.to_scalar();
+                let right = right.to_scalar();
+                Ok(self.binary_char_op(bin_op, left.to_char()?, right.to_char()?))
+            }
+            ty::Bool => {
+                assert_eq!(left.layout.ty, right.layout.ty);
+                let left = left.to_scalar();
+                let right = right.to_scalar();
+                Ok(self.binary_bool_op(bin_op, left.to_bool()?, right.to_bool()?))
+            }
+            ty::Float(fty) => {
+                assert_eq!(left.layout.ty, right.layout.ty);
+                let layout = left.layout;
+                let left = left.to_scalar();
+                let right = right.to_scalar();
+                Ok(match fty {
+                    FloatTy::F16 => unimplemented!("f16_f128"),
+                    FloatTy::F32 => {
+                        self.binary_float_op(bin_op, layout, left.to_f32()?, right.to_f32()?)
+                    }
+                    FloatTy::F64 => {
+                        self.binary_float_op(bin_op, layout, left.to_f64()?, right.to_f64()?)
+                    }
+                    FloatTy::F128 => unimplemented!("f16_f128"),
+                })
+            }
+            _ if left.layout.ty.is_integral() => {
+                // the RHS type can be different, e.g. for shifts -- but it has to be integral, too
+                assert!(
+                    right.layout.ty.is_integral(),
+                    "Unexpected types for BinOp: {} {:?} {}",
+                    left.layout.ty,
+                    bin_op,
+                    right.layout.ty
+                );
+
+                let l = left.to_scalar().to_bits(left.layout.size)?;
+                let r = right.to_scalar().to_bits(right.layout.size)?;
+                self.binary_int_op(bin_op, l, left.layout, r, right.layout)
+            }
+            _ if left.layout.ty.is_any_ptr() => {
+                // The RHS type must be a `pointer` *or an integer type* (for `Offset`).
+                // (Even when both sides are pointers, their type might differ, see issue #91636)
+                assert!(
+                    right.layout.ty.is_any_ptr() || right.layout.ty.is_integral(),
+                    "Unexpected types for BinOp: {} {:?} {}",
+                    left.layout.ty,
+                    bin_op,
+                    right.layout.ty
+                );
+
+                self.binary_ptr_op(bin_op, left, right)
+            }
+            _ => span_bug!(
+                self.cur_span(),
+                "Invalid MIR: bad LHS type for binop: {}",
+                left.layout.ty
+            ),
+        }
+    }
+
+    #[inline]
+    pub fn wrapping_binary_op(
+        &self,
+        bin_op: mir::BinOp,
+        left: &ImmTy<'tcx, M::Provenance>,
+        right: &ImmTy<'tcx, M::Provenance>,
+    ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
+        let (val, _overflow) = self.overflowing_binary_op(bin_op, left, right)?;
+        Ok(val)
+    }
+
+    /// Returns the result of the specified operation, and whether it overflowed.
+    pub fn overflowing_unary_op(
+        &self,
+        un_op: mir::UnOp,
+        val: &ImmTy<'tcx, M::Provenance>,
+    ) -> InterpResult<'tcx, (ImmTy<'tcx, M::Provenance>, bool)> {
+        use rustc_middle::mir::UnOp::*;
+
+        let layout = val.layout;
+        let val = val.to_scalar();
+        trace!("Running unary op {:?}: {:?} ({})", un_op, val, layout.ty);
+
+        match layout.ty.kind() {
+            ty::Bool => {
+                let val = val.to_bool()?;
+                let res = match un_op {
+                    Not => !val,
+                    _ => span_bug!(self.cur_span(), "Invalid bool op {:?}", un_op),
+                };
+                Ok((ImmTy::from_bool(res, *self.tcx), false))
+            }
+            ty::Float(fty) => {
+                // No NaN adjustment here, `-` is a bitwise operation!
+                let res = match (un_op, fty) {
+                    (Neg, FloatTy::F32) => Scalar::from_f32(-val.to_f32()?),
+                    (Neg, FloatTy::F64) => Scalar::from_f64(-val.to_f64()?),
+                    _ => span_bug!(self.cur_span(), "Invalid float op {:?}", un_op),
+                };
+                Ok((ImmTy::from_scalar(res, layout), false))
+            }
+            _ => {
+                assert!(layout.ty.is_integral());
+                let val = val.to_bits(layout.size)?;
+                let (res, overflow) = match un_op {
+                    Not => (self.truncate(!val, layout), false), // bitwise negation, then truncate
+                    Neg => {
+                        // arithmetic negation
+                        assert!(layout.abi.is_signed());
+                        let val = self.sign_extend(val, layout) as i128;
+                        let (res, overflow) = val.overflowing_neg();
+                        let res = res as u128;
+                        // Truncate to target type.
+                        // If that truncation loses any information, we have an overflow.
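+                        // (Concretely: negating `i8::MIN` gives 128, which truncates back to
+                        // the bit pattern of -128, so the sign-extension check below reports
+                        // the overflow.)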
+                        let truncated = self.truncate(res, layout);
+                        (truncated, overflow || self.sign_extend(truncated, layout) != res)
+                    }
+                };
+                Ok((ImmTy::from_uint(res, layout), overflow))
+            }
+        }
+    }
+
+    #[inline]
+    pub fn wrapping_unary_op(
+        &self,
+        un_op: mir::UnOp,
+        val: &ImmTy<'tcx, M::Provenance>,
+    ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
+        let (val, _overflow) = self.overflowing_unary_op(un_op, val)?;
+        Ok(val)
+    }
+}
diff --git a/compiler/rustc_const_eval/src/interpret/place.rs b/compiler/rustc_const_eval/src/interpret/place.rs
new file mode 100644
index 00000000000..1a2f1194f89
--- /dev/null
+++ b/compiler/rustc_const_eval/src/interpret/place.rs
@@ -0,0 +1,1072 @@
+//! Computations on places -- field projections, going from mir::Place, and writing
+//! into a place.
+//! All high-level functions to write to memory work on places as destinations.
+
+use std::assert_matches::assert_matches;
+
+use either::{Either, Left, Right};
+
+use rustc_ast::Mutability;
+use rustc_middle::mir;
+use rustc_middle::ty;
+use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
+use rustc_middle::ty::Ty;
+use rustc_target::abi::{Abi, Align, HasDataLayout, Size};
+
+use super::{
+    alloc_range, mir_assign_valid_types, AllocRef, AllocRefMut, CheckAlignMsg, CtfeProvenance,
+    ImmTy, Immediate, InterpCx, InterpResult, Machine, MemoryKind, Misalignment, OffsetMode, OpTy,
+    Operand, Pointer, PointerArithmetic, Projectable, Provenance, Readable, Scalar,
+};
+
+#[derive(Copy, Clone, Hash, PartialEq, Eq, Debug)]
+/// Information required for the sound usage of a `MemPlace`.
+pub enum MemPlaceMeta<Prov: Provenance = CtfeProvenance> {
+    /// The unsized payload (e.g. length for slices or vtable pointer for trait objects).
+    Meta(Scalar<Prov>),
+    /// `Sized` types or unsized `extern type`
+    None,
+}
+
+impl<Prov: Provenance> MemPlaceMeta<Prov> {
+    #[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980)
+    pub fn unwrap_meta(self) -> Scalar<Prov> {
+        match self {
+            Self::Meta(s) => s,
+            Self::None => {
+                bug!("expected wide pointer extra data (e.g. slice length or trait object vtable)")
+            }
+        }
+    }
+
+    #[inline(always)]
+    pub fn has_meta(self) -> bool {
+        match self {
+            Self::Meta(_) => true,
+            Self::None => false,
+        }
+    }
+}
+
+#[derive(Copy, Clone, Hash, PartialEq, Eq, Debug)]
+pub(super) struct MemPlace<Prov: Provenance = CtfeProvenance> {
+    /// The pointer can be a pure integer, with the `None` provenance.
+    pub ptr: Pointer<Option<Prov>>,
+    /// Metadata for unsized places. Interpretation is up to the type.
+    /// Must not be present for sized types, but can be missing for unsized types
+    /// (e.g., `extern type`).
+    pub meta: MemPlaceMeta<Prov>,
+    /// Stores whether this place was created based on a sufficiently aligned pointer.
+    misaligned: Option<Misalignment>,
+}
+
+impl<Prov: Provenance> MemPlace<Prov> {
+    /// Adjust the provenance of the main pointer (metadata is unaffected).
+    pub fn map_provenance(self, f: impl FnOnce(Prov) -> Prov) -> Self {
+        MemPlace { ptr: self.ptr.map_provenance(|p| p.map(f)), ..self }
+    }
+
+    /// Turn a mplace into a (thin or wide) pointer, as a reference, pointing to the same space.
+    #[inline]
+    pub fn to_ref(self, cx: &impl HasDataLayout) -> Immediate<Prov> {
+        Immediate::new_pointer_with_meta(self.ptr, self.meta, cx)
+    }
+
+    #[inline]
+    // Not called `offset_with_meta` to avoid confusion with the trait method.
+    fn offset_with_meta_<'mir, 'tcx, M: Machine<'mir, 'tcx, Provenance = Prov>>(
+        self,
+        offset: Size,
+        mode: OffsetMode,
+        meta: MemPlaceMeta<Prov>,
+        ecx: &InterpCx<'mir, 'tcx, M>,
+    ) -> InterpResult<'tcx, Self> {
+        debug_assert!(
+            !meta.has_meta() || self.meta.has_meta(),
+            "cannot use `offset_with_meta` to add metadata to a place"
+        );
+        if offset > ecx.data_layout().max_size_of_val() {
+            throw_ub!(PointerArithOverflow);
+        }
+        let ptr = match mode {
+            OffsetMode::Inbounds => {
+                ecx.ptr_offset_inbounds(self.ptr, offset.bytes().try_into().unwrap())?
+            }
+            OffsetMode::Wrapping => self.ptr.wrapping_offset(offset, ecx),
+        };
+        Ok(MemPlace { ptr, meta, misaligned: self.misaligned })
+    }
+}
+
+/// A MemPlace with its layout. Constructing it is only possible in this module.
+#[derive(Clone, Hash, Eq, PartialEq)]
+pub struct MPlaceTy<'tcx, Prov: Provenance = CtfeProvenance> {
+    mplace: MemPlace<Prov>,
+    pub layout: TyAndLayout<'tcx>,
+}
+
+impl<Prov: Provenance> std::fmt::Debug for MPlaceTy<'_, Prov> {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        // Printing `layout` results in too much noise; just print a nice version of the type.
+        f.debug_struct("MPlaceTy")
+            .field("mplace", &self.mplace)
+            .field("ty", &format_args!("{}", self.layout.ty))
+            .finish()
+    }
+}
+
+impl<'tcx, Prov: Provenance> MPlaceTy<'tcx, Prov> {
+    /// Produces a MemPlace that works for ZST but nothing else.
+    /// Conceptually this is a new allocation, but it doesn't actually create an allocation so you
+    /// don't need to worry about memory leaks.
+    #[inline]
+    pub fn fake_alloc_zst(layout: TyAndLayout<'tcx>) -> Self {
+        assert!(layout.is_zst());
+        let align = layout.align.abi;
+        let ptr = Pointer::from_addr_invalid(align.bytes()); // no provenance, absolute address
+        MPlaceTy { mplace: MemPlace { ptr, meta: MemPlaceMeta::None, misaligned: None }, layout }
+    }
+
+    /// Adjust the provenance of the main pointer (metadata is unaffected).
+    pub fn map_provenance(self, f: impl FnOnce(Prov) -> Prov) -> Self {
+        MPlaceTy { mplace: self.mplace.map_provenance(f), ..self }
+    }
+
+    #[inline(always)]
+    pub(super) fn mplace(&self) -> &MemPlace<Prov> {
+        &self.mplace
+    }
+
+    #[inline(always)]
+    pub fn ptr(&self) -> Pointer<Option<Prov>> {
+        self.mplace.ptr
+    }
+
+    #[inline(always)]
+    pub fn to_ref(&self, cx: &impl HasDataLayout) -> Immediate<Prov> {
+        self.mplace.to_ref(cx)
+    }
+}
+
+impl<'tcx, Prov: Provenance> Projectable<'tcx, Prov> for MPlaceTy<'tcx, Prov> {
+    #[inline(always)]
+    fn layout(&self) -> TyAndLayout<'tcx> {
+        self.layout
+    }
+
+    #[inline(always)]
+    fn meta(&self) -> MemPlaceMeta<Prov> {
+        self.mplace.meta
+    }
+
+    fn offset_with_meta<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
+        &self,
+        offset: Size,
+        mode: OffsetMode,
+        meta: MemPlaceMeta<Prov>,
+        layout: TyAndLayout<'tcx>,
+        ecx: &InterpCx<'mir, 'tcx, M>,
+    ) -> InterpResult<'tcx, Self> {
+        Ok(MPlaceTy { mplace: self.mplace.offset_with_meta_(offset, mode, meta, ecx)?, layout })
+    }
+
+    fn to_op<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
+        &self,
+        _ecx: &InterpCx<'mir, 'tcx, M>,
+    ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+        Ok(self.clone().into())
+    }
+}
+
+#[derive(Copy, Clone, Debug)]
+pub(super) enum Place<Prov: Provenance = CtfeProvenance> {
+    /// A place referring to a value allocated in the `Memory` system.
+    Ptr(MemPlace<Prov>),
+
+    /// To support alloc-free locals, we are able to write directly to a local. The offset indicates
+    /// where in the local this place is located; if it is `None`, no projection has been applied.
+    /// Such projections are meaningful even if the offset is 0, since they can change layouts.
+    /// (Without that optimization, we'd just always be a `MemPlace`.)
+    /// `Local` places always refer to the current stack frame, so they are unstable under
+    /// function calls/returns and switching between stacks of different threads!
+    /// We carry around the address of the `locals` buffer of the correct stack frame as a sanity
+    /// check to be able to catch some cases of using a dangling `Place`.
+    ///
+    /// This variant shall not be used for unsized types -- those must always live in memory.
+    Local { local: mir::Local, offset: Option<Size>, locals_addr: usize },
+}
+
+/// An evaluated place, together with its type.
+///
+/// This may reference a stack frame by its index, so `PlaceTy` should generally not be kept around
+/// for longer than a single operation. Popping and then pushing a stack frame can make `PlaceTy`
+/// point to the wrong destination. If the interpreter has multiple stacks, stack switching will
+/// also invalidate a `PlaceTy`.
+#[derive(Clone)]
+pub struct PlaceTy<'tcx, Prov: Provenance = CtfeProvenance> {
+    place: Place<Prov>, // Keep this private; it helps enforce invariants.
+    pub layout: TyAndLayout<'tcx>,
+}
+
+impl<Prov: Provenance> std::fmt::Debug for PlaceTy<'_, Prov> {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        // Printing `layout` results in too much noise; just print a nice version of the type.
+        f.debug_struct("PlaceTy")
+            .field("place", &self.place)
+            .field("ty", &format_args!("{}", self.layout.ty))
+            .finish()
+    }
+}
+
+impl<'tcx, Prov: Provenance> From<MPlaceTy<'tcx, Prov>> for PlaceTy<'tcx, Prov> {
+    #[inline(always)]
+    fn from(mplace: MPlaceTy<'tcx, Prov>) -> Self {
+        PlaceTy { place: Place::Ptr(mplace.mplace), layout: mplace.layout }
+    }
+}
+
+impl<'tcx, Prov: Provenance> PlaceTy<'tcx, Prov> {
+    #[inline(always)]
+    pub(super) fn place(&self) -> &Place<Prov> {
+        &self.place
+    }
+
+    /// A place is either an mplace or some local.
+    #[inline(always)]
+    pub fn as_mplace_or_local(
+        &self,
+    ) -> Either<MPlaceTy<'tcx, Prov>, (mir::Local, Option<Size>, usize)> {
+        match self.place {
+            Place::Ptr(mplace) => Left(MPlaceTy { mplace, layout: self.layout }),
+            Place::Local { local, offset, locals_addr } => Right((local, offset, locals_addr)),
+        }
+    }
+
+    #[inline(always)]
+    #[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980)
+    pub fn assert_mem_place(&self) -> MPlaceTy<'tcx, Prov> {
+        self.as_mplace_or_local().left().unwrap_or_else(|| {
+            bug!(
+                "PlaceTy of type {} was a local when it was expected to be an MPlace",
+                self.layout.ty
+            )
+        })
+    }
+}
+
+impl<'tcx, Prov: Provenance> Projectable<'tcx, Prov> for PlaceTy<'tcx, Prov> {
+    #[inline(always)]
+    fn layout(&self) -> TyAndLayout<'tcx> {
+        self.layout
+    }
+
+    #[inline]
+    fn meta(&self) -> MemPlaceMeta<Prov> {
+        match self.as_mplace_or_local() {
+            Left(mplace) => mplace.meta(),
+            Right(_) => {
+                debug_assert!(self.layout.is_sized(), "unsized locals should live in memory");
+                MemPlaceMeta::None
+            }
+        }
+    }
+
+    fn offset_with_meta<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
+        &self,
+        offset: Size,
+        mode: OffsetMode,
+        meta: MemPlaceMeta<Prov>,
+        layout: TyAndLayout<'tcx>,
+        ecx: &InterpCx<'mir, 'tcx, M>,
+    ) -> InterpResult<'tcx, Self> {
+        Ok(match self.as_mplace_or_local() {
+            Left(mplace) => mplace.offset_with_meta(offset, mode, meta, layout, ecx)?.into(),
+            Right((local, old_offset, locals_addr)) => {
+                debug_assert!(layout.is_sized(), "unsized locals should live in memory");
+                assert_matches!(meta, MemPlaceMeta::None); // we couldn't store it anyway...
+                // `Place::Local` are always in-bounds of their surrounding local, so we can just
+                // check directly if this remains in-bounds. This cannot actually be violated since
+                // projections are type-checked and bounds-checked.
+                assert!(offset + layout.size <= self.layout.size);
+
+                let new_offset = Size::from_bytes(
+                    ecx.data_layout()
+                        .offset(old_offset.unwrap_or(Size::ZERO).bytes(), offset.bytes())?,
+                );
+
+                PlaceTy {
+                    place: Place::Local { local, offset: Some(new_offset), locals_addr },
+                    layout,
+                }
+            }
+        })
+    }
+
+    fn to_op<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
+        &self,
+        ecx: &InterpCx<'mir, 'tcx, M>,
+    ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+        ecx.place_to_op(self)
+    }
+}
+
+// These are defined here because they produce a place.
+impl<'tcx, Prov: Provenance> OpTy<'tcx, Prov> {
+    #[inline(always)]
+    pub fn as_mplace_or_imm(&self) -> Either<MPlaceTy<'tcx, Prov>, ImmTy<'tcx, Prov>> {
+        match self.op() {
+            Operand::Indirect(mplace) => Left(MPlaceTy { mplace: *mplace, layout: self.layout }),
+            Operand::Immediate(imm) => Right(ImmTy::from_immediate(*imm, self.layout)),
+        }
+    }
+
+    #[inline(always)]
+    #[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980)
+    pub fn assert_mem_place(&self) -> MPlaceTy<'tcx, Prov> {
+        self.as_mplace_or_imm().left().unwrap_or_else(|| {
+            bug!(
+                "OpTy of type {} was immediate when it was expected to be an MPlace",
+                self.layout.ty
+            )
+        })
+    }
+}
+
+/// The `Writeable` trait describes interpreter values that can be written to.
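+/// It is implemented by `PlaceTy` (which may still refer to a not-yet-allocated local) and
+/// by `MPlaceTy` (which is always backed by memory).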
+pub trait Writeable<'tcx, Prov: Provenance>: Projectable<'tcx, Prov> {
+    fn as_mplace_or_local(
+        &self,
+    ) -> Either<MPlaceTy<'tcx, Prov>, (mir::Local, Option<Size>, usize, TyAndLayout<'tcx>)>;
+
+    fn force_mplace<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
+        &self,
+        ecx: &mut InterpCx<'mir, 'tcx, M>,
+    ) -> InterpResult<'tcx, MPlaceTy<'tcx, Prov>>;
+}
+
+impl<'tcx, Prov: Provenance> Writeable<'tcx, Prov> for PlaceTy<'tcx, Prov> {
+    #[inline(always)]
+    fn as_mplace_or_local(
+        &self,
+    ) -> Either<MPlaceTy<'tcx, Prov>, (mir::Local, Option<Size>, usize, TyAndLayout<'tcx>)> {
+        self.as_mplace_or_local()
+            .map_right(|(local, offset, locals_addr)| (local, offset, locals_addr, self.layout))
+    }
+
+    #[inline(always)]
+    fn force_mplace<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
+        &self,
+        ecx: &mut InterpCx<'mir, 'tcx, M>,
+    ) -> InterpResult<'tcx, MPlaceTy<'tcx, Prov>> {
+        ecx.force_allocation(self)
+    }
+}
+
+impl<'tcx, Prov: Provenance> Writeable<'tcx, Prov> for MPlaceTy<'tcx, Prov> {
+    #[inline(always)]
+    fn as_mplace_or_local(
+        &self,
+    ) -> Either<MPlaceTy<'tcx, Prov>, (mir::Local, Option<Size>, usize, TyAndLayout<'tcx>)> {
+        Left(self.clone())
+    }
+
+    #[inline(always)]
+    fn force_mplace<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
+        &self,
+        _ecx: &mut InterpCx<'mir, 'tcx, M>,
+    ) -> InterpResult<'tcx, MPlaceTy<'tcx, Prov>> {
+        Ok(self.clone())
+    }
+}
+
+// FIXME: Working around https://github.com/rust-lang/rust/issues/54385
+impl<'mir, 'tcx: 'mir, Prov, M> InterpCx<'mir, 'tcx, M>
+where
+    Prov: Provenance,
+    M: Machine<'mir, 'tcx, Provenance = Prov>,
+{
+    pub fn ptr_with_meta_to_mplace(
+        &self,
+        ptr: Pointer<Option<M::Provenance>>,
+        meta: MemPlaceMeta<M::Provenance>,
+        layout: TyAndLayout<'tcx>,
+    ) -> MPlaceTy<'tcx, M::Provenance> {
+        let misaligned = self.is_ptr_misaligned(ptr, layout.align.abi);
+        MPlaceTy { mplace: MemPlace { ptr, meta, misaligned }, layout }
+    }
+
+    pub fn ptr_to_mplace(
+        &self,
+        ptr: Pointer<Option<M::Provenance>>,
+        layout: TyAndLayout<'tcx>,
+    ) -> MPlaceTy<'tcx, M::Provenance> {
+        assert!(layout.is_sized());
+        self.ptr_with_meta_to_mplace(ptr, MemPlaceMeta::None, layout)
+    }
+
+    /// Take a value, which represents a (thin or wide) reference, and make it a place.
+    /// Alignment is just based on the type. This is the inverse of `mplace_to_ref()`.
+    ///
+    /// Only call this if you are sure the place is "valid" (aligned and inbounds), or do not
+    /// want to ever use the place for memory access!
+    /// Generally prefer `deref_pointer`.
+    pub fn ref_to_mplace(
+        &self,
+        val: &ImmTy<'tcx, M::Provenance>,
+    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
+        let pointee_type =
+            val.layout.ty.builtin_deref(true).expect("`ref_to_mplace` called on non-ptr type").ty;
+        let layout = self.layout_of(pointee_type)?;
+        let (ptr, meta) = val.to_scalar_and_meta();
+
+        // `ref_to_mplace` is called on raw pointers even if they don't actually get dereferenced;
+        // we hence can't call `size_and_align_of` since that asserts more validity than we want.
+        let ptr = ptr.to_pointer(self)?;
+        Ok(self.ptr_with_meta_to_mplace(ptr, meta, layout))
+    }
+
+    /// Turn a mplace into a (thin or wide) mutable raw pointer, pointing to the same space.
+    /// `align` information is lost!
+    /// This is the inverse of `ref_to_mplace`.
+    pub fn mplace_to_ref(
+        &self,
+        mplace: &MPlaceTy<'tcx, M::Provenance>,
+    ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
+        let imm = mplace.mplace.to_ref(self);
+        let layout = self.layout_of(Ty::new_mut_ptr(self.tcx.tcx, mplace.layout.ty))?;
+        Ok(ImmTy::from_immediate(imm, layout))
+    }
+
+    /// Take an operand, representing a pointer, and dereference it to a place.
+    /// Corresponds to the `*` operator in Rust.
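+    /// For example, evaluating `*p` for some `p: &T` reads the pointer value stored in `p`
+    /// and returns a place for the pointee of type `T`.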
+    #[instrument(skip(self), level = "debug")]
+    pub fn deref_pointer(
+        &self,
+        src: &impl Readable<'tcx, M::Provenance>,
+    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
+        let val = self.read_immediate(src)?;
+        trace!("deref to {} on {:?}", val.layout.ty, *val);
+
+        if val.layout.ty.is_box() {
+            // Derefer should have removed all Box derefs
+            bug!("dereferencing {}", val.layout.ty);
+        }
+
+        let mplace = self.ref_to_mplace(&val)?;
+        Ok(mplace)
+    }
+
+    #[inline]
+    pub(super) fn get_place_alloc(
+        &self,
+        mplace: &MPlaceTy<'tcx, M::Provenance>,
+    ) -> InterpResult<'tcx, Option<AllocRef<'_, 'tcx, M::Provenance, M::AllocExtra, M::Bytes>>>
+    {
+        let (size, _align) = self
+            .size_and_align_of_mplace(mplace)?
+            .unwrap_or((mplace.layout.size, mplace.layout.align.abi));
+        // We check alignment separately, and *after* checking everything else.
+        // If an access is both OOB and misaligned, we want to see the bounds error.
+        let a = self.get_ptr_alloc(mplace.ptr(), size)?;
+        self.check_misalign(mplace.mplace.misaligned, CheckAlignMsg::BasedOn)?;
+        Ok(a)
+    }
+
+    #[inline]
+    pub(super) fn get_place_alloc_mut(
+        &mut self,
+        mplace: &MPlaceTy<'tcx, M::Provenance>,
+    ) -> InterpResult<'tcx, Option<AllocRefMut<'_, 'tcx, M::Provenance, M::AllocExtra, M::Bytes>>>
+    {
+        let (size, _align) = self
+            .size_and_align_of_mplace(mplace)?
+            .unwrap_or((mplace.layout.size, mplace.layout.align.abi));
+        // We check alignment separately, and raise that error *after* checking everything else.
+        // If an access is both OOB and misaligned, we want to see the bounds error.
+        // However we have to call `check_misalign` first to make the borrow checker happy.
+        let misalign_err = self.check_misalign(mplace.mplace.misaligned, CheckAlignMsg::BasedOn);
+        let a = self.get_ptr_alloc_mut(mplace.ptr(), size)?;
+        misalign_err?;
+        Ok(a)
+    }
+
+    /// Converts a repr(simd) place into a place where `project_index` accesses the SIMD elements.
+    /// Also returns the number of elements.
+    pub fn mplace_to_simd(
+        &self,
+        mplace: &MPlaceTy<'tcx, M::Provenance>,
+    ) -> InterpResult<'tcx, (MPlaceTy<'tcx, M::Provenance>, u64)> {
+        // Basically we just transmute this place into an array following simd_size_and_type.
+        // (Transmuting is okay since this is an in-memory place. We also double-check the size
+        // stays the same.)
+        let (len, e_ty) = mplace.layout.ty.simd_size_and_type(*self.tcx);
+        let array = Ty::new_array(self.tcx.tcx, e_ty, len);
+        let layout = self.layout_of(array)?;
+        let mplace = mplace.transmute(layout, self)?;
+        Ok((mplace, len))
+    }
+
+    /// Turn a local in the current frame into a place.
+    pub fn local_to_place(
+        &self,
+        local: mir::Local,
+    ) -> InterpResult<'tcx, PlaceTy<'tcx, M::Provenance>> {
+        // Other parts of the system rely on `Place::Local` never being unsized.
+        // So we eagerly check here if this local has an MPlace, and if yes we use it.
+        let frame = self.frame();
+        let layout = self.layout_of_local(frame, local, None)?;
+        let place = if layout.is_sized() {
+            // We can just always use the `Local` for sized values.
+            Place::Local { local, offset: None, locals_addr: frame.locals_addr() }
+        } else {
+            // Unsized `Local` isn't okay (we cannot store the metadata).
+            match frame.locals[local].access()? {
+                Operand::Immediate(_) => bug!(),
+                Operand::Indirect(mplace) => Place::Ptr(*mplace),
+            }
+        };
+        Ok(PlaceTy { place, layout })
+    }
+
+    /// Computes a place. You should only use this if you intend to write into this
+    /// place; for reading, a more efficient alternative is `eval_place_to_op`.
+    #[instrument(skip(self), level = "debug")]
+    pub fn eval_place(
+        &self,
+        mir_place: mir::Place<'tcx>,
+    ) -> InterpResult<'tcx, PlaceTy<'tcx, M::Provenance>> {
+        let mut place = self.local_to_place(mir_place.local)?;
+        // Using `try_fold` turned out to be bad for performance, hence the loop.
+        for elem in mir_place.projection.iter() {
+            place = self.project(&place, elem)?
+        }
+
+        trace!("{:?}", self.dump_place(&place));
+        // Sanity-check the type we ended up with.
+        if cfg!(debug_assertions) {
+            let normalized_place_ty = self
+                .instantiate_from_current_frame_and_normalize_erasing_regions(
+                    mir_place.ty(&self.frame().body.local_decls, *self.tcx).ty,
+                )?;
+            if !mir_assign_valid_types(
+                *self.tcx,
+                self.param_env,
+                self.layout_of(normalized_place_ty)?,
+                place.layout,
+            ) {
+                span_bug!(
+                    self.cur_span(),
+                    "eval_place of a MIR place with type {} produced an interpreter place with type {}",
+                    normalized_place_ty,
+                    place.layout.ty,
+                )
+            }
+        }
+        Ok(place)
+    }
+
+    /// Write an immediate to a place
+    #[inline(always)]
+    #[instrument(skip(self), level = "debug")]
+    pub fn write_immediate(
+        &mut self,
+        src: Immediate<M::Provenance>,
+        dest: &impl Writeable<'tcx, M::Provenance>,
+    ) -> InterpResult<'tcx> {
+        self.write_immediate_no_validate(src, dest)?;
+
+        if M::enforce_validity(self, dest.layout()) {
+            // Data got changed, better make sure it matches the type!
+            self.validate_operand(&dest.to_op(self)?)?;
+        }
+
+        Ok(())
+    }
+
+    /// Write a scalar to a place
+    #[inline(always)]
+    pub fn write_scalar(
+        &mut self,
+        val: impl Into<Scalar<M::Provenance>>,
+        dest: &impl Writeable<'tcx, M::Provenance>,
+    ) -> InterpResult<'tcx> {
+        self.write_immediate(Immediate::Scalar(val.into()), dest)
+    }
+
+    /// Write a pointer to a place
+    #[inline(always)]
+    pub fn write_pointer(
+        &mut self,
+        ptr: impl Into<Pointer<Option<M::Provenance>>>,
+        dest: &impl Writeable<'tcx, M::Provenance>,
+    ) -> InterpResult<'tcx> {
+        self.write_scalar(Scalar::from_maybe_pointer(ptr.into(), self), dest)
+    }
+
+    /// Write an immediate to a place.
+    /// If you use this you are responsible for validating that things got copied at the
+    /// right type.
+    fn write_immediate_no_validate(
+        &mut self,
+        src: Immediate<M::Provenance>,
+        dest: &impl Writeable<'tcx, M::Provenance>,
+    ) -> InterpResult<'tcx> {
+        assert!(dest.layout().is_sized(), "Cannot write unsized immediate data");
+
+        // See if we can avoid an allocation. This is the counterpart to `read_immediate_raw`,
+        // but not factored as a separate function.
+        let mplace = match dest.as_mplace_or_local() {
+            Right((local, offset, locals_addr, layout)) => {
+                if offset.is_some() {
+                    // This has been projected to a part of this local. We could have complicated
+                    // logic to still keep this local as an `Operand`... but it's much easier to
+                    // just fall back to the indirect path.
+                    dest.force_mplace(self)?
+                } else {
+                    debug_assert_eq!(locals_addr, self.frame().locals_addr());
+                    match self.frame_mut().locals[local].access_mut()? {
+                        Operand::Immediate(local_val) => {
+                            // Local can be updated in-place.
+                            *local_val = src;
+                            // Double-check that the value we are storing and the local fit to each other.
+                            // (*After* doing the update for borrow checker reasons.)
+                            if cfg!(debug_assertions) {
+                                let local_layout =
+                                    self.layout_of_local(&self.frame(), local, None)?;
+                                match (src, local_layout.abi) {
+                                    (Immediate::Scalar(scalar), Abi::Scalar(s)) => {
+                                        assert_eq!(scalar.size(), s.size(self))
+                                    }
+                                    (
+                                        Immediate::ScalarPair(a_val, b_val),
+                                        Abi::ScalarPair(a, b),
+                                    ) => {
+                                        assert_eq!(a_val.size(), a.size(self));
+                                        assert_eq!(b_val.size(), b.size(self));
+                                    }
+                                    (Immediate::Uninit, _) => {}
+                                    (src, abi) => {
+                                        bug!(
+                                            "value {src:?} cannot be written into local with type {} (ABI {abi:?})",
+                                            local_layout.ty
+                                        )
+                                    }
+                                };
+                            }
+                            return Ok(());
+                        }
+                        Operand::Indirect(mplace) => {
+                            // The local is in memory, go on below.
+                            MPlaceTy { mplace: *mplace, layout }
+                        }
+                    }
+                }
+            }
+            Left(mplace) => mplace, // already referring to memory
+        };
+
+        // This is already in memory, write there.
+        self.write_immediate_to_mplace_no_validate(src, mplace.layout, mplace.mplace)
+    }
+
+    /// Write an immediate to memory.
+    /// If you use this you are responsible for validating that things got copied at the
+    /// right layout.
+    fn write_immediate_to_mplace_no_validate(
+        &mut self,
+        value: Immediate<M::Provenance>,
+        layout: TyAndLayout<'tcx>,
+        dest: MemPlace<M::Provenance>,
+    ) -> InterpResult<'tcx> {
+        // Note that it is really important that the type here is the right one, and matches the
+        // type things are read at. In case `value` is a `ScalarPair`, we don't do any magic here
+        // to handle padding properly, which is only correct if we never look at this data with the
+        // wrong type.
+
+        let tcx = *self.tcx;
+        let Some(mut alloc) = self.get_place_alloc_mut(&MPlaceTy { mplace: dest, layout })? else {
+            // zero-sized access
+            return Ok(());
+        };
+
+        match value {
+            Immediate::Scalar(scalar) => {
+                let Abi::Scalar(s) = layout.abi else {
+                    span_bug!(
+                        self.cur_span(),
+                        "write_immediate_to_mplace: invalid Scalar layout: {layout:#?}",
+                    )
+                };
+                let size = s.size(&tcx);
+                assert_eq!(size, layout.size, "abi::Scalar size does not match layout size");
+                alloc.write_scalar(alloc_range(Size::ZERO, size), scalar)
+            }
+            Immediate::ScalarPair(a_val, b_val) => {
+                let Abi::ScalarPair(a, b) = layout.abi else {
+                    span_bug!(
+                        self.cur_span(),
+                        "write_immediate_to_mplace: invalid ScalarPair layout: {:#?}",
+                        layout
+                    )
+                };
+                let (a_size, b_size) = (a.size(&tcx), b.size(&tcx));
+                let b_offset = a_size.align_to(b.align(&tcx).abi);
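+                // (E.g. for a wide pointer like `&[u8]` on a 64-bit target, the data pointer
+                // occupies the first 8 bytes and the length component starts at offset 8.)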
+                assert!(b_offset.bytes() > 0); // in `operand_field` we use the offset to tell apart the fields
+
+                // It is tempting to verify `b_offset` against `layout.fields.offset(1)`,
+                // but that does not work: We could be a newtype around a pair, then the
+                // fields do not match the `ScalarPair` components.
+
+                alloc.write_scalar(alloc_range(Size::ZERO, a_size), a_val)?;
+                alloc.write_scalar(alloc_range(b_offset, b_size), b_val)
+            }
+            Immediate::Uninit => alloc.write_uninit(),
+        }
+    }
+
+    pub fn write_uninit(
+        &mut self,
+        dest: &impl Writeable<'tcx, M::Provenance>,
+    ) -> InterpResult<'tcx> {
+        let mplace = match dest.as_mplace_or_local() {
+            Left(mplace) => mplace,
+            Right((local, offset, locals_addr, layout)) => {
+                if offset.is_some() {
+                    // This has been projected to a part of this local. We could have complicated
+                    // logic to still keep this local as an `Operand`... but it's much easier to
+                    // just fall back to the indirect path.
+                    // FIXME: share the logic with `write_immediate_no_validate`.
+                    dest.force_mplace(self)?
+                } else {
+                    debug_assert_eq!(locals_addr, self.frame().locals_addr());
+                    match self.frame_mut().locals[local].access_mut()? {
+                        Operand::Immediate(local) => {
+                            *local = Immediate::Uninit;
+                            return Ok(());
+                        }
+                        Operand::Indirect(mplace) => {
+                            // The local is in memory, go on below.
+                            MPlaceTy { mplace: *mplace, layout }
+                        }
+                    }
+                }
+            }
+        };
+        let Some(mut alloc) = self.get_place_alloc_mut(&mplace)? else {
+            // Zero-sized access
+            return Ok(());
+        };
+        alloc.write_uninit()?;
+        Ok(())
+    }
+
+    /// Copies the data from an operand to a place.
+    /// The layouts of the `src` and `dest` may disagree.
+    /// Does not perform validation of the destination.
+    /// The only known use case for this function is checking the return
+    /// value of a static during stack frame popping.
+    #[inline(always)]
+    pub(super) fn copy_op_no_dest_validation(
+        &mut self,
+        src: &impl Readable<'tcx, M::Provenance>,
+        dest: &impl Writeable<'tcx, M::Provenance>,
+    ) -> InterpResult<'tcx> {
+        self.copy_op_inner(
+            src, dest, /* allow_transmute */ true, /* validate_dest */ false,
+        )
+    }
+
+    /// Copies the data from an operand to a place.
+    /// The layouts of the `src` and `dest` may disagree.
+    #[inline(always)]
+    pub fn copy_op_allow_transmute(
+        &mut self,
+        src: &impl Readable<'tcx, M::Provenance>,
+        dest: &impl Writeable<'tcx, M::Provenance>,
+    ) -> InterpResult<'tcx> {
+        self.copy_op_inner(
+            src, dest, /* allow_transmute */ true, /* validate_dest */ true,
+        )
+    }
+
+    /// Copies the data from an operand to a place.
+    /// `src` and `dest` must have the same layout and the copied value will be validated.
+    #[inline(always)]
+    pub fn copy_op(
+        &mut self,
+        src: &impl Readable<'tcx, M::Provenance>,
+        dest: &impl Writeable<'tcx, M::Provenance>,
+    ) -> InterpResult<'tcx> {
+        self.copy_op_inner(
+            src, dest, /* allow_transmute */ false, /* validate_dest */ true,
+        )
+    }
+
+    /// Copies the data from an operand to a place.
+    /// `allow_transmute` indicates whether the layouts may disagree.
+    #[inline(always)]
+    #[instrument(skip(self), level = "debug")]
+    fn copy_op_inner(
+        &mut self,
+        src: &impl Readable<'tcx, M::Provenance>,
+        dest: &impl Writeable<'tcx, M::Provenance>,
+        allow_transmute: bool,
+        validate_dest: bool,
+    ) -> InterpResult<'tcx> {
+        // Generally for transmutation, data must be valid both at the old and new type.
+        // But if the types are the same, the 2nd validation below suffices.
+        if src.layout().ty != dest.layout().ty && M::enforce_validity(self, src.layout()) {
+            self.validate_operand(&src.to_op(self)?)?;
+        }
+
+        // Do the actual copy.
+        self.copy_op_no_validate(src, dest, allow_transmute)?;
+
+        if validate_dest && M::enforce_validity(self, dest.layout()) {
+            // Data got changed, better make sure it matches the type!
+            self.validate_operand(&dest.to_op(self)?)?;
+        }
+
+        Ok(())
+    }
+
+    /// Copies the data from an operand to a place.
+    /// `allow_transmute` indicates whether the layouts may disagree.
+    /// Also, if you use this you are responsible for validating that things get copied at the
+    /// right type.
+    #[instrument(skip(self), level = "debug")]
+    fn copy_op_no_validate(
+        &mut self,
+        src: &impl Readable<'tcx, M::Provenance>,
+        dest: &impl Writeable<'tcx, M::Provenance>,
+        allow_transmute: bool,
+    ) -> InterpResult<'tcx> {
+        // We do NOT compare the types for equality, because well-typed code can
+        // actually "transmute" `&mut T` to `&T` in an assignment without a cast.
+        let layout_compat =
+            mir_assign_valid_types(*self.tcx, self.param_env, src.layout(), dest.layout());
+        if !allow_transmute && !layout_compat {
+            span_bug!(
+                self.cur_span(),
+                "type mismatch when copying!\nsrc: {},\ndest: {}",
+                src.layout().ty,
+                dest.layout().ty,
+            );
+        }
+
+        // Let us see if the layout is simple enough that we can take a shortcut
+        // and avoid `force_allocation`.
+        let src = match self.read_immediate_raw(src)? {
+            Right(src_val) => {
+                assert!(!src.layout().is_unsized());
+                assert!(!dest.layout().is_unsized());
+                assert_eq!(src.layout().size, dest.layout().size);
+                // Yay, we got a value that we can write directly.
+                return if layout_compat {
+                    self.write_immediate_no_validate(*src_val, dest)
+                } else {
+                    // This is tricky. The problematic case is `ScalarPair`: the `src_val` was
+                    // loaded using the offsets defined by `src.layout`. When we put this back into
+                    // the destination, we have to use the same offsets! So (a) we make sure we
+                    // write back to memory, and (b) we use `dest` *with the source layout*.
+                    let dest_mem = dest.force_mplace(self)?;
+                    self.write_immediate_to_mplace_no_validate(
+                        *src_val,
+                        src.layout(),
+                        dest_mem.mplace,
+                    )
+                };
+            }
+            Left(mplace) => mplace,
+        };
+        // Slow path, this does not fit into an immediate. Just memcpy.
+        trace!("copy_op: {:?} <- {:?}: {}", *dest, src, dest.layout().ty);
+
+        let dest = dest.force_mplace(self)?;
+        let Some((dest_size, _)) = self.size_and_align_of_mplace(&dest)? else {
+            span_bug!(self.cur_span(), "copy_op needs (dynamically) sized values")
+        };
+        if cfg!(debug_assertions) {
+            let src_size = self.size_and_align_of_mplace(&src)?.unwrap().0;
+            assert_eq!(src_size, dest_size, "Cannot copy differently-sized data");
+        } else {
+            // As a cheap approximation, we compare the fixed parts of the size.
+            assert_eq!(src.layout.size, dest.layout.size);
+        }
+
+        // Setting `nonoverlapping` here only has an effect when we don't hit the fast-path above,
+        // but that should at least match what LLVM does where `memcpy` is also only used when the
+        // type does not have Scalar/ScalarPair layout.
+        // (Or as the `Assign` docs put it, assignments "not producing primitives" must be
+        // non-overlapping.)
+        // We check alignment separately, and *after* checking everything else.
+        // If an access is both OOB and misaligned, we want to see the bounds error.
+        self.mem_copy(src.ptr(), dest.ptr(), dest_size, /*nonoverlapping*/ true)?;
+        self.check_misalign(src.mplace.misaligned, CheckAlignMsg::BasedOn)?;
+        self.check_misalign(dest.mplace.misaligned, CheckAlignMsg::BasedOn)?;
+        Ok(())
+    }
+
+    /// Ensures that a place is in memory, and returns where it is.
+    /// If the place currently refers to a local that doesn't yet have a matching allocation,
+    /// create such an allocation.
+    /// This is essentially `force_to_memplace`.
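+    /// For example, taking a reference to a local needs an actual allocation to point to,
+    /// so a local that so far only existed as an `Operand::Immediate` gets spilled to
+    /// memory here.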
+    #[instrument(skip(self), level = "debug")]
+    pub fn force_allocation(
+        &mut self,
+        place: &PlaceTy<'tcx, M::Provenance>,
+    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
+        let mplace = match place.place {
+            Place::Local { local, offset, locals_addr } => {
+                debug_assert_eq!(locals_addr, self.frame().locals_addr());
+                let whole_local = match self.frame_mut().locals[local].access_mut()? {
+                    &mut Operand::Immediate(local_val) => {
+                        // We need to make an allocation.
+
+                        // We need the layout of the local. We can NOT use the layout we got,
+                        // that might, e.g., be an inner field of a struct with `Scalar` layout
+                        // that has a different alignment than the outer field.
+                        let local_layout = self.layout_of_local(&self.frame(), local, None)?;
+                        assert!(local_layout.is_sized(), "unsized locals cannot be immediate");
+                        let mplace = self.allocate(local_layout, MemoryKind::Stack)?;
+                        // Preserve old value. (As an optimization, we can skip this if it was uninit.)
+                        if !matches!(local_val, Immediate::Uninit) {
+                            // We don't have to validate as we can assume the local was already
+                            // valid for its type. We must not use any part of `place` here, that
+                            // could be a projection to a part of the local!
+                            self.write_immediate_to_mplace_no_validate(
+                                local_val,
+                                local_layout,
+                                mplace.mplace,
+                            )?;
+                        }
+                        M::after_local_allocated(self, local, &mplace)?;
+                        // Now we can call `access_mut` again, asserting it goes well, and actually
+                        // overwrite things. This points to the entire allocation, not just the part
+                        // the place refers to, i.e. we do this before we apply `offset`.
+                        *self.frame_mut().locals[local].access_mut().unwrap() =
+                            Operand::Indirect(mplace.mplace);
+                        mplace.mplace
+                    }
+                    &mut Operand::Indirect(mplace) => mplace, // this already was an indirect local
+                };
+                if let Some(offset) = offset {
+                    // This offset is always inbounds, no need to check it again.
+                    whole_local.offset_with_meta_(
+                        offset,
+                        OffsetMode::Wrapping,
+                        MemPlaceMeta::None,
+                        self,
+                    )?
+                } else {
+                    // Preserve wide place metadata, do not call `offset`.
+                    whole_local
+                }
+            }
+            Place::Ptr(mplace) => mplace,
+        };
+        // Return with the original layout and align, so that the caller can go on
+        Ok(MPlaceTy { mplace, layout: place.layout })
+    }
+
+    pub fn allocate_dyn(
+        &mut self,
+        layout: TyAndLayout<'tcx>,
+        kind: MemoryKind<M::MemoryKind>,
+        meta: MemPlaceMeta<M::Provenance>,
+    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
+        let Some((size, align)) = self.size_and_align_of(&meta, &layout)? else {
+            span_bug!(self.cur_span(), "cannot allocate space for `extern` type, size is not known")
+        };
+        let ptr = self.allocate_ptr(size, align, kind)?;
+        Ok(self.ptr_with_meta_to_mplace(ptr.into(), meta, layout))
+    }
+
+    pub fn allocate(
+        &mut self,
+        layout: TyAndLayout<'tcx>,
+        kind: MemoryKind<M::MemoryKind>,
+    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
+        assert!(layout.is_sized());
+        self.allocate_dyn(layout, kind, MemPlaceMeta::None)
+    }
+
+    /// Returns a wide MPlace of type `str` to a new 1-aligned allocation.
+    pub fn allocate_str(
+        &mut self,
+        str: &str,
+        kind: MemoryKind<M::MemoryKind>,
+        mutbl: Mutability,
+    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
+        let ptr = self.allocate_bytes_ptr(str.as_bytes(), Align::ONE, kind, mutbl)?;
+        let meta = Scalar::from_target_usize(u64::try_from(str.len()).unwrap(), self);
+        let layout = self.layout_of(self.tcx.types.str_).unwrap();
+        Ok(self.ptr_with_meta_to_mplace(ptr.into(), MemPlaceMeta::Meta(meta), layout))
+    }
+
+    pub fn raw_const_to_mplace(
+        &self,
+        raw: mir::ConstAlloc<'tcx>,
+    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
+        // This must be an allocation in `tcx`
+        let _ = self.tcx.global_alloc(raw.alloc_id);
+        let ptr = self.global_base_pointer(Pointer::from(raw.alloc_id))?;
+        let layout = self.layout_of(raw.ty)?;
+        Ok(self.ptr_to_mplace(ptr.into(), layout))
+    }
+
+    /// Turn a place with a `dyn Trait` type into a place with the actual dynamic type.
+    /// Also returns the vtable.
+    pub(super) fn unpack_dyn_trait(
+        &self,
+        mplace: &MPlaceTy<'tcx, M::Provenance>,
+    ) -> InterpResult<'tcx, (MPlaceTy<'tcx, M::Provenance>, Pointer<Option<M::Provenance>>)> {
+        assert!(
+            matches!(mplace.layout.ty.kind(), ty::Dynamic(_, _, ty::Dyn)),
+            "`unpack_dyn_trait` only makes sense on `dyn` types"
+        );
+        let vtable = mplace.meta().unwrap_meta().to_pointer(self)?;
+        let (ty, _) = self.get_ptr_vtable(vtable)?;
+        let layout = self.layout_of(ty)?;
+        // This is a kind of transmute, from a place with unsized type and metadata to
+        // a place with sized type and no metadata.
+        let mplace =
+            MPlaceTy { mplace: MemPlace { meta: MemPlaceMeta::None, ..mplace.mplace }, layout };
+        Ok((mplace, vtable))
+    }
+
+    /// Turn a `dyn* Trait` value into a value with the actual dynamic type.
+    /// Also returns the vtable.
+    pub(super) fn unpack_dyn_star<P: Projectable<'tcx, M::Provenance>>(
+        &self,
+        val: &P,
+    ) -> InterpResult<'tcx, (P, Pointer<Option<M::Provenance>>)> {
+        assert!(
+            matches!(val.layout().ty.kind(), ty::Dynamic(_, _, ty::DynStar)),
+            "`unpack_dyn_star` only makes sense on `dyn*` types"
+        );
+        let data = self.project_field(val, 0)?;
+        let vtable = self.project_field(val, 1)?;
+        let vtable = self.read_pointer(&vtable.to_op(self)?)?;
+        let (ty, _) = self.get_ptr_vtable(vtable)?;
+        let layout = self.layout_of(ty)?;
+        // `data` is already the right thing but has the wrong type. So we transmute it, by
+        // projecting with offset 0.
+        let data = data.transmute(layout, self)?;
+        Ok((data, vtable))
+    }
+}
+
+// Some nodes are used a lot. Make sure they don't unintentionally get bigger.
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+mod size_asserts {
+    use super::*;
+    use rustc_data_structures::static_assert_size;
+    // tidy-alphabetical-start
+    static_assert_size!(MemPlace, 48);
+    static_assert_size!(MemPlaceMeta, 24);
+    static_assert_size!(MPlaceTy<'_>, 64);
+    static_assert_size!(Place, 48);
+    static_assert_size!(PlaceTy<'_>, 64);
+    // tidy-alphabetical-end
+}
diff --git a/compiler/rustc_const_eval/src/interpret/projection.rs b/compiler/rustc_const_eval/src/interpret/projection.rs
new file mode 100644
index 00000000000..5ff78f7b8c9
--- /dev/null
+++ b/compiler/rustc_const_eval/src/interpret/projection.rs
@@ -0,0 +1,370 @@
+//! This file implements "place projections"; basically a symmetric API for 3 types: MPlaceTy, OpTy, PlaceTy.
+//!
+//! OpTy and PlaceTy generally work by "let's see if we are actually an MPlaceTy, and do something custom if not".
+//! For PlaceTy, the custom thing is basically always to call `force_allocation` and then use the MPlaceTy logic anyway.
+//! For OpTy, the custom thing on field projections has to be pretty clever (since `Operand::Immediate` can have fields),
+//! but for array/slice operations it only has to worry about `Operand::Uninit`. That makes the value part trivial,
+//! but we still need to do bounds checking and adjust the layout. To not duplicate that with MPlaceTy, we actually
+//! implement the logic on OpTy, and MPlaceTy calls that.
+
+use std::marker::PhantomData;
+use std::ops::Range;
+
+use rustc_middle::mir;
+use rustc_middle::ty;
+use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
+use rustc_middle::ty::Ty;
+use rustc_target::abi::Size;
+use rustc_target::abi::{self, VariantIdx};
+
+use super::{InterpCx, InterpResult, MPlaceTy, Machine, MemPlaceMeta, OpTy, Provenance, Scalar};
+
+/// Describes the constraints placed on offset-projections.
+#[derive(Copy, Clone, Debug)]
+pub enum OffsetMode {
+    /// The offset has to be inbounds, like `ptr::offset`.
+    Inbounds,
+    /// No constraints, just wrap around the edge of the address space.
+    Wrapping,
+}
+
+/// A thing that we can project into, and that has a layout.
+pub trait Projectable<'tcx, Prov: Provenance>: Sized + std::fmt::Debug {
+    /// Get the layout.
+    fn layout(&self) -> TyAndLayout<'tcx>;
+
+    /// Get the metadata of a wide value.
+    fn meta(&self) -> MemPlaceMeta<Prov>;
+
+    /// Get the length of a slice/string/array stored here.
+    fn len<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
+        &self,
+        ecx: &InterpCx<'mir, 'tcx, M>,
+    ) -> InterpResult<'tcx, u64> {
+        let layout = self.layout();
+        if layout.is_unsized() {
+            // We need to consult `meta` metadata
+            match layout.ty.kind() {
+                ty::Slice(..) | ty::Str => self.meta().unwrap_meta().to_target_usize(ecx),
+                _ => bug!("len not supported on unsized type {:?}", layout.ty),
+            }
+        } else {
+            // Go through the layout. There are lots of types that support a length,
+            // e.g., SIMD types. (But not all repr(simd) types even have FieldsShape::Array!)
+            match layout.fields {
+                abi::FieldsShape::Array { count, .. } => Ok(count),
+                _ => bug!("len not supported on sized type {:?}", layout.ty),
+            }
+        }
+    }
+
+    /// Offset the value by the given amount, replacing the layout and metadata.
+    fn offset_with_meta<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
+        &self,
+        offset: Size,
+        mode: OffsetMode,
+        meta: MemPlaceMeta<Prov>,
+        layout: TyAndLayout<'tcx>,
+        ecx: &InterpCx<'mir, 'tcx, M>,
+    ) -> InterpResult<'tcx, Self>;
+
+    fn offset<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
+        &self,
+        offset: Size,
+        layout: TyAndLayout<'tcx>,
+        ecx: &InterpCx<'mir, 'tcx, M>,
+    ) -> InterpResult<'tcx, Self> {
+        assert!(layout.is_sized());
+        self.offset_with_meta(offset, OffsetMode::Inbounds, MemPlaceMeta::None, layout, ecx)
+    }
+
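+    /// Reinterpret this value at a different layout of the same (statically known) size,
+    /// without moving it. For example, `mplace_to_simd` uses this to view a SIMD vector
+    /// as the corresponding array type.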
+    fn transmute<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
+        &self,
+        layout: TyAndLayout<'tcx>,
+        ecx: &InterpCx<'mir, 'tcx, M>,
+    ) -> InterpResult<'tcx, Self> {
+        assert!(self.layout().is_sized() && layout.is_sized());
+        assert_eq!(self.layout().size, layout.size);
+        self.offset_with_meta(Size::ZERO, OffsetMode::Wrapping, MemPlaceMeta::None, layout, ecx)
+    }
+
+    /// Convert this to an `OpTy`. This might be an irreversible transformation, but is useful for
+    /// reading from this thing.
+    fn to_op<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
+        &self,
+        ecx: &InterpCx<'mir, 'tcx, M>,
+    ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>>;
+}
+
+/// A type representing iteration over the elements of an array.
+pub struct ArrayIterator<'tcx, 'a, Prov: Provenance, P: Projectable<'tcx, Prov>> {
+    base: &'a P,
+    range: Range<u64>,
+    stride: Size,
+    field_layout: TyAndLayout<'tcx>,
+    _phantom: PhantomData<Prov>, // otherwise it says `Prov` is never used...
+}
+
+impl<'tcx, 'a, Prov: Provenance, P: Projectable<'tcx, Prov>> ArrayIterator<'tcx, 'a, Prov, P> {
+    /// Should be the same `ecx` on each call, and match the one used to create the iterator.
+    pub fn next<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
+        &mut self,
+        ecx: &InterpCx<'mir, 'tcx, M>,
+    ) -> InterpResult<'tcx, Option<(u64, P)>> {
+        let Some(idx) = self.range.next() else { return Ok(None) };
+        // We use `Wrapping` here since the offset has already been checked when the iterator was created.
+        Ok(Some((
+            idx,
+            self.base.offset_with_meta(
+                self.stride * idx,
+                OffsetMode::Wrapping,
+                MemPlaceMeta::None,
+                self.field_layout,
+                ecx,
+            )?,
+        )))
+    }
+}
+
+// FIXME: Working around https://github.com/rust-lang/rust/issues/54385
+impl<'mir, 'tcx: 'mir, Prov, M> InterpCx<'mir, 'tcx, M>
+where
+    Prov: Provenance,
+    M: Machine<'mir, 'tcx, Provenance = Prov>,
+{
+    /// Offset a value to project to a field of a struct/union. This is always possible
+    /// without allocating, so it can take `&self`. The returned value carries the field's layout.
+    /// This supports both struct and array fields, but not slices!
+    ///
+    /// This also works for arrays, but then the `usize` field index is limiting.
+    /// For indexing into arrays, use `project_index`.
+    pub fn project_field<P: Projectable<'tcx, M::Provenance>>(
+        &self,
+        base: &P,
+        field: usize,
+    ) -> InterpResult<'tcx, P> {
+        // Slices nominally have length 0, so they will panic somewhere in `fields.offset`.
+        debug_assert!(
+            !matches!(base.layout().ty.kind(), ty::Slice(..)),
+            "`field` projection called on a slice -- call `index` projection instead"
+        );
+        let offset = base.layout().fields.offset(field);
+        // Computing the layout does normalization, so we get a normalized type out of this
+        // even if the field type is non-normalized (possible e.g. via associated types).
+        let field_layout = base.layout().field(self, field);
+
+        // Offset may need adjustment for unsized fields.
+        let (meta, offset) = if field_layout.is_unsized() {
+            assert!(!base.layout().is_sized());
+            let base_meta = base.meta();
+            // Re-use parent metadata to determine dynamic field layout.
+            // With custom DSTs, this *will* execute user-defined code, but the same
+            // happens at run-time so that's okay.
+            match self.size_and_align_of(&base_meta, &field_layout)? {
+                Some((_, align)) => {
+                    // For packed types, we need to cap alignment.
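+                    // (E.g. if the tail's natural alignment is 4 but the struct is
+                    // `#[repr(packed(2))]`, the offset is computed with alignment 2.)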
+                    let align = if let ty::Adt(def, _) = base.layout().ty.kind()
+                        && let Some(packed) = def.repr().pack
+                    {
+                        align.min(packed)
+                    } else {
+                        align
+                    };
+                    (base_meta, offset.align_to(align))
+                }
+                None if offset == Size::ZERO => {
+                    // If the offset is 0, then rounding it up to alignment wouldn't change anything,
+                    // so we can do this even for types where we cannot determine the alignment.
+                    (base_meta, offset)
+                }
+                None => {
+                    // We don't know the alignment of this field, so we cannot adjust.
+                    throw_unsup_format!("`extern type` does not have a known offset")
+                }
+            }
+        } else {
+            // base_meta could be present; we might be accessing a sized field of an unsized
+            // struct.
+            (MemPlaceMeta::None, offset)
+        };
+
+        base.offset_with_meta(offset, OffsetMode::Inbounds, meta, field_layout, self)
+    }
+
+    /// Downcasting to an enum variant.
+    pub fn project_downcast<P: Projectable<'tcx, M::Provenance>>(
+        &self,
+        base: &P,
+        variant: VariantIdx,
+    ) -> InterpResult<'tcx, P> {
+        assert!(!base.meta().has_meta());
+        // Downcasts only change the layout.
+        // (In particular, no check about whether this is even the active variant -- that's by design,
+        // see https://github.com/rust-lang/rust/issues/93688#issuecomment-1032929496.)
+        // So we just "offset" by 0.
+        let layout = base.layout().for_variant(self, variant);
+        // This variant may in fact be uninhabited.
+        // See <https://github.com/rust-lang/rust/issues/120337>.
+
+        // This cannot be `transmute` as variants *can* have a smaller size than the entire enum.
+        base.offset(Size::ZERO, layout, self)
+    }
+
+    /// Compute the offset and field layout for accessing the given index.
+    pub fn project_index<P: Projectable<'tcx, M::Provenance>>(
+        &self,
+        base: &P,
+        index: u64,
+    ) -> InterpResult<'tcx, P> {
+        // Not using the layout method because we want to compute on u64
+        let (offset, field_layout) = match base.layout().fields {
+            abi::FieldsShape::Array { stride, count: _ } => {
+                // `count` is nonsense for slices, use the dynamic length instead.
+                let len = base.len(self)?;
+                if index >= len {
+                    // This can only be reached in ConstProp and non-rustc-MIR.
+                    throw_ub!(BoundsCheckFailed { len, index });
+                }
+                let offset = stride * index; // `Size` multiplication
+                // All fields have the same layout.
+                let field_layout = base.layout().field(self, 0);
+                (offset, field_layout)
+            }
+            _ => span_bug!(
+                self.cur_span(),
+                "`project_index` called on non-array type {:?}",
+                base.layout().ty
+            ),
+        };
+
+        base.offset(offset, field_layout, self)
+    }
+
+    fn project_constant_index<P: Projectable<'tcx, M::Provenance>>(
+        &self,
+        base: &P,
+        offset: u64,
+        min_length: u64,
+        from_end: bool,
+    ) -> InterpResult<'tcx, P> {
+        let n = base.len(self)?;
+        if n < min_length {
+            // This can only be reached in ConstProp and non-rustc-MIR.
+            throw_ub!(BoundsCheckFailed { len: min_length, index: n });
+        }
+
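+        // Illustrative reminder of the MIR semantics (no new logic): `ConstantIndex` comes from
+        // slice patterns; with `from_end == true` the element is `base[len - offset]`, e.g. the
+        // pattern `[.., x]` yields `offset: 1, min_length: 1, from_end: true`.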
+        let index = if from_end {
+            assert!(0 < offset && offset <= min_length);
+            n.checked_sub(offset).unwrap()
+        } else {
+            assert!(offset < min_length);
+            offset
+        };
+
+        self.project_index(base, index)
+    }
+
+    /// Iterates over all fields of an array. Much more efficient than doing the
+    /// same by repeatedly calling `project_index`.
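+    ///
+    /// A usage sketch (illustrative; assumes an `InterpCx` reference `ecx` and an array-typed
+    /// place `array_place`):
+    /// ```ignore (illustrative)
+    /// let mut fields = ecx.project_array_fields(&array_place)?;
+    /// while let Some((idx, field)) = fields.next(ecx)? {
+    ///     // use `idx` and the projected element `field` here
+    /// }
+    /// ```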
+    pub fn project_array_fields<'a, P: Projectable<'tcx, M::Provenance>>(
+        &self,
+        base: &'a P,
+    ) -> InterpResult<'tcx, ArrayIterator<'tcx, 'a, M::Provenance, P>> {
+        let abi::FieldsShape::Array { stride, .. } = base.layout().fields else {
+            span_bug!(self.cur_span(), "project_array_fields: expected an array layout");
+        };
+        let len = base.len(self)?;
+        let field_layout = base.layout().field(self, 0);
+        // Ensure that all the offsets are in-bounds once, up-front.
+        debug!("project_array_fields: {base:?} {len}");
+        base.offset(len * stride, self.layout_of(self.tcx.types.unit).unwrap(), self)?;
+        // Create the iterator.
+        Ok(ArrayIterator { base, range: 0..len, stride, field_layout, _phantom: PhantomData })
+    }
+
+    /// Subslicing: project to a subrange of an array or slice.
+    fn project_subslice<P: Projectable<'tcx, M::Provenance>>(
+        &self,
+        base: &P,
+        from: u64,
+        to: u64,
+        from_end: bool,
+    ) -> InterpResult<'tcx, P> {
+        let len = base.len(self)?; // also asserts that we have a type where this makes sense
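+        // Illustrative reminder of the MIR semantics (no new logic): with `from_end == true` the
+        // subslice is `base[from .. len - to]` (as produced by slice patterns such as
+        // `[_, rest @ .., _]`); with `from_end == false` it is `base[from..to]` on an array.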
+        let actual_to = if from_end {
+            if from.checked_add(to).map_or(true, |to| to > len) {
+                // This can only be reached in ConstProp and non-rustc-MIR.
+                throw_ub!(BoundsCheckFailed { len: len, index: from.saturating_add(to) });
+            }
+            len.checked_sub(to).unwrap()
+        } else {
+            to
+        };
+
+        // Not using layout method because that works with usize, and does not work with slices
+        // (that have count 0 in their layout).
+        let from_offset = match base.layout().fields {
+            abi::FieldsShape::Array { stride, .. } => stride * from, // `Size` multiplication is checked
+            _ => {
+                span_bug!(
+                    self.cur_span(),
+                    "unexpected layout of index access: {:#?}",
+                    base.layout()
+                )
+            }
+        };
+
+        // Compute meta and new layout
+        let inner_len = actual_to.checked_sub(from).unwrap();
+        let (meta, ty) = match base.layout().ty.kind() {
+            // It is not nice to match on the type, but that seems to be the only way to
+            // implement this.
+            ty::Array(inner, _) => {
+                (MemPlaceMeta::None, Ty::new_array(self.tcx.tcx, *inner, inner_len))
+            }
+            ty::Slice(..) => {
+                let len = Scalar::from_target_usize(inner_len, self);
+                (MemPlaceMeta::Meta(len), base.layout().ty)
+            }
+            _ => {
+                span_bug!(
+                    self.cur_span(),
+                    "cannot subslice non-array type: `{:?}`",
+                    base.layout().ty
+                )
+            }
+        };
+        let layout = self.layout_of(ty)?;
+
+        base.offset_with_meta(from_offset, OffsetMode::Inbounds, meta, layout, self)
+    }
+
+    /// Applying a general projection
+    #[instrument(skip(self), level = "trace")]
+    pub fn project<P>(&self, base: &P, proj_elem: mir::PlaceElem<'tcx>) -> InterpResult<'tcx, P>
+    where
+        P: Projectable<'tcx, M::Provenance> + From<MPlaceTy<'tcx, M::Provenance>> + std::fmt::Debug,
+    {
+        use rustc_middle::mir::ProjectionElem::*;
+        Ok(match proj_elem {
+            OpaqueCast(ty) => {
+                span_bug!(self.cur_span(), "OpaqueCast({ty}) encountered after borrowck")
+            }
+            // We don't want anything happening here; this is just a dummy.
+            Subtype(_) => base.transmute(base.layout(), self)?,
+            Field(field, _) => self.project_field(base, field.index())?,
+            Downcast(_, variant) => self.project_downcast(base, variant)?,
+            Deref => self.deref_pointer(&base.to_op(self)?)?.into(),
+            Index(local) => {
+                let layout = self.layout_of(self.tcx.types.usize)?;
+                let n = self.local_to_op(local, Some(layout))?;
+                let n = self.read_target_usize(&n)?;
+                self.project_index(base, n)?
+            }
+            ConstantIndex { offset, min_length, from_end } => {
+                self.project_constant_index(base, offset, min_length, from_end)?
+            }
+            Subslice { from, to, from_end } => self.project_subslice(base, from, to, from_end)?,
+        })
+    }
+}
diff --git a/compiler/rustc_const_eval/src/interpret/step.rs b/compiler/rustc_const_eval/src/interpret/step.rs
new file mode 100644
index 00000000000..54bac70da38
--- /dev/null
+++ b/compiler/rustc_const_eval/src/interpret/step.rs
@@ -0,0 +1,381 @@
+//! This module contains the `InterpCx` methods for executing a single step of the interpreter.
+//!
+//! The main entry point is the `step` method.
+
+use either::Either;
+
+use rustc_index::IndexSlice;
+use rustc_middle::mir;
+use rustc_middle::ty::layout::LayoutOf;
+use rustc_target::abi::{FieldIdx, FIRST_VARIANT};
+
+use super::{ImmTy, InterpCx, InterpResult, Machine, PlaceTy, Projectable, Scalar};
+use crate::util;
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+    /// Returns `true` as long as there are more things to do.
+    ///
+    /// This is used by [priroda](https://github.com/oli-obk/priroda)
+    ///
+    /// This is marked `#[inline(always)]` to work around adversarial codegen when `opt-level = 3`.
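+    ///
+    /// A minimal driver loop sketch (illustrative; assumes a mutable `InterpCx` named `ecx`):
+    /// ```ignore (illustrative)
+    /// while ecx.step()? {}
+    /// ```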
+    #[inline(always)]
+    pub fn step(&mut self) -> InterpResult<'tcx, bool> {
+        if self.stack().is_empty() {
+            return Ok(false);
+        }
+
+        let Either::Left(loc) = self.frame().loc else {
+            // We are unwinding and this fn has no cleanup code.
+            // Just go on unwinding.
+            trace!("unwinding: skipping frame");
+            self.pop_stack_frame(/* unwinding */ true)?;
+            return Ok(true);
+        };
+        let basic_block = &self.body().basic_blocks[loc.block];
+
+        if let Some(stmt) = basic_block.statements.get(loc.statement_index) {
+            let old_frames = self.frame_idx();
+            self.statement(stmt)?;
+            // Make sure we are not updating `statement_index` of the wrong frame.
+            assert_eq!(old_frames, self.frame_idx());
+            // Advance the program counter.
+            self.frame_mut().loc.as_mut().left().unwrap().statement_index += 1;
+            return Ok(true);
+        }
+
+        M::before_terminator(self)?;
+
+        let terminator = basic_block.terminator();
+        self.terminator(terminator)?;
+        Ok(true)
+    }
+
+    /// Runs the interpretation logic for the given `mir::Statement` at the current frame and
+    /// statement counter.
+    ///
+    /// This does NOT move the statement counter forward, the caller has to do that!
+    pub fn statement(&mut self, stmt: &mir::Statement<'tcx>) -> InterpResult<'tcx> {
+        info!("{:?}", stmt);
+
+        use rustc_middle::mir::StatementKind::*;
+
+        match &stmt.kind {
+            Assign(box (place, rvalue)) => self.eval_rvalue_into_place(rvalue, *place)?,
+
+            SetDiscriminant { place, variant_index } => {
+                let dest = self.eval_place(**place)?;
+                self.write_discriminant(*variant_index, &dest)?;
+            }
+
+            Deinit(place) => {
+                let dest = self.eval_place(**place)?;
+                self.write_uninit(&dest)?;
+            }
+
+            // Mark locals as alive
+            StorageLive(local) => {
+                self.storage_live(*local)?;
+            }
+
+            // Mark locals as dead
+            StorageDead(local) => {
+                self.storage_dead(*local)?;
+            }
+
+            // No dynamic semantics attached to `FakeRead`; MIR
+            // interpreter is solely intended for borrowck'ed code.
+            FakeRead(..) => {}
+
+            // Stacked Borrows.
+            Retag(kind, place) => {
+                let dest = self.eval_place(**place)?;
+                M::retag_place_contents(self, *kind, &dest)?;
+            }
+
+            Intrinsic(box intrinsic) => self.emulate_nondiverging_intrinsic(intrinsic)?,
+
+            // Evaluate the place expression, without reading from it.
+            PlaceMention(box place) => {
+                let _ = self.eval_place(*place)?;
+            }
+
+            // This exists purely to guide borrowck lifetime inference, and does not have
+            // an operational effect.
+            AscribeUserType(..) => {}
+
+            // Currently, Miri discards Coverage statements. Coverage statements are only injected
+            // via an optional compile time MIR pass and have no side effects. Since Coverage
+            // statements don't exist at the source level, it is safe for Miri to ignore them, even
+            // for undefined behavior (UB) checks.
+            //
+            // A coverage counter inside a const expression (for example, a counter injected in a
+            // const function) is discarded when the const is evaluated at compile time. Whether
+            // this should change, and/or how to implement a const eval counter, is a subject of the
+            // following issue:
+            //
+            // FIXME(#73156): Handle source code coverage in const eval
+            Coverage(..) => {}
+
+            ConstEvalCounter => {
+                M::increment_const_eval_counter(self)?;
+            }
+
+            // Defined to do nothing. These are added by optimization passes, to avoid changing the
+            // size of MIR constantly.
+            Nop => {}
+        }
+
+        Ok(())
+    }
+
+    /// Evaluate an assignment statement.
+    ///
+    /// There is no separate `eval_rvalue` function. Instead, the code for handling each rvalue
+    /// type writes its results directly into the memory specified by the place.
+    pub fn eval_rvalue_into_place(
+        &mut self,
+        rvalue: &mir::Rvalue<'tcx>,
+        place: mir::Place<'tcx>,
+    ) -> InterpResult<'tcx> {
+        let dest = self.eval_place(place)?;
+        // FIXME: ensure some kind of non-aliasing between LHS and RHS?
+        // Also see https://github.com/rust-lang/rust/issues/68364.
+
+        use rustc_middle::mir::Rvalue::*;
+        match *rvalue {
+            ThreadLocalRef(did) => {
+                let ptr = M::thread_local_static_base_pointer(self, did)?;
+                self.write_pointer(ptr, &dest)?;
+            }
+
+            Use(ref operand) => {
+                // Avoid recomputing the layout
+                let op = self.eval_operand(operand, Some(dest.layout))?;
+                self.copy_op(&op, &dest)?;
+            }
+
+            CopyForDeref(place) => {
+                let op = self.eval_place_to_op(place, Some(dest.layout))?;
+                self.copy_op(&op, &dest)?;
+            }
+
+            BinaryOp(bin_op, box (ref left, ref right)) => {
+                let layout = util::binop_left_homogeneous(bin_op).then_some(dest.layout);
+                let left = self.read_immediate(&self.eval_operand(left, layout)?)?;
+                let layout = util::binop_right_homogeneous(bin_op).then_some(left.layout);
+                let right = self.read_immediate(&self.eval_operand(right, layout)?)?;
+                self.binop_ignore_overflow(bin_op, &left, &right, &dest)?;
+            }
+
+            CheckedBinaryOp(bin_op, box (ref left, ref right)) => {
+                // Due to the extra boolean in the result, we can never reuse the `dest.layout`.
+                let left = self.read_immediate(&self.eval_operand(left, None)?)?;
+                let layout = util::binop_right_homogeneous(bin_op).then_some(left.layout);
+                let right = self.read_immediate(&self.eval_operand(right, layout)?)?;
+                self.binop_with_overflow(bin_op, &left, &right, &dest)?;
+            }
+
+            UnaryOp(un_op, ref operand) => {
+                // The operand always has the same type as the result.
+                let val = self.read_immediate(&self.eval_operand(operand, Some(dest.layout))?)?;
+                let val = self.wrapping_unary_op(un_op, &val)?;
+                assert_eq!(val.layout, dest.layout, "layout mismatch for result of {un_op:?}");
+                self.write_immediate(*val, &dest)?;
+            }
+
+            Aggregate(box ref kind, ref operands) => {
+                self.write_aggregate(kind, operands, &dest)?;
+            }
+
+            Repeat(ref operand, _) => {
+                self.write_repeat(operand, &dest)?;
+            }
+
+            Len(place) => {
+                let src = self.eval_place(place)?;
+                let len = src.len(self)?;
+                self.write_scalar(Scalar::from_target_usize(len, self), &dest)?;
+            }
+
+            Ref(_, borrow_kind, place) => {
+                let src = self.eval_place(place)?;
+                let place = self.force_allocation(&src)?;
+                let val = ImmTy::from_immediate(place.to_ref(self), dest.layout);
+                // A fresh reference was created, make sure it gets retagged.
+                let val = M::retag_ptr_value(
+                    self,
+                    if borrow_kind.allows_two_phase_borrow() {
+                        mir::RetagKind::TwoPhase
+                    } else {
+                        mir::RetagKind::Default
+                    },
+                    &val,
+                )?;
+                self.write_immediate(*val, &dest)?;
+            }
+
+            AddressOf(_, place) => {
+                // Figure out whether this is an addr_of of an already raw place.
+                let place_base_raw = if place.is_indirect_first_projection() {
+                    let ty = self.frame().body.local_decls[place.local].ty;
+                    ty.is_unsafe_ptr()
+                } else {
+                    // Not a deref, and thus not raw.
+                    false
+                };
+
+                let src = self.eval_place(place)?;
+                let place = self.force_allocation(&src)?;
+                let mut val = ImmTy::from_immediate(place.to_ref(self), dest.layout);
+                if !place_base_raw {
+                    // If this was not already raw, it needs retagging.
+                    val = M::retag_ptr_value(self, mir::RetagKind::Raw, &val)?;
+                }
+                self.write_immediate(*val, &dest)?;
+            }
+
+            NullaryOp(ref null_op, ty) => {
+                let ty = self.instantiate_from_current_frame_and_normalize_erasing_regions(ty)?;
+                let layout = self.layout_of(ty)?;
+                if let mir::NullOp::SizeOf | mir::NullOp::AlignOf = null_op
+                    && layout.is_unsized()
+                {
+                    span_bug!(
+                        self.frame().current_span(),
+                        "{null_op:?} MIR operator called for unsized type {ty}",
+                    );
+                }
+                let val = match null_op {
+                    mir::NullOp::SizeOf => {
+                        let val = layout.size.bytes();
+                        Scalar::from_target_usize(val, self)
+                    }
+                    mir::NullOp::AlignOf => {
+                        let val = layout.align.abi.bytes();
+                        Scalar::from_target_usize(val, self)
+                    }
+                    mir::NullOp::OffsetOf(fields) => {
+                        let val = layout.offset_of_subfield(self, fields.iter()).bytes();
+                        Scalar::from_target_usize(val, self)
+                    }
+                    mir::NullOp::UbCheck(kind) => {
+                        // We want to enable checks for library UB, because the interpreter doesn't
+                        // know about those on its own.
+                        // But we want to disable checks for language UB, because the interpreter
+                        // has its own better checks for that.
+                        let should_check = match kind {
+                            mir::UbKind::LibraryUb => self.tcx.sess.opts.debug_assertions,
+                            mir::UbKind::LanguageUb => false,
+                        };
+                        Scalar::from_bool(should_check)
+                    }
+                };
+                self.write_scalar(val, &dest)?;
+            }
+
+            ShallowInitBox(ref operand, _) => {
+                let src = self.eval_operand(operand, None)?;
+                let v = self.read_immediate(&src)?;
+                self.write_immediate(*v, &dest)?;
+            }
+
+            Cast(cast_kind, ref operand, cast_ty) => {
+                let src = self.eval_operand(operand, None)?;
+                let cast_ty =
+                    self.instantiate_from_current_frame_and_normalize_erasing_regions(cast_ty)?;
+                self.cast(&src, cast_kind, cast_ty, &dest)?;
+            }
+
+            Discriminant(place) => {
+                let op = self.eval_place_to_op(place, None)?;
+                let variant = self.read_discriminant(&op)?;
+                let discr = self.discriminant_for_variant(op.layout.ty, variant)?;
+                self.write_immediate(*discr, &dest)?;
+            }
+        }
+
+        trace!("{:?}", self.dump_place(&dest));
+
+        Ok(())
+    }
+
+    /// Writes the aggregate to the destination.
+    #[instrument(skip(self), level = "trace")]
+    fn write_aggregate(
+        &mut self,
+        kind: &mir::AggregateKind<'tcx>,
+        operands: &IndexSlice<FieldIdx, mir::Operand<'tcx>>,
+        dest: &PlaceTy<'tcx, M::Provenance>,
+    ) -> InterpResult<'tcx> {
+        self.write_uninit(dest)?; // make sure all the padding ends up as uninit
+        let (variant_index, variant_dest, active_field_index) = match *kind {
+            mir::AggregateKind::Adt(_, variant_index, _, _, active_field_index) => {
+                let variant_dest = self.project_downcast(dest, variant_index)?;
+                (variant_index, variant_dest, active_field_index)
+            }
+            _ => (FIRST_VARIANT, dest.clone(), None),
+        };
+        if active_field_index.is_some() {
+            assert_eq!(operands.len(), 1);
+        }
+        for (field_index, operand) in operands.iter_enumerated() {
+            let field_index = active_field_index.unwrap_or(field_index);
+            let field_dest = self.project_field(&variant_dest, field_index.as_usize())?;
+            let op = self.eval_operand(operand, Some(field_dest.layout))?;
+            self.copy_op(&op, &field_dest)?;
+        }
+        self.write_discriminant(variant_index, dest)
+    }
+
+    /// Repeats `operand` into the destination. `dest` must have array type, and that type
+    /// determines how often `operand` is repeated.
+    fn write_repeat(
+        &mut self,
+        operand: &mir::Operand<'tcx>,
+        dest: &PlaceTy<'tcx, M::Provenance>,
+    ) -> InterpResult<'tcx> {
+        let src = self.eval_operand(operand, None)?;
+        assert!(src.layout.is_sized());
+        let dest = self.force_allocation(&dest)?;
+        let length = dest.len(self)?;
+
+        if length == 0 {
+            // Nothing to copy... but let's still make sure that `dest` as a place is valid.
+            self.get_place_alloc_mut(&dest)?;
+        } else {
+            // Write the src to the first element.
+            let first = self.project_index(&dest, 0)?;
+            self.copy_op(&src, &first)?;
+
+            // This is performance-sensitive code for big static/const arrays! So we
+            // avoid writing each operand individually and instead just make many copies
+            // of the first element.
+            let elem_size = first.layout.size;
+            let first_ptr = first.ptr();
+            let rest_ptr = first_ptr.offset(elem_size, self)?;
+            // No alignment requirement since `copy_op` above already checked it.
+            self.mem_copy_repeatedly(
+                first_ptr,
+                rest_ptr,
+                elem_size,
+                length - 1,
+                /*nonoverlapping:*/ true,
+            )?;
+        }
+
+        Ok(())
+    }
+
+    /// Evaluate the given terminator. Will also adjust the stack frame and statement position accordingly.
+    fn terminator(&mut self, terminator: &mir::Terminator<'tcx>) -> InterpResult<'tcx> {
+        info!("{:?}", terminator.kind);
+
+        self.eval_terminator(terminator)?;
+        if !self.stack().is_empty() {
+            if let Either::Left(loc) = self.frame().loc {
+                info!("// executing {:?}", loc.block);
+            }
+        }
+        Ok(())
+    }
+}
diff --git a/compiler/rustc_const_eval/src/interpret/terminator.rs b/compiler/rustc_const_eval/src/interpret/terminator.rs
new file mode 100644
index 00000000000..82fb7ff1840
--- /dev/null
+++ b/compiler/rustc_const_eval/src/interpret/terminator.rs
@@ -0,0 +1,973 @@
+use std::borrow::Cow;
+
+use either::Either;
+
+use rustc_middle::{
+    mir,
+    ty::{
+        self,
+        layout::{FnAbiOf, IntegerExt, LayoutOf, TyAndLayout},
+        AdtDef, Instance, Ty,
+    },
+};
+use rustc_span::{source_map::Spanned, sym};
+use rustc_target::abi::{self, FieldIdx};
+use rustc_target::abi::{
+    call::{ArgAbi, FnAbi, PassMode},
+    Integer,
+};
+use rustc_target::spec::abi::Abi;
+
+use super::{
+    CtfeProvenance, FnVal, ImmTy, InterpCx, InterpResult, MPlaceTy, Machine, OpTy, PlaceTy,
+    Projectable, Provenance, Scalar, StackPopCleanup,
+};
+use crate::fluent_generated as fluent;
+
+/// An argument passed to a function.
+#[derive(Clone, Debug)]
+pub enum FnArg<'tcx, Prov: Provenance = CtfeProvenance> {
+    /// Pass a copy of the given operand.
+    Copy(OpTy<'tcx, Prov>),
+    /// Allow for the argument to be passed in-place: destroy the value originally stored at that place and
+    /// make the place inaccessible for the duration of the function call.
+    InPlace(MPlaceTy<'tcx, Prov>),
+}
+
+impl<'tcx, Prov: Provenance> FnArg<'tcx, Prov> {
+    pub fn layout(&self) -> &TyAndLayout<'tcx> {
+        match self {
+            FnArg::Copy(op) => &op.layout,
+            FnArg::InPlace(mplace) => &mplace.layout,
+        }
+    }
+}
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+    /// Make a copy of the given fn_arg. If it is `InPlace`, it is degenerated to a copy; no
+    /// protection of the original memory occurs.
+    pub fn copy_fn_arg(&self, arg: &FnArg<'tcx, M::Provenance>) -> OpTy<'tcx, M::Provenance> {
+        match arg {
+            FnArg::Copy(op) => op.clone(),
+            FnArg::InPlace(mplace) => mplace.clone().into(),
+        }
+    }
+
+    /// Make a copy of the given fn_args. Any `InPlace` arguments are degenerated to copies; no
+    /// protection of the original memory occurs.
+    pub fn copy_fn_args(
+        &self,
+        args: &[FnArg<'tcx, M::Provenance>],
+    ) -> Vec<OpTy<'tcx, M::Provenance>> {
+        args.iter().map(|fn_arg| self.copy_fn_arg(fn_arg)).collect()
+    }
+
+    pub fn fn_arg_field(
+        &self,
+        arg: &FnArg<'tcx, M::Provenance>,
+        field: usize,
+    ) -> InterpResult<'tcx, FnArg<'tcx, M::Provenance>> {
+        Ok(match arg {
+            FnArg::Copy(op) => FnArg::Copy(self.project_field(op, field)?),
+            FnArg::InPlace(mplace) => FnArg::InPlace(self.project_field(mplace, field)?),
+        })
+    }
+
+    pub(super) fn eval_terminator(
+        &mut self,
+        terminator: &mir::Terminator<'tcx>,
+    ) -> InterpResult<'tcx> {
+        use rustc_middle::mir::TerminatorKind::*;
+        match terminator.kind {
+            Return => {
+                self.pop_stack_frame(/* unwinding */ false)?
+            }
+
+            Goto { target } => self.go_to_block(target),
+
+            SwitchInt { ref discr, ref targets } => {
+                let discr = self.read_immediate(&self.eval_operand(discr, None)?)?;
+                trace!("SwitchInt({:?})", *discr);
+
+                // Branch to the `otherwise` case by default, if no match is found.
+                let mut target_block = targets.otherwise();
+
+                for (const_int, target) in targets.iter() {
+                    // Compare using MIR BinOp::Eq, to also support pointer values.
+                    // (Avoiding `self.binary_op` as that does some redundant layout computation.)
+                    let res = self.wrapping_binary_op(
+                        mir::BinOp::Eq,
+                        &discr,
+                        &ImmTy::from_uint(const_int, discr.layout),
+                    )?;
+                    if res.to_scalar().to_bool()? {
+                        target_block = target;
+                        break;
+                    }
+                }
+
+                self.go_to_block(target_block);
+            }
+
+            Call {
+                ref func,
+                ref args,
+                destination,
+                target,
+                unwind,
+                call_source: _,
+                fn_span: _,
+            } => {
+                let old_stack = self.frame_idx();
+                let old_loc = self.frame().loc;
+                let func = self.eval_operand(func, None)?;
+                let args = self.eval_fn_call_arguments(args)?;
+
+                let fn_sig_binder = func.layout.ty.fn_sig(*self.tcx);
+                let fn_sig =
+                    self.tcx.normalize_erasing_late_bound_regions(self.param_env, fn_sig_binder);
+                let extra_args = &args[fn_sig.inputs().len()..];
+                let extra_args =
+                    self.tcx.mk_type_list_from_iter(extra_args.iter().map(|arg| arg.layout().ty));
+
+                let (fn_val, fn_abi, with_caller_location) = match *func.layout.ty.kind() {
+                    ty::FnPtr(_sig) => {
+                        let fn_ptr = self.read_pointer(&func)?;
+                        let fn_val = self.get_ptr_fn(fn_ptr)?;
+                        (fn_val, self.fn_abi_of_fn_ptr(fn_sig_binder, extra_args)?, false)
+                    }
+                    ty::FnDef(def_id, args) => {
+                        let instance = self.resolve(def_id, args)?;
+                        (
+                            FnVal::Instance(instance),
+                            self.fn_abi_of_instance(instance, extra_args)?,
+                            instance.def.requires_caller_location(*self.tcx),
+                        )
+                    }
+                    _ => span_bug!(
+                        terminator.source_info.span,
+                        "invalid callee of type {}",
+                        func.layout.ty
+                    ),
+                };
+
+                let destination = self.force_allocation(&self.eval_place(destination)?)?;
+                self.eval_fn_call(
+                    fn_val,
+                    (fn_sig.abi, fn_abi),
+                    &args,
+                    with_caller_location,
+                    &destination,
+                    target,
+                    if fn_abi.can_unwind { unwind } else { mir::UnwindAction::Unreachable },
+                )?;
+                // Sanity-check that `eval_fn_call` either pushed a new frame or
+                // did a jump to another block.
+                if self.frame_idx() == old_stack && self.frame().loc == old_loc {
+                    span_bug!(terminator.source_info.span, "evaluating this call made no progress");
+                }
+            }
+
+            Drop { place, target, unwind, replace: _ } => {
+                let frame = self.frame();
+                let ty = place.ty(&frame.body.local_decls, *self.tcx).ty;
+                let ty = self.instantiate_from_frame_and_normalize_erasing_regions(frame, ty)?;
+                let instance = Instance::resolve_drop_in_place(*self.tcx, ty);
+                if let ty::InstanceDef::DropGlue(_, None) = instance.def {
+                    // This is the branch we enter if and only if the dropped type has no drop glue
+                    // whatsoever. This can happen as a result of monomorphizing a drop of a
+                    // generic. In order to make sure that generic and non-generic code behaves
+                    // roughly the same (and in keeping with MIR semantics) we do nothing here.
+                    self.go_to_block(target);
+                    return Ok(());
+                }
+                let place = self.eval_place(place)?;
+                trace!("TerminatorKind::drop: {:?}, type {}", place, ty);
+                self.drop_in_place(&place, instance, target, unwind)?;
+            }
+
+            Assert { ref cond, expected, ref msg, target, unwind } => {
+                let ignored =
+                    M::ignore_optional_overflow_checks(self) && msg.is_optional_overflow_check();
+                let cond_val = self.read_scalar(&self.eval_operand(cond, None)?)?.to_bool()?;
+                if ignored || expected == cond_val {
+                    self.go_to_block(target);
+                } else {
+                    M::assert_panic(self, msg, unwind)?;
+                }
+            }
+
+            UnwindTerminate(reason) => {
+                M::unwind_terminate(self, reason)?;
+            }
+
+            // When we encounter Resume, we've finished unwinding
+            // cleanup for the current stack frame. We pop it in order
+            // to continue unwinding the next frame
+            UnwindResume => {
+                trace!("unwinding: resuming from cleanup");
+                // By definition, a Resume terminator means
+                // that we're unwinding
+                self.pop_stack_frame(/* unwinding */ true)?;
+                return Ok(());
+            }
+
+            // It is UB to ever encounter this.
+            Unreachable => throw_ub!(Unreachable),
+
+            // These should never occur for MIR we actually run.
+            FalseEdge { .. } | FalseUnwind { .. } | Yield { .. } | CoroutineDrop => span_bug!(
+                terminator.source_info.span,
+                "{:#?} should have been eliminated by MIR pass",
+                terminator.kind
+            ),
+
+            InlineAsm { template, ref operands, options, ref targets, .. } => {
+                M::eval_inline_asm(self, template, operands, options, targets)?;
+            }
+        }
+
+        Ok(())
+    }
+
+    /// Evaluate the arguments of a function call
+    pub(super) fn eval_fn_call_arguments(
+        &self,
+        ops: &[Spanned<mir::Operand<'tcx>>],
+    ) -> InterpResult<'tcx, Vec<FnArg<'tcx, M::Provenance>>> {
+        ops.iter()
+            .map(|op| {
+                let arg = match &op.node {
+                    mir::Operand::Copy(_) | mir::Operand::Constant(_) => {
+                        // Make a regular copy.
+                        let op = self.eval_operand(&op.node, None)?;
+                        FnArg::Copy(op)
+                    }
+                    mir::Operand::Move(place) => {
+                        // If this place lives in memory, preserve its location.
+                        // We call `place_to_op` which will be an `MPlaceTy` whenever there exists
+                        // an mplace for this place. (This is in contrast to `PlaceTy::as_mplace_or_local`
+                        // which can return a local even if that has an mplace.)
+                        let place = self.eval_place(*place)?;
+                        let op = self.place_to_op(&place)?;
+
+                        match op.as_mplace_or_imm() {
+                            Either::Left(mplace) => FnArg::InPlace(mplace),
+                            Either::Right(_imm) => {
+                                // This argument doesn't live in memory, so there's no place
+                                // to make inaccessible during the call.
+                                // We rely on there not being any stray `PlaceTy` that would let the
+                                // caller directly access this local!
+                                // This is also crucial for tail calls, where we want the `FnArg` to
+                                // stay valid when the old stack frame gets popped.
+                                FnArg::Copy(op)
+                            }
+                        }
+                    }
+                };
+
+                Ok(arg)
+            })
+            .collect()
+    }
+
+    /// Find the wrapped inner type of a transparent wrapper.
+    /// Must not be called on 1-ZST (as they don't have a uniquely defined "wrapped field").
+    ///
+    /// We work with `TyAndLayout` here since that makes it much easier to iterate over all fields.
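+    ///
+    /// E.g. (illustrative) `#[repr(transparent)] struct Wrapper(u32);` unfolds to `u32`,
+    /// provided `may_unfold` permits unfolding `Wrapper`.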
+    fn unfold_transparent(
+        &self,
+        layout: TyAndLayout<'tcx>,
+        may_unfold: impl Fn(AdtDef<'tcx>) -> bool,
+    ) -> TyAndLayout<'tcx> {
+        match layout.ty.kind() {
+            ty::Adt(adt_def, _) if adt_def.repr().transparent() && may_unfold(*adt_def) => {
+                assert!(!adt_def.is_enum());
+                // Find the non-1-ZST field, and recurse.
+                let (_, field) = layout.non_1zst_field(self).unwrap();
+                self.unfold_transparent(field, may_unfold)
+            }
+            // Not a transparent type, no further unfolding.
+            _ => layout,
+        }
+    }
+
+    /// Unwrap types that are guaranteed the null-pointer optimization (NPO).
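+    /// E.g. (illustrative) `Option<&T>` unfolds to `&T`, and `Option<NonZeroU8>` to `u8`;
+    /// types without a guaranteed NPO are returned unchanged.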
+    fn unfold_npo(&self, layout: TyAndLayout<'tcx>) -> InterpResult<'tcx, TyAndLayout<'tcx>> {
+        // Check if this is `Option` wrapping some type.
+        let inner = match layout.ty.kind() {
+            ty::Adt(def, args) if self.tcx.is_diagnostic_item(sym::Option, def.did()) => {
+                args[0].as_type().unwrap()
+            }
+            _ => {
+                // Not an `Option`.
+                return Ok(layout);
+            }
+        };
+        let inner = self.layout_of(inner)?;
+        // Check if the inner type is one of the NPO-guaranteed ones.
+        // For that we first unpeel transparent *structs* (but not unions).
+        let is_npo = |def: AdtDef<'tcx>| {
+            self.tcx.has_attr(def.did(), sym::rustc_nonnull_optimization_guaranteed)
+        };
+        let inner = self.unfold_transparent(inner, /* may_unfold */ |def| {
+            // Stop at NPO types so that we don't miss that attribute in the check below!
+            def.is_struct() && !is_npo(def)
+        });
+        Ok(match inner.ty.kind() {
+            ty::Ref(..) | ty::FnPtr(..) => {
+                // Option<&T> behaves like &T, and same for fn()
+                inner
+            }
+            ty::Adt(def, _) if is_npo(*def) => {
+                // Once we found a `nonnull_optimization_guaranteed` type, further strip off
+                // newtype structs from it to find the underlying ABI type.
+                self.unfold_transparent(inner, /* may_unfold */ |def| def.is_struct())
+            }
+            _ => {
+                // Everything else we do not unfold.
+                layout
+            }
+        })
+    }
+
+    /// Check if these two layouts look like they are fn-ABI-compatible.
+    /// (We also compare the `PassMode`, so this doesn't have to check everything. But it turns out
+    /// that only checking the `PassMode` is insufficient.)
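+    ///
+    /// E.g. (illustrative) `&str` vs `&[u8]` is accepted here since both are wide pointers
+    /// with `usize` metadata, while `u32` vs `u64` is rejected.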
+    fn layout_compat(
+        &self,
+        caller: TyAndLayout<'tcx>,
+        callee: TyAndLayout<'tcx>,
+    ) -> InterpResult<'tcx, bool> {
+        // Fast path: equal types are definitely compatible.
+        if caller.ty == callee.ty {
+            return Ok(true);
+        }
+        // 1-ZST are compatible with all 1-ZST (and with nothing else).
+        if caller.is_1zst() || callee.is_1zst() {
+            return Ok(caller.is_1zst() && callee.is_1zst());
+        }
+        // Unfold newtypes and NPO optimizations.
+        let unfold = |layout: TyAndLayout<'tcx>| {
+            self.unfold_npo(self.unfold_transparent(layout, /* may_unfold */ |_def| true))
+        };
+        let caller = unfold(caller)?;
+        let callee = unfold(callee)?;
+        // Now see if these inner types are compatible.
+
+        // Compatible pointer types. For thin pointers, we have to accept even non-`repr(transparent)`
+        // things as compatible due to `DispatchFromDyn`. For instance, `Rc<i32>` and `*mut i32`
+        // must be compatible. So we just accept everything with Pointer ABI as compatible,
+        // even if this will accept some code that is not stably guaranteed to work.
+        // This also handles function pointers.
+        let thin_pointer = |layout: TyAndLayout<'tcx>| match layout.abi {
+            abi::Abi::Scalar(s) => match s.primitive() {
+                abi::Primitive::Pointer(addr_space) => Some(addr_space),
+                _ => None,
+            },
+            _ => None,
+        };
+        if let (Some(caller), Some(callee)) = (thin_pointer(caller), thin_pointer(callee)) {
+            return Ok(caller == callee);
+        }
+        // For wide pointers we have to get the pointee type.
+        let pointee_ty = |ty: Ty<'tcx>| -> InterpResult<'tcx, Option<Ty<'tcx>>> {
+            // We cannot use `builtin_deref` here since we need to reject `Box<T, MyAlloc>`.
+            Ok(Some(match ty.kind() {
+                ty::Ref(_, ty, _) => *ty,
+                ty::RawPtr(mt) => mt.ty,
+                // We only accept `Box` with the default allocator.
+                _ if ty.is_box_global(*self.tcx) => ty.boxed_ty(),
+                _ => return Ok(None),
+            }))
+        };
+        if let (Some(caller), Some(callee)) = (pointee_ty(caller.ty)?, pointee_ty(callee.ty)?) {
+            // This is okay if they have the same metadata type.
+            let meta_ty = |ty: Ty<'tcx>| {
+                // Even if `ty` is normalized, the search for the unsized tail will project
+                // to fields, which can yield non-normalized types. So we need to provide a
+                // normalization function.
+                let normalize = |ty| self.tcx.normalize_erasing_regions(self.param_env, ty);
+                ty.ptr_metadata_ty(*self.tcx, normalize)
+            };
+            return Ok(meta_ty(caller) == meta_ty(callee));
+        }
+
+        // Compatible integer types (in particular, usize vs ptr-sized-u32/u64).
+        // `char` counts as `u32`.
+        let int_ty = |ty: Ty<'tcx>| {
+            Some(match ty.kind() {
+                ty::Int(ity) => (Integer::from_int_ty(&self.tcx, *ity), /* signed */ true),
+                ty::Uint(uty) => (Integer::from_uint_ty(&self.tcx, *uty), /* signed */ false),
+                ty::Char => (Integer::I32, /* signed */ false),
+                _ => return None,
+            })
+        };
+        if let (Some(caller), Some(callee)) = (int_ty(caller.ty), int_ty(callee.ty)) {
+            // This is okay if they are the same integer type.
+            return Ok(caller == callee);
+        }
+
+        // Fall back to exact equality.
+        // FIXME: We are missing the rules for "repr(C) wrapping compatible types".
+        Ok(caller == callee)
+    }
+
+    fn check_argument_compat(
+        &self,
+        caller_abi: &ArgAbi<'tcx, Ty<'tcx>>,
+        callee_abi: &ArgAbi<'tcx, Ty<'tcx>>,
+    ) -> InterpResult<'tcx, bool> {
+        // We do not want to accept things as ABI-compatible that just "happen to be" compatible on the current target,
+        // so we implement a type-based check that reflects the guaranteed rules for ABI compatibility.
+        if self.layout_compat(caller_abi.layout, callee_abi.layout)? {
+            // Ensure that our checks imply actual ABI compatibility for this concrete call.
+            assert!(caller_abi.eq_abi(callee_abi));
+            return Ok(true);
+        } else {
+            trace!(
+                "check_argument_compat: incompatible ABIs:\ncaller: {:?}\ncallee: {:?}",
+                caller_abi,
+                callee_abi
+            );
+            return Ok(false);
+        }
+    }
+
+    /// Initialize a single callee argument, checking the types for compatibility.
+    fn pass_argument<'x, 'y>(
+        &mut self,
+        caller_args: &mut impl Iterator<
+            Item = (&'x FnArg<'tcx, M::Provenance>, &'y ArgAbi<'tcx, Ty<'tcx>>),
+        >,
+        callee_abi: &ArgAbi<'tcx, Ty<'tcx>>,
+        callee_arg: &mir::Place<'tcx>,
+        callee_ty: Ty<'tcx>,
+        already_live: bool,
+    ) -> InterpResult<'tcx>
+    where
+        'tcx: 'x,
+        'tcx: 'y,
+    {
+        assert_eq!(callee_ty, callee_abi.layout.ty);
+        if matches!(callee_abi.mode, PassMode::Ignore) {
+            // This one is skipped. Still must be made live though!
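+            // (Illustrative: under the Rust ABI this is the case for 1-ZST arguments such as `()`.)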
+            if !already_live {
+                self.storage_live(callee_arg.as_local().unwrap())?;
+            }
+            return Ok(());
+        }
+        // Find next caller arg.
+        let Some((caller_arg, caller_abi)) = caller_args.next() else {
+            throw_ub_custom!(fluent::const_eval_not_enough_caller_args);
+        };
+        assert_eq!(caller_arg.layout().layout, caller_abi.layout.layout);
+        // Sadly we cannot assert that `caller_arg.layout().ty` and `caller_abi.layout.ty` are
+        // equal; in closures the types sometimes differ. We just hope that `caller_abi` is the
+        // right type to print to the user.
+
+        // Check compatibility
+        if !self.check_argument_compat(caller_abi, callee_abi)? {
+            throw_ub!(AbiMismatchArgument {
+                caller_ty: caller_abi.layout.ty,
+                callee_ty: callee_abi.layout.ty
+            });
+        }
+        // We work with a copy of the argument for now; if this is in-place argument passing, we
+        // will later protect the source it comes from. This means the callee cannot observe if we
+        // did in-place or by-copy argument passing, except for pointer equality tests.
+        let caller_arg_copy = self.copy_fn_arg(caller_arg);
+        if !already_live {
+            let local = callee_arg.as_local().unwrap();
+            let meta = caller_arg_copy.meta();
+            // `check_argument_compat` ensures that if metadata is needed, both have the same type,
+            // so we know they will use the metadata the same way.
+            assert!(!meta.has_meta() || caller_arg_copy.layout.ty == callee_ty);
+
+            self.storage_live_dyn(local, meta)?;
+        }
+        // Now we can finally actually evaluate the callee place.
+        let callee_arg = self.eval_place(*callee_arg)?;
+        // We allow some transmutes here.
+        // FIXME: Depending on the PassMode, this should reset some padding to uninitialized. (This
+        // is true for all `copy_op`, but there are a lot of special cases for argument passing
+        // specifically.)
+        self.copy_op_allow_transmute(&caller_arg_copy, &callee_arg)?;
+        // If this was an in-place pass, protect the place it comes from for the duration of the call.
+        if let FnArg::InPlace(mplace) = caller_arg {
+            M::protect_in_place_function_argument(self, mplace)?;
+        }
+        Ok(())
+    }
+
+    /// Call this function -- pushing the stack frame and initializing the arguments.
+    ///
+    /// `caller_fn_abi` is used to determine if all the arguments are passed the proper way.
+    /// However, we also need `caller_abi` to determine if we need to do untupling of arguments.
+    ///
+    /// `with_caller_location` indicates whether the caller passed a caller location. Miri
+    /// implements caller locations without argument passing, but to match `FnAbi` we need to know
+    /// when those arguments are present.
+    pub(crate) fn eval_fn_call(
+        &mut self,
+        fn_val: FnVal<'tcx, M::ExtraFnVal>,
+        (caller_abi, caller_fn_abi): (Abi, &FnAbi<'tcx, Ty<'tcx>>),
+        args: &[FnArg<'tcx, M::Provenance>],
+        with_caller_location: bool,
+        destination: &MPlaceTy<'tcx, M::Provenance>,
+        target: Option<mir::BasicBlock>,
+        mut unwind: mir::UnwindAction,
+    ) -> InterpResult<'tcx> {
+        trace!("eval_fn_call: {:#?}", fn_val);
+
+        let instance = match fn_val {
+            FnVal::Instance(instance) => instance,
+            FnVal::Other(extra) => {
+                return M::call_extra_fn(
+                    self,
+                    extra,
+                    caller_abi,
+                    args,
+                    destination,
+                    target,
+                    unwind,
+                );
+            }
+        };
+
+        match instance.def {
+            ty::InstanceDef::Intrinsic(def_id) => {
+                assert!(self.tcx.intrinsic(def_id).is_some());
+                // FIXME: Should `InPlace` arguments be reset to uninit?
+                M::call_intrinsic(
+                    self,
+                    instance,
+                    &self.copy_fn_args(args),
+                    destination,
+                    target,
+                    unwind,
+                )
+            }
+            ty::InstanceDef::VTableShim(..)
+            | ty::InstanceDef::ReifyShim(..)
+            | ty::InstanceDef::ClosureOnceShim { .. }
+            | ty::InstanceDef::ConstructCoroutineInClosureShim { .. }
+            | ty::InstanceDef::CoroutineKindShim { .. }
+            | ty::InstanceDef::FnPtrShim(..)
+            | ty::InstanceDef::DropGlue(..)
+            | ty::InstanceDef::CloneShim(..)
+            | ty::InstanceDef::FnPtrAddrShim(..)
+            | ty::InstanceDef::ThreadLocalShim(..)
+            | ty::InstanceDef::Item(_) => {
+                // We need MIR for this fn
+                let Some((body, instance)) = M::find_mir_or_eval_fn(
+                    self,
+                    instance,
+                    caller_abi,
+                    args,
+                    destination,
+                    target,
+                    unwind,
+                )?
+                else {
+                    return Ok(());
+                };
+
+                // Compute callee information using the `instance` returned by
+                // `find_mir_or_eval_fn`.
+                // FIXME: for variadic support, do we have to somehow determine callee's extra_args?
+                let callee_fn_abi = self.fn_abi_of_instance(instance, ty::List::empty())?;
+
+                if callee_fn_abi.c_variadic || caller_fn_abi.c_variadic {
+                    throw_unsup_format!("calling a c-variadic function is not supported");
+                }
+
+                if M::enforce_abi(self) {
+                    if caller_fn_abi.conv != callee_fn_abi.conv {
+                        throw_ub_custom!(
+                            fluent::const_eval_incompatible_calling_conventions,
+                            callee_conv = format!("{:?}", callee_fn_abi.conv),
+                            caller_conv = format!("{:?}", caller_fn_abi.conv),
+                        )
+                    }
+                }
+
+                // Check that all target features required by the callee (i.e., from
+                // the attribute `#[target_feature(enable = ...)]`) are enabled at
+                // compile time.
+                self.check_fn_target_features(instance)?;
+
+                if !callee_fn_abi.can_unwind {
+                    // The callee cannot unwind, so force the `Unreachable` unwind handling.
+                    unwind = mir::UnwindAction::Unreachable;
+                }
+
+                self.push_stack_frame(
+                    instance,
+                    body,
+                    destination,
+                    StackPopCleanup::Goto { ret: target, unwind },
+                )?;
+
+                // If an error is raised here, pop the frame again to get an accurate backtrace.
+                // To this end, we wrap it all in a `try` block.
+                let res: InterpResult<'tcx> = try {
+                    trace!(
+                        "caller ABI: {:?}, args: {:#?}",
+                        caller_abi,
+                        args.iter()
+                            .map(|arg| (
+                                arg.layout().ty,
+                                match arg {
+                                    FnArg::Copy(op) => format!("copy({op:?})"),
+                                    FnArg::InPlace(mplace) => format!("in-place({mplace:?})"),
+                                }
+                            ))
+                            .collect::<Vec<_>>()
+                    );
+                    trace!(
+                        "spread_arg: {:?}, locals: {:#?}",
+                        body.spread_arg,
+                        body.args_iter()
+                            .map(|local| (
+                                local,
+                                self.layout_of_local(self.frame(), local, None).unwrap().ty,
+                            ))
+                            .collect::<Vec<_>>()
+                    );
+
+                    // In principle, we have two iterators: Where the arguments come from, and where
+                    // they go to.
+
+                    // For where they come from: If the ABI is RustCall, we untuple the
+                    // last incoming argument. These two iterators do not have the same type,
+                    // so to keep the code paths uniform we accept an allocation
+                    // (for RustCall ABI only).
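+                    // (Illustrative: for a closure invoked via `Fn::call(f, (a, b))`, the trailing
+                    // tuple `(a, b)` is the argument that gets untupled here.)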
+                    let caller_args: Cow<'_, [FnArg<'tcx, M::Provenance>]> =
+                        if caller_abi == Abi::RustCall && !args.is_empty() {
+                            // Untuple
+                            let (untuple_arg, args) = args.split_last().unwrap();
+                            trace!("eval_fn_call: Will pass last argument by untupling");
+                            Cow::from(
+                                args.iter()
+                                    .map(|a| Ok(a.clone()))
+                                    .chain(
+                                        (0..untuple_arg.layout().fields.count())
+                                            .map(|i| self.fn_arg_field(untuple_arg, i)),
+                                    )
+                                    .collect::<InterpResult<'_, Vec<_>>>()?,
+                            )
+                        } else {
+                            // Plain arg passing
+                            Cow::from(args)
+                        };
+                    // If `with_caller_location` is set we pretend there is an extra argument (that
+                    // we will not pass).
+                    assert_eq!(
+                        caller_args.len() + if with_caller_location { 1 } else { 0 },
+                        caller_fn_abi.args.len(),
+                        "mismatch between caller ABI and caller arguments",
+                    );
+                    let mut caller_args = caller_args
+                        .iter()
+                        .zip(caller_fn_abi.args.iter())
+                        .filter(|arg_and_abi| !matches!(arg_and_abi.1.mode, PassMode::Ignore));
+
+                    // Now we have to spread them out across the callee's locals,
+                    // taking into account the `spread_arg`. If we could write
+                    // this as a single iterator (that handles `spread_arg`), then
+                    // `pass_argument` would be the loop body. It takes care to
+                    // not advance `caller_args` for ignored arguments.
+                    let mut callee_args_abis = callee_fn_abi.args.iter();
+                    for local in body.args_iter() {
+                        // Construct the destination place for this argument. At this point all
+                        // locals are still dead, so we cannot construct a `PlaceTy`.
+                        let dest = mir::Place::from(local);
+                        // `layout_of_local` does more than just the instantiation we need to get the
+                        // type, but the result gets cached so this avoids calling the instantiation
+                        // query *again* the next time this local is accessed.
+                        let ty = self.layout_of_local(self.frame(), local, None)?.ty;
+                        if Some(local) == body.spread_arg {
+                            // Make the local live once, then fill in the value field by field.
+                            self.storage_live(local)?;
+                            // Must be a tuple
+                            let ty::Tuple(fields) = ty.kind() else {
+                                span_bug!(self.cur_span(), "non-tuple type for `spread_arg`: {ty}")
+                            };
+                            for (i, field_ty) in fields.iter().enumerate() {
+                                let dest = dest.project_deeper(
+                                    &[mir::ProjectionElem::Field(
+                                        FieldIdx::from_usize(i),
+                                        field_ty,
+                                    )],
+                                    *self.tcx,
+                                );
+                                let callee_abi = callee_args_abis.next().unwrap();
+                                self.pass_argument(
+                                    &mut caller_args,
+                                    callee_abi,
+                                    &dest,
+                                    field_ty,
+                                    /* already_live */ true,
+                                )?;
+                            }
+                        } else {
+                            // Normal argument. Cannot mark it as live yet, it might be unsized!
+                            let callee_abi = callee_args_abis.next().unwrap();
+                            self.pass_argument(
+                                &mut caller_args,
+                                callee_abi,
+                                &dest,
+                                ty,
+                                /* already_live */ false,
+                            )?;
+                        }
+                    }
+                    // If the callee needs a caller location, pretend we consume one more argument from the ABI.
+                    if instance.def.requires_caller_location(*self.tcx) {
+                        callee_args_abis.next().unwrap();
+                    }
+                    // Now we should have no more caller args or callee arg ABIs
+                    assert!(
+                        callee_args_abis.next().is_none(),
+                        "mismatch between callee ABI and callee body arguments"
+                    );
+                    if caller_args.next().is_some() {
+                        throw_ub_custom!(fluent::const_eval_too_many_caller_args);
+                    }
+                    // Don't forget to check the return type!
+                    if !self.check_argument_compat(&caller_fn_abi.ret, &callee_fn_abi.ret)? {
+                        throw_ub!(AbiMismatchReturn {
+                            caller_ty: caller_fn_abi.ret.layout.ty,
+                            callee_ty: callee_fn_abi.ret.layout.ty
+                        });
+                    }
+
+                    // Protect return place for in-place return value passing.
+                    M::protect_in_place_function_argument(self, &destination)?;
+
+                    // Don't forget to mark "always live" locals as live.
+                    self.storage_live_for_always_live_locals()?;
+                };
+                match res {
+                    Err(err) => {
+                        self.stack_mut().pop();
+                        Err(err)
+                    }
+                    Ok(()) => Ok(()),
+                }
+            }
+            // `InstanceDef::Virtual` does not have callable MIR. Calls to `Virtual` instances must be
+            // codegen'd / interpreted as virtual calls through the vtable.
+            ty::InstanceDef::Virtual(def_id, idx) => {
+                let mut args = args.to_vec();
+                // We have to implement all "object safe receivers". So we have to go search for a
+                // pointer or `dyn Trait` type, but it could be wrapped in newtypes. So recursively
+                // unwrap those newtypes until we are there.
+                // An `InPlace` does nothing here, we keep the original receiver intact. We can't
+                // really pass the argument in-place anyway, and we are constructing a new
+                // `Immediate` receiver.
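+                // For illustration: with a receiver such as `self: Pin<&mut dyn Trait>`, `Pin`
+                // is one of those newtypes; the loop below projects through the single
+                // non-1-ZST field until it reaches the `&mut dyn Trait` (or raw pointer)
+                // underneath.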
+                let mut receiver = self.copy_fn_arg(&args[0]);
+                let receiver_place = loop {
+                    match receiver.layout.ty.kind() {
+                        ty::Ref(..) | ty::RawPtr(..) => {
+                            // We do *not* use `deref_pointer` here: we don't want to conceptually
+                            // create a place that must be dereferenceable, since the receiver might
+                            // be a raw pointer and (for `*const dyn Trait`) we don't need to
+                            // actually access memory to resolve this method.
+                            // Also see <https://github.com/rust-lang/miri/issues/2786>.
+                            let val = self.read_immediate(&receiver)?;
+                            break self.ref_to_mplace(&val)?;
+                        }
+                        ty::Dynamic(.., ty::Dyn) => break receiver.assert_mem_place(), // no immediate unsized values
+                        ty::Dynamic(.., ty::DynStar) => {
+                            // Not clear how to handle this; so far we assume the receiver is always a pointer.
+                            span_bug!(
+                                self.cur_span(),
+                                "by-value calls on a `dyn*`... are those a thing?"
+                            );
+                        }
+                        _ => {
+                            // Not there yet, search for the only non-ZST field.
+                            // (The rules for `DispatchFromDyn` ensure there's exactly one such field.)
+                            let (idx, _) = receiver.layout.non_1zst_field(self).expect(
+                                "not exactly one non-1-ZST field in a `DispatchFromDyn` type",
+                            );
+                            receiver = self.project_field(&receiver, idx)?;
+                        }
+                    }
+                };
+
+                // Obtain the underlying trait we are working on, and the adjusted receiver argument.
+                let (vptr, dyn_ty, adjusted_receiver) = if let ty::Dynamic(data, _, ty::DynStar) =
+                    receiver_place.layout.ty.kind()
+                {
+                    let (recv, vptr) = self.unpack_dyn_star(&receiver_place)?;
+                    let (dyn_ty, dyn_trait) = self.get_ptr_vtable(vptr)?;
+                    if dyn_trait != data.principal() {
+                        throw_ub_custom!(fluent::const_eval_dyn_star_call_vtable_mismatch);
+                    }
+
+                    (vptr, dyn_ty, recv.ptr())
+                } else {
+                    // Doesn't have to be a `dyn Trait`, but the unsized tail must be `dyn Trait`.
+                    // (For that reason we also cannot use `unpack_dyn_trait`.)
+                    let receiver_tail = self
+                        .tcx
+                        .struct_tail_erasing_lifetimes(receiver_place.layout.ty, self.param_env);
+                    let ty::Dynamic(data, _, ty::Dyn) = receiver_tail.kind() else {
+                        span_bug!(
+                            self.cur_span(),
+                            "dynamic call on non-`dyn` type {}",
+                            receiver_tail
+                        )
+                    };
+                    assert!(receiver_place.layout.is_unsized());
+
+                    // Get the required information from the vtable.
+                    let vptr = receiver_place.meta().unwrap_meta().to_pointer(self)?;
+                    let (dyn_ty, dyn_trait) = self.get_ptr_vtable(vptr)?;
+                    if dyn_trait != data.principal() {
+                        throw_ub_custom!(fluent::const_eval_dyn_call_vtable_mismatch);
+                    }
+
+                    // It might be surprising that we use a pointer as the receiver even if this
+                    // is a by-val case; this works because by-val passing of an unsized `dyn
+                    // Trait` to a function is actually desugared to a pointer.
+                    (vptr, dyn_ty, receiver_place.ptr())
+                };
+
+                // Now determine the actual method to call. We can do that in two different ways and
+                // compare them to ensure everything fits.
+                let Some(ty::VtblEntry::Method(fn_inst)) =
+                    self.get_vtable_entries(vptr)?.get(idx).copied()
+                else {
+                    // FIXME(fee1-dead) these could be variants of the UB info enum instead of this
+                    throw_ub_custom!(fluent::const_eval_dyn_call_not_a_method);
+                };
+                trace!("Virtual call dispatches to {fn_inst:#?}");
+                if cfg!(debug_assertions) {
+                    let tcx = *self.tcx;
+
+                    let trait_def_id = tcx.trait_of_item(def_id).unwrap();
+                    let virtual_trait_ref =
+                        ty::TraitRef::from_method(tcx, trait_def_id, instance.args);
+                    let existential_trait_ref =
+                        ty::ExistentialTraitRef::erase_self_ty(tcx, virtual_trait_ref);
+                    let concrete_trait_ref = existential_trait_ref.with_self_ty(tcx, dyn_ty);
+
+                    let concrete_method = Instance::resolve_for_vtable(
+                        tcx,
+                        self.param_env,
+                        def_id,
+                        instance.args.rebase_onto(tcx, trait_def_id, concrete_trait_ref.args),
+                    )
+                    .unwrap();
+                    assert_eq!(fn_inst, concrete_method);
+                }
+
+                // Adjust receiver argument. Layout can be any (thin) ptr.
+                let receiver_ty = Ty::new_mut_ptr(self.tcx.tcx, dyn_ty);
+                args[0] = FnArg::Copy(
+                    ImmTy::from_immediate(
+                        Scalar::from_maybe_pointer(adjusted_receiver, self).into(),
+                        self.layout_of(receiver_ty)?,
+                    )
+                    .into(),
+                );
+                trace!("Patched receiver operand to {:#?}", args[0]);
+                // Need to also adjust the type in the ABI. Strangely, the layout there is actually
+                // already fine! Just the type is bogus. This is due to what `force_thin_self_ptr`
+                // does in `fn_abi_new_uncached`; supposedly, codegen relies on having the bogus
+                // type, so we just patch this up locally.
+                let mut caller_fn_abi = caller_fn_abi.clone();
+                caller_fn_abi.args[0].layout.ty = receiver_ty;
+
+                // Recurse with the concrete function.
+                self.eval_fn_call(
+                    FnVal::Instance(fn_inst),
+                    (caller_abi, &caller_fn_abi),
+                    &args,
+                    with_caller_location,
+                    destination,
+                    target,
+                    unwind,
+                )
+            }
+        }
+    }
+
+    fn check_fn_target_features(&self, instance: ty::Instance<'tcx>) -> InterpResult<'tcx, ()> {
+        // Calling functions with `#[target_feature]` is not unsafe on WASM, see #84988
+        let attrs = self.tcx.codegen_fn_attrs(instance.def_id());
+        if !self.tcx.sess.target.is_like_wasm
+            && attrs
+                .target_features
+                .iter()
+                .any(|feature| !self.tcx.sess.target_features.contains(feature))
+        {
+            throw_ub_custom!(
+                fluent::const_eval_unavailable_target_features_for_fn,
+                unavailable_feats = attrs
+                    .target_features
+                    .iter()
+                    .filter(|&feature| !self.tcx.sess.target_features.contains(feature))
+                    .fold(String::new(), |mut s, feature| {
+                        if !s.is_empty() {
+                            s.push_str(", ");
+                        }
+                        s.push_str(feature.as_str());
+                        s
+                    }),
+            );
+        }
+        Ok(())
+    }
+
+    fn drop_in_place(
+        &mut self,
+        place: &PlaceTy<'tcx, M::Provenance>,
+        instance: ty::Instance<'tcx>,
+        target: mir::BasicBlock,
+        unwind: mir::UnwindAction,
+    ) -> InterpResult<'tcx> {
+        trace!("drop_in_place: {:?},\n  instance={:?}", place, instance);
+        // We take the address of the object. This may well be unaligned, which is fine
+        // for us here. However, unaligned accesses will probably make the actual drop
+        // implementation fail -- a problem shared by rustc.
+        let place = self.force_allocation(place)?;
+
+        let place = match place.layout.ty.kind() {
+            ty::Dynamic(_, _, ty::Dyn) => {
+                // Dropping a trait object. Need to find actual drop fn.
+                self.unpack_dyn_trait(&place)?.0
+            }
+            ty::Dynamic(_, _, ty::DynStar) => {
+                // Dropping a `dyn*`. Need to find actual drop fn.
+                self.unpack_dyn_star(&place)?.0
+            }
+            _ => {
+                debug_assert_eq!(
+                    instance,
+                    ty::Instance::resolve_drop_in_place(*self.tcx, place.layout.ty)
+                );
+                place
+            }
+        };
+        let instance = ty::Instance::resolve_drop_in_place(*self.tcx, place.layout.ty);
+        let fn_abi = self.fn_abi_of_instance(instance, ty::List::empty())?;
+
+        let arg = self.mplace_to_ref(&place)?;
+        let ret = MPlaceTy::fake_alloc_zst(self.layout_of(self.tcx.types.unit)?);
+
+        self.eval_fn_call(
+            FnVal::Instance(instance),
+            (Abi::Rust, fn_abi),
+            &[FnArg::Copy(arg.into())],
+            false,
+            &ret.into(),
+            Some(target),
+            unwind,
+        )
+    }
+}
diff --git a/compiler/rustc_const_eval/src/interpret/traits.rs b/compiler/rustc_const_eval/src/interpret/traits.rs
new file mode 100644
index 00000000000..a9ca268a2a9
--- /dev/null
+++ b/compiler/rustc_const_eval/src/interpret/traits.rs
@@ -0,0 +1,59 @@
+use rustc_middle::mir::interpret::{InterpResult, Pointer};
+use rustc_middle::ty::layout::LayoutOf;
+use rustc_middle::ty::{self, Ty, TyCtxt};
+use rustc_target::abi::{Align, Size};
+
+use super::util::ensure_monomorphic_enough;
+use super::{InterpCx, Machine};
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+    /// Creates a dynamic vtable for the given type and vtable origin. This is used only for
+    /// trait objects.
+    ///
+    /// The `trait_ref` encodes the erased self type. Hence, if we are making an object `Foo<Trait>`
+    /// from a value of type `Foo<T>`, then `trait_ref` would map `T: Trait`. `None` here means that
+    /// this is an auto trait without any methods, so we only need the basic vtable (drop, size,
+    /// align).
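+    ///
+    /// Illustrative sketch (hypothetical example, not taken from real callers): evaluating
+    ///
+    /// ```ignore(illustrative)
+    /// trait Trait { fn method(&self) {} }
+    /// impl Trait for u32 {}
+    /// const C: &dyn Trait = &0u32;
+    /// ```
+    ///
+    /// requires a vtable for `u32` as `Trait`; this function returns a pointer into the
+    /// symbolic vtable allocation reserved for that (type, trait) pair.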
+    pub fn get_vtable_ptr(
+        &self,
+        ty: Ty<'tcx>,
+        poly_trait_ref: Option<ty::PolyExistentialTraitRef<'tcx>>,
+    ) -> InterpResult<'tcx, Pointer<Option<M::Provenance>>> {
+        trace!("get_vtable(trait_ref={:?})", poly_trait_ref);
+
+        let (ty, poly_trait_ref) = self.tcx.erase_regions((ty, poly_trait_ref));
+
+        // All vtables must be monomorphic, bail out otherwise.
+        ensure_monomorphic_enough(*self.tcx, ty)?;
+        ensure_monomorphic_enough(*self.tcx, poly_trait_ref)?;
+
+        let vtable_symbolic_allocation = self.tcx.reserve_and_set_vtable_alloc(ty, poly_trait_ref);
+        let vtable_ptr = self.global_base_pointer(Pointer::from(vtable_symbolic_allocation))?;
+        Ok(vtable_ptr.into())
+    }
+
+    /// Returns a high-level representation of the entries of the given vtable.
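+    ///
+    /// Rough shape of the result (illustrative, not a stability guarantee): the slice starts
+    /// with the common entries (drop-in-place, size, align) and then has one slot per method
+    /// of the principal trait, e.g.
+    ///
+    /// ```ignore(illustrative)
+    /// [MetadataDropInPlace, Size, Align, Method(<u32 as Trait>::method)]
+    /// ```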
+    pub fn get_vtable_entries(
+        &self,
+        vtable: Pointer<Option<M::Provenance>>,
+    ) -> InterpResult<'tcx, &'tcx [ty::VtblEntry<'tcx>]> {
+        let (ty, poly_trait_ref) = self.get_ptr_vtable(vtable)?;
+        Ok(if let Some(poly_trait_ref) = poly_trait_ref {
+            let trait_ref = poly_trait_ref.with_self_ty(*self.tcx, ty);
+            let trait_ref = self.tcx.erase_regions(trait_ref);
+            self.tcx.vtable_entries(trait_ref)
+        } else {
+            TyCtxt::COMMON_VTABLE_ENTRIES
+        })
+    }
+
+    pub fn get_vtable_size_and_align(
+        &self,
+        vtable: Pointer<Option<M::Provenance>>,
+    ) -> InterpResult<'tcx, (Size, Align)> {
+        let (ty, _trait_ref) = self.get_ptr_vtable(vtable)?;
+        let layout = self.layout_of(ty)?;
+        assert!(layout.is_sized(), "there are no vtables for unsized types");
+        Ok((layout.size, layout.align.abi))
+    }
+}
diff --git a/compiler/rustc_const_eval/src/interpret/util.rs b/compiler/rustc_const_eval/src/interpret/util.rs
new file mode 100644
index 00000000000..c83ef14c03f
--- /dev/null
+++ b/compiler/rustc_const_eval/src/interpret/util.rs
@@ -0,0 +1,106 @@
+use crate::const_eval::{CompileTimeEvalContext, CompileTimeInterpreter, InterpretationResult};
+use crate::interpret::{MemPlaceMeta, MemoryKind};
+use rustc_hir::def_id::LocalDefId;
+use rustc_middle::mir;
+use rustc_middle::mir::interpret::{Allocation, InterpResult, Pointer};
+use rustc_middle::ty::layout::TyAndLayout;
+use rustc_middle::ty::{
+    self, Ty, TyCtxt, TypeSuperVisitable, TypeVisitable, TypeVisitableExt, TypeVisitor,
+};
+use std::ops::ControlFlow;
+
+use super::{InterpCx, MPlaceTy};
+
+/// Checks whether a type contains generic parameters which must be instantiated.
+///
+/// In case it does, returns a `TooGeneric` const eval error. Note that, due to polymorphization,
+/// types may be "concrete enough" even though they still contain generic parameters, as long
+/// as those parameters are unused.
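+///
+/// Illustrative example (assumes inline `const` blocks; hypothetical, not from this crate):
+/// evaluating the block below before monomorphization fails this check with `TooGeneric`,
+/// because `T` is still a generic parameter:
+///
+/// ```ignore(illustrative)
+/// fn size<T>() -> usize {
+///     const { std::mem::size_of::<T>() }
+/// }
+/// ```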
+pub(crate) fn ensure_monomorphic_enough<'tcx, T>(tcx: TyCtxt<'tcx>, ty: T) -> InterpResult<'tcx>
+where
+    T: TypeVisitable<TyCtxt<'tcx>>,
+{
+    debug!("ensure_monomorphic_enough: ty={:?}", ty);
+    if !ty.has_param() {
+        return Ok(());
+    }
+
+    struct FoundParam;
+    struct UsedParamsNeedInstantiationVisitor<'tcx> {
+        tcx: TyCtxt<'tcx>,
+    }
+
+    impl<'tcx> TypeVisitor<TyCtxt<'tcx>> for UsedParamsNeedInstantiationVisitor<'tcx> {
+        type Result = ControlFlow<FoundParam>;
+
+        fn visit_ty(&mut self, ty: Ty<'tcx>) -> Self::Result {
+            if !ty.has_param() {
+                return ControlFlow::Continue(());
+            }
+
+            match *ty.kind() {
+                ty::Param(_) => ControlFlow::Break(FoundParam),
+                ty::Closure(def_id, args)
+                | ty::CoroutineClosure(def_id, args, ..)
+                | ty::Coroutine(def_id, args, ..)
+                | ty::FnDef(def_id, args) => {
+                    let instance = ty::InstanceDef::Item(def_id);
+                    let unused_params = self.tcx.unused_generic_params(instance);
+                    for (index, arg) in args.into_iter().enumerate() {
+                        let index = index
+                            .try_into()
+                            .expect("more generic parameters than can fit into a `u32`");
+                        // Only recurse when generic parameters in fns, closures and coroutines
+                        // are used and have to be instantiated.
+                        //
+                        // Just in case there are closures or coroutines within this arg,
+                        // recurse.
+                        if unused_params.is_used(index) && arg.has_param() {
+                            return arg.visit_with(self);
+                        }
+                    }
+                    ControlFlow::Continue(())
+                }
+                _ => ty.super_visit_with(self),
+            }
+        }
+
+        fn visit_const(&mut self, c: ty::Const<'tcx>) -> Self::Result {
+            match c.kind() {
+                ty::ConstKind::Param(..) => ControlFlow::Break(FoundParam),
+                _ => c.super_visit_with(self),
+            }
+        }
+    }
+
+    let mut vis = UsedParamsNeedInstantiationVisitor { tcx };
+    if matches!(ty.visit_with(&mut vis), ControlFlow::Break(FoundParam)) {
+        throw_inval!(TooGeneric);
+    } else {
+        Ok(())
+    }
+}
+
+impl<'tcx> InterpretationResult<'tcx> for mir::interpret::ConstAllocation<'tcx> {
+    fn make_result<'mir>(
+        mplace: MPlaceTy<'tcx>,
+        ecx: &mut InterpCx<'mir, 'tcx, CompileTimeInterpreter<'mir, 'tcx>>,
+    ) -> Self {
+        let alloc_id = mplace.ptr().provenance.unwrap().alloc_id();
+        let alloc = ecx.memory.alloc_map.swap_remove(&alloc_id).unwrap().1;
+        ecx.tcx.mk_const_alloc(alloc)
+    }
+}
+
+pub(crate) fn create_static_alloc<'mir, 'tcx: 'mir>(
+    ecx: &mut CompileTimeEvalContext<'mir, 'tcx>,
+    static_def_id: LocalDefId,
+    layout: TyAndLayout<'tcx>,
+) -> InterpResult<'tcx, MPlaceTy<'tcx>> {
+    let alloc = Allocation::try_uninit(layout.size, layout.align.abi)?;
+    let alloc_id = ecx.tcx.reserve_and_set_static_alloc(static_def_id.into());
+    assert_eq!(ecx.machine.static_root_ids, None);
+    ecx.machine.static_root_ids = Some((alloc_id, static_def_id));
+    assert!(ecx.memory.alloc_map.insert(alloc_id, (MemoryKind::Stack, alloc)).is_none());
+    Ok(ecx.ptr_with_meta_to_mplace(Pointer::from(alloc_id).into(), MemPlaceMeta::None, layout))
+}
diff --git a/compiler/rustc_const_eval/src/interpret/validity.rs b/compiler/rustc_const_eval/src/interpret/validity.rs
new file mode 100644
index 00000000000..d18600ce7d7
--- /dev/null
+++ b/compiler/rustc_const_eval/src/interpret/validity.rs
@@ -0,0 +1,1039 @@
+//! Check the validity invariant of a given value, and tell the user
+//! where in the value it got violated.
+//! In const context, this goes even further and tries to approximate const safety.
+//! That's useful because it means other passes (e.g. promotion) can rely on `const`s
+//! to be const-safe.
+
+use std::fmt::Write;
+use std::num::NonZero;
+
+use either::{Left, Right};
+
+use hir::def::DefKind;
+use rustc_ast::Mutability;
+use rustc_data_structures::fx::FxHashSet;
+use rustc_hir as hir;
+use rustc_middle::mir::interpret::{
+    ExpectedKind, InterpError, InvalidMetaKind, Misalignment, PointerKind, Provenance,
+    ValidationErrorInfo, ValidationErrorKind, ValidationErrorKind::*,
+};
+use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
+use rustc_middle::ty::{self, Ty};
+use rustc_span::symbol::{sym, Symbol};
+use rustc_target::abi::{
+    Abi, FieldIdx, Scalar as ScalarAbi, Size, VariantIdx, Variants, WrappingRange,
+};
+
+use std::hash::Hash;
+
+use super::{
+    format_interp_error, machine::AllocMap, AllocId, CheckInAllocMsg, GlobalAlloc, ImmTy,
+    Immediate, InterpCx, InterpResult, MPlaceTy, Machine, MemPlaceMeta, OpTy, Pointer, Projectable,
+    Scalar, ValueVisitor,
+};
+
+// for the validation errors
+use super::InterpError::UndefinedBehavior as Ub;
+use super::InterpError::Unsupported as Unsup;
+use super::UndefinedBehaviorInfo::*;
+use super::UnsupportedOpInfo::*;
+
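+/// Raise a validation failure for the value at the given `path`.
+///
+/// Usage sketch (mirroring the invocations further down in this file):
+///
+/// ```ignore(illustrative)
+/// throw_validation_failure!(self.path, NullFnPtr)
+/// ```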
+macro_rules! throw_validation_failure {
+    ($where:expr, $kind: expr) => {{
+        let where_ = &$where;
+        let path = if !where_.is_empty() {
+            let mut path = String::new();
+            write_path(&mut path, where_);
+            Some(path)
+        } else {
+            None
+        };
+
+        throw_ub!(ValidationError(ValidationErrorInfo { path, kind: $kind }))
+    }};
+}
+
+/// If $e throws an error matching the pattern, throw a validation failure.
+/// Other errors are passed back to the caller, unchanged -- and if they reach the root of
+/// the visitor, we make sure only validation errors and `InvalidProgram` errors are left.
+/// This lets you use the patterns as a kind of validation list, asserting which errors
+/// can possibly happen:
+///
+/// ```ignore(illustrative)
+/// let v = try_validation!(some_fn(), some_path, {
+///     Foo | Bar | Baz => { "some failure" },
+/// });
+/// ```
+///
+/// The patterns must be of type `UndefinedBehaviorInfo`.
+/// An additional expected parameter can also be added to the failure message:
+///
+/// ```ignore(illustrative)
+/// let v = try_validation!(some_fn(), some_path, {
+///     Foo | Bar | Baz => { "some failure" } expected { "something that wasn't a failure" },
+/// });
+/// ```
+///
+/// An additional nicety is that both parameters actually take format args, so you can just write
+/// the format string in directly:
+///
+/// ```ignore(illustrative)
+/// let v = try_validation!(some_fn(), some_path, {
+///     Foo | Bar | Baz => { "{:?}", some_failure } expected { "{}", expected_value },
+/// });
+/// ```
+///
+macro_rules! try_validation {
+    ($e:expr, $where:expr,
+    $( $( $p:pat_param )|+ => $kind: expr ),+ $(,)?
+    ) => {{
+        match $e {
+            Ok(x) => x,
+            // We catch the error and turn it into a validation failure. We are okay with
+            // allocation here as this can only slow down builds that fail anyway.
+            Err(e) => match e.kind() {
+                $(
+                    $($p)|+ =>
+                       throw_validation_failure!(
+                            $where,
+                            $kind
+                        )
+                ),+,
+                #[allow(unreachable_patterns)]
+                _ => Err::<!, _>(e)?,
+            }
+        }
+    }};
+}
+
+/// We want to show a nice path to the invalid field for diagnostics,
+/// but avoid string operations in the happy case where no error happens.
+/// So we track a `Vec<PathElem>` where `PathElem` contains all the data we
+/// need to later print something for the user.
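+///
+/// For illustration (`field_name` is a hypothetical `Symbol`): a bad value found at
+/// `(*root).field[3]` would be tracked as
+///
+/// ```ignore(illustrative)
+/// vec![PathElem::Deref, PathElem::Field(field_name), PathElem::ArrayElem(3)]
+/// ```
+///
+/// which `write_path` below renders as `.<deref>.field[3]`.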
+#[derive(Copy, Clone, Debug)]
+pub enum PathElem {
+    Field(Symbol),
+    Variant(Symbol),
+    CoroutineState(VariantIdx),
+    CapturedVar(Symbol),
+    ArrayElem(usize),
+    TupleElem(usize),
+    Deref,
+    EnumTag,
+    CoroutineTag,
+    DynDowncast,
+}
+
+/// Extra things to check for during validation of CTFE results.
+#[derive(Copy, Clone)]
+pub enum CtfeValidationMode {
+    /// Validation of a `static`
+    Static { mutbl: Mutability },
+    /// Validation of a promoted.
+    Promoted,
+    /// Validation of a `const`.
+    /// `allow_immutable_unsafe_cell` says whether we allow `UnsafeCell` in immutable memory (which is the
+    /// case for the top-level allocation of a `const`, where this is fine because the allocation will be
+    /// copied at each use site).
+    Const { allow_immutable_unsafe_cell: bool },
+}
+
+impl CtfeValidationMode {
+    fn allow_immutable_unsafe_cell(self) -> bool {
+        match self {
+            CtfeValidationMode::Static { .. } => false,
+            CtfeValidationMode::Promoted { .. } => false,
+            CtfeValidationMode::Const { allow_immutable_unsafe_cell, .. } => {
+                allow_immutable_unsafe_cell
+            }
+        }
+    }
+}
+
+/// State for tracking recursive validation of references
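+///
+/// Minimal usage sketch (illustrative; the real drivers live in the validation and interning
+/// code):
+///
+/// ```ignore(illustrative)
+/// let mut refs = RefTracking::new(root_place);
+/// while let Some((place, path)) = refs.todo.pop() {
+///     // validate `place`; newly discovered references are registered via `refs.track(..)`
+/// }
+/// ```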
+pub struct RefTracking<T, PATH = ()> {
+    pub seen: FxHashSet<T>,
+    pub todo: Vec<(T, PATH)>,
+}
+
+impl<T: Clone + Eq + Hash + std::fmt::Debug, PATH: Default> RefTracking<T, PATH> {
+    pub fn empty() -> Self {
+        RefTracking { seen: FxHashSet::default(), todo: vec![] }
+    }
+    pub fn new(op: T) -> Self {
+        let mut ref_tracking_for_consts =
+            RefTracking { seen: FxHashSet::default(), todo: vec![(op.clone(), PATH::default())] };
+        ref_tracking_for_consts.seen.insert(op);
+        ref_tracking_for_consts
+    }
+
+    pub fn track(&mut self, op: T, path: impl FnOnce() -> PATH) {
+        if self.seen.insert(op.clone()) {
+            trace!("Recursing below ptr {:#?}", op);
+            let path = path();
+            // Remember to come back to this later.
+            self.todo.push((op, path));
+        }
+    }
+}
+
+// FIXME make this translatable as well?
+/// Format a path
+fn write_path(out: &mut String, path: &[PathElem]) {
+    use self::PathElem::*;
+
+    for elem in path.iter() {
+        match elem {
+            Field(name) => write!(out, ".{name}"),
+            EnumTag => write!(out, ".<enum-tag>"),
+            Variant(name) => write!(out, ".<enum-variant({name})>"),
+            CoroutineTag => write!(out, ".<coroutine-tag>"),
+            CoroutineState(idx) => write!(out, ".<coroutine-state({})>", idx.index()),
+            CapturedVar(name) => write!(out, ".<captured-var({name})>"),
+            TupleElem(idx) => write!(out, ".{idx}"),
+            ArrayElem(idx) => write!(out, "[{idx}]"),
+            // `.<deref>` does not match Rust syntax, but it is more readable for long paths -- and
+            // some of the other items here also are not Rust syntax. Actually we can't
+            // even use the usual syntax because we are just showing the projections,
+            // not the root.
+            Deref => write!(out, ".<deref>"),
+            DynDowncast => write!(out, ".<dyn-downcast>"),
+        }
+        .unwrap()
+    }
+}
+
+struct ValidityVisitor<'rt, 'mir, 'tcx, M: Machine<'mir, 'tcx>> {
+    /// The `path` may be pushed to, but the part that is present when a function
+    /// starts must not be changed!  `visit_fields` and `visit_array` rely on
+    /// this stack discipline.
+    path: Vec<PathElem>,
+    ref_tracking: Option<&'rt mut RefTracking<MPlaceTy<'tcx, M::Provenance>, Vec<PathElem>>>,
+    /// `None` indicates this is not validating for CTFE (but for runtime).
+    ctfe_mode: Option<CtfeValidationMode>,
+    ecx: &'rt InterpCx<'mir, 'tcx, M>,
+}
+
+impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, 'tcx, M> {
+    fn aggregate_field_path_elem(&mut self, layout: TyAndLayout<'tcx>, field: usize) -> PathElem {
+        // First, check if we are projecting to a variant.
+        match layout.variants {
+            Variants::Multiple { tag_field, .. } => {
+                if tag_field == field {
+                    return match layout.ty.kind() {
+                        ty::Adt(def, ..) if def.is_enum() => PathElem::EnumTag,
+                        ty::Coroutine(..) => PathElem::CoroutineTag,
+                        _ => bug!("non-variant type {:?}", layout.ty),
+                    };
+                }
+            }
+            Variants::Single { .. } => {}
+        }
+
+        // Now we know we are projecting to a field, so figure out which one.
+        match layout.ty.kind() {
+            // coroutines, closures, and coroutine-closures all have upvars that may be named.
+            ty::Closure(def_id, _) | ty::Coroutine(def_id, _) | ty::CoroutineClosure(def_id, _) => {
+                let mut name = None;
+                // FIXME this should be more descriptive i.e. CapturePlace instead of CapturedVar
+                // https://github.com/rust-lang/project-rfc-2229/issues/46
+                if let Some(local_def_id) = def_id.as_local() {
+                    let captures = self.ecx.tcx.closure_captures(local_def_id);
+                    if let Some(captured_place) = captures.get(field) {
+                        // Sometimes the index is beyond the number of upvars (seen
+                        // for a coroutine).
+                        let var_hir_id = captured_place.get_root_variable();
+                        let node = self.ecx.tcx.hir_node(var_hir_id);
+                        if let hir::Node::Pat(pat) = node {
+                            if let hir::PatKind::Binding(_, _, ident, _) = pat.kind {
+                                name = Some(ident.name);
+                            }
+                        }
+                    }
+                }
+
+                PathElem::CapturedVar(name.unwrap_or_else(|| {
+                    // Fall back to showing the field index.
+                    sym::integer(field)
+                }))
+            }
+
+            // tuples
+            ty::Tuple(_) => PathElem::TupleElem(field),
+
+            // enums
+            ty::Adt(def, ..) if def.is_enum() => {
+                // we might be projecting *to* a variant, or to a field *in* a variant.
+                match layout.variants {
+                    Variants::Single { index } => {
+                        // Inside a variant
+                        PathElem::Field(def.variant(index).fields[FieldIdx::from_usize(field)].name)
+                    }
+                    Variants::Multiple { .. } => bug!("we handled variants above"),
+                }
+            }
+
+            // other ADTs
+            ty::Adt(def, _) => {
+                PathElem::Field(def.non_enum_variant().fields[FieldIdx::from_usize(field)].name)
+            }
+
+            // arrays/slices
+            ty::Array(..) | ty::Slice(..) => PathElem::ArrayElem(field),
+
+            // dyn traits
+            ty::Dynamic(..) => PathElem::DynDowncast,
+
+            // nothing else has an aggregate layout
+            _ => bug!("aggregate_field_path_elem: got non-aggregate type {:?}", layout.ty),
+        }
+    }
+
+    fn with_elem<R>(
+        &mut self,
+        elem: PathElem,
+        f: impl FnOnce(&mut Self) -> InterpResult<'tcx, R>,
+    ) -> InterpResult<'tcx, R> {
+        // Remember the old state
+        let path_len = self.path.len();
+        // Record new element
+        self.path.push(elem);
+        // Perform operation
+        let r = f(self)?;
+        // Undo changes
+        self.path.truncate(path_len);
+        // Done
+        Ok(r)
+    }
+
+    fn read_immediate(
+        &self,
+        op: &OpTy<'tcx, M::Provenance>,
+        expected: ExpectedKind,
+    ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
+        Ok(try_validation!(
+            self.ecx.read_immediate(op),
+            self.path,
+            Ub(InvalidUninitBytes(None)) =>
+                Uninit { expected },
+            // The `Unsup` cases can only occur during CTFE
+            Unsup(ReadPointerAsInt(_)) =>
+                PointerAsInt { expected },
+            Unsup(ReadPartialPointer(_)) =>
+                PartialPointer,
+        ))
+    }
+
+    fn read_scalar(
+        &self,
+        op: &OpTy<'tcx, M::Provenance>,
+        expected: ExpectedKind,
+    ) -> InterpResult<'tcx, Scalar<M::Provenance>> {
+        Ok(self.read_immediate(op, expected)?.to_scalar())
+    }
+
+    fn check_wide_ptr_meta(
+        &mut self,
+        meta: MemPlaceMeta<M::Provenance>,
+        pointee: TyAndLayout<'tcx>,
+    ) -> InterpResult<'tcx> {
+        let tail = self.ecx.tcx.struct_tail_erasing_lifetimes(pointee.ty, self.ecx.param_env);
+        match tail.kind() {
+            ty::Dynamic(_, _, ty::Dyn) => {
+                let vtable = meta.unwrap_meta().to_pointer(self.ecx)?;
+                // Make sure it is a genuine vtable pointer.
+                let (_ty, _trait) = try_validation!(
+                    self.ecx.get_ptr_vtable(vtable),
+                    self.path,
+                    Ub(DanglingIntPointer(..) | InvalidVTablePointer(..)) =>
+                        InvalidVTablePtr { value: format!("{vtable}") }
+                );
+                // FIXME: check if the type/trait match what ty::Dynamic says?
+            }
+            ty::Slice(..) | ty::Str => {
+                let _len = meta.unwrap_meta().to_target_usize(self.ecx)?;
+                // We do not check that `len * elem_size <= isize::MAX`:
+                // that is only required for references, and there it falls out of the
+                // "dereferenceable" check performed by Stacked Borrows.
+            }
+            ty::Foreign(..) => {
+                // Unsized, but not wide.
+            }
+            _ => bug!("Unexpected unsized type tail: {:?}", tail),
+        }
+
+        Ok(())
+    }
+
+    /// Check a reference or `Box`.
+    fn check_safe_pointer(
+        &mut self,
+        value: &OpTy<'tcx, M::Provenance>,
+        ptr_kind: PointerKind,
+    ) -> InterpResult<'tcx> {
+        // Not using `deref_pointer` since we want to use our `read_immediate` wrapper.
+        let place = self.ecx.ref_to_mplace(&self.read_immediate(value, ptr_kind.into())?)?;
+        // Handle wide pointers.
+        // Check metadata early, for better diagnostics
+        if place.layout.is_unsized() {
+            self.check_wide_ptr_meta(place.meta(), place.layout)?;
+        }
+        // Make sure this is dereferenceable and all.
+        let size_and_align = try_validation!(
+            self.ecx.size_and_align_of_mplace(&place),
+            self.path,
+            Ub(InvalidMeta(msg)) => match msg {
+                InvalidMetaKind::SliceTooBig => InvalidMetaSliceTooLarge { ptr_kind },
+                InvalidMetaKind::TooBig => InvalidMetaTooLarge { ptr_kind },
+            }
+        );
+        let (size, align) = size_and_align
+            // for the purpose of validity, consider foreign types to have
+            // alignment and size determined by the layout (size will be 0,
+            // alignment should take attributes into account).
+            .unwrap_or_else(|| (place.layout.size, place.layout.align.abi));
+        // Direct call to `check_ptr_access_align` checks alignment even on CTFE machines.
+        try_validation!(
+            self.ecx.check_ptr_access(
+                place.ptr(),
+                size,
+                CheckInAllocMsg::InboundsTest, // will anyway be replaced by validity message
+            ),
+            self.path,
+            Ub(DanglingIntPointer(0, _)) => NullPtr { ptr_kind },
+            Ub(DanglingIntPointer(i, _)) => DanglingPtrNoProvenance {
+                ptr_kind,
+                // FIXME this says "null pointer" when null, but we need to translate that
+                pointer: format!("{}", Pointer::<Option<AllocId>>::from_addr_invalid(*i))
+            },
+            Ub(PointerOutOfBounds { .. }) => DanglingPtrOutOfBounds {
+                ptr_kind
+            },
+            // This cannot happen during const-eval (because interning already detects
+            // dangling pointers), but it can happen in Miri.
+            Ub(PointerUseAfterFree(..)) => DanglingPtrUseAfterFree {
+                ptr_kind,
+            },
+        );
+        try_validation!(
+            self.ecx.check_ptr_align(
+                place.ptr(),
+                align,
+            ),
+            self.path,
+            Ub(AlignmentCheckFailed(Misalignment { required, has }, _msg)) => UnalignedPtr {
+                ptr_kind,
+                required_bytes: required.bytes(),
+                found_bytes: has.bytes()
+            },
+        );
+        // Do not allow pointers to uninhabited types.
+        if place.layout.abi.is_uninhabited() {
+            let ty = place.layout.ty;
+            throw_validation_failure!(self.path, PtrToUninhabited { ptr_kind, ty })
+        }
+        // Recursive checking
+        if let Some(ref_tracking) = self.ref_tracking.as_deref_mut() {
+            // Determine whether this pointer expects to be pointing to something mutable.
+            let ptr_expected_mutbl = match ptr_kind {
+                PointerKind::Box => Mutability::Mut,
+                PointerKind::Ref(mutbl) => {
+                    // We do not take into account interior mutability here since we cannot know if
+                    // there really is an `UnsafeCell` inside `Option<UnsafeCell>` -- so we check
+                    // that in the recursive descent behind this reference (controlled by
+                    // `allow_immutable_unsafe_cell`).
+                    mutbl
+                }
+            };
+            // Proceed recursively even for ZST, no reason to skip them!
+            // `!` is a ZST and we want to validate it.
+            if let Ok((alloc_id, _offset, _prov)) = self.ecx.ptr_try_get_alloc_id(place.ptr()) {
+                let mut skip_recursive_check = false;
+                // Let's see what kind of memory this points to.
+                // `unwrap` since dangling pointers have already been handled.
+                let alloc_kind = self.ecx.tcx.try_get_global_alloc(alloc_id).unwrap();
+                let alloc_actual_mutbl = match alloc_kind {
+                    GlobalAlloc::Static(did) => {
+                        // Special handling for pointers to statics (irrespective of their type).
+                        assert!(!self.ecx.tcx.is_thread_local_static(did));
+                        assert!(self.ecx.tcx.is_static(did));
+                        // Mode-specific checks
+                        match self.ctfe_mode {
+                            Some(
+                                CtfeValidationMode::Static { .. }
+                                | CtfeValidationMode::Promoted { .. },
+                            ) => {
+                                // We skip recursively checking other statics. These statics must be sound by
+                                // themselves, and the only way to get broken statics here is by using
+                                // unsafe code.
+                                // The reason we don't check other statics is twofold. For one, in all
+                                // sound cases, the static was already validated on its own, and second, we
+                                // trigger cycle errors if we try to compute the value of the other static
+                                // and that static refers back to us (potentially through a promoted).
+                                // This could miss some UB, but that's fine.
+                                skip_recursive_check = true;
+                            }
+                            Some(CtfeValidationMode::Const { .. }) => {
+                                // We can't recursively validate `extern static`s, so we'd better reject them.
+                                if self.ecx.tcx.is_foreign_item(did) {
+                                    throw_validation_failure!(self.path, ConstRefToExtern);
+                                }
+                            }
+                            None => {}
+                        }
+                        // Return alloc mutability. For "root" statics we look at the type to account for interior
+                        // mutability; for nested statics we have no type and directly use the annotated mutability.
+                        let DefKind::Static { mutability, nested } = self.ecx.tcx.def_kind(did)
+                        else {
+                            bug!()
+                        };
+                        match (mutability, nested) {
+                            (Mutability::Mut, _) => Mutability::Mut,
+                            (Mutability::Not, true) => Mutability::Not,
+                            (Mutability::Not, false)
+                                if !self
+                                    .ecx
+                                    .tcx
+                                    .type_of(did)
+                                    .no_bound_vars()
+                                    .expect("statics should not have generic parameters")
+                                    .is_freeze(*self.ecx.tcx, ty::ParamEnv::reveal_all()) =>
+                            {
+                                Mutability::Mut
+                            }
+                            (Mutability::Not, false) => Mutability::Not,
+                        }
+                    }
+                    GlobalAlloc::Memory(alloc) => alloc.inner().mutability,
+                    GlobalAlloc::Function(..) | GlobalAlloc::VTable(..) => {
+                        // These are immutable, so we'd better not allow mutable pointers here.
+                        Mutability::Not
+                    }
+                };
+                // Mutability check.
+                // If this allocation has size zero, there is no actual mutability here.
+                let (size, _align, _alloc_kind) = self.ecx.get_alloc_info(alloc_id);
+                if size != Size::ZERO {
+                    // Mutable pointer to immutable memory is no good.
+                    if ptr_expected_mutbl == Mutability::Mut
+                        && alloc_actual_mutbl == Mutability::Not
+                    {
+                        throw_validation_failure!(self.path, MutableRefToImmutable);
+                    }
+                    // In a const, everything must be completely immutable.
+                    if matches!(self.ctfe_mode, Some(CtfeValidationMode::Const { .. })) {
+                        if ptr_expected_mutbl == Mutability::Mut
+                            || alloc_actual_mutbl == Mutability::Mut
+                        {
+                            throw_validation_failure!(self.path, ConstRefToMutable);
+                        }
+                    }
+                }
+                // Potentially skip recursive check.
+                if skip_recursive_check {
+                    return Ok(());
+                }
+            }
+            let path = &self.path;
+            ref_tracking.track(place, || {
+                // We need to clone the path anyway, make sure it gets created
+                // with enough space for the additional `Deref`.
+                let mut new_path = Vec::with_capacity(path.len() + 1);
+                new_path.extend(path);
+                new_path.push(PathElem::Deref);
+                new_path
+            });
+        }
+        Ok(())
+    }
+
+    /// Check if this is a value of primitive type, and if yes check the validity of the value
+    /// at that type. Return `true` if the type is indeed primitive.
+    ///
+    /// Note that not all of these have `FieldsShape::Primitive`, e.g. wide references.
+    fn try_visit_primitive(
+        &mut self,
+        value: &OpTy<'tcx, M::Provenance>,
+    ) -> InterpResult<'tcx, bool> {
+        // Go over all the primitive types
+        let ty = value.layout.ty;
+        match ty.kind() {
+            ty::Bool => {
+                let value = self.read_scalar(value, ExpectedKind::Bool)?;
+                try_validation!(
+                    value.to_bool(),
+                    self.path,
+                    Ub(InvalidBool(..)) => ValidationErrorKind::InvalidBool {
+                        value: format!("{value:x}"),
+                    }
+                );
+                Ok(true)
+            }
+            ty::Char => {
+                let value = self.read_scalar(value, ExpectedKind::Char)?;
+                try_validation!(
+                    value.to_char(),
+                    self.path,
+                    Ub(InvalidChar(..)) => ValidationErrorKind::InvalidChar {
+                        value: format!("{value:x}"),
+                    }
+                );
+                Ok(true)
+            }
+            ty::Float(_) | ty::Int(_) | ty::Uint(_) => {
+                // NOTE: Keep this in sync with the array optimization for int/float
+                // types below!
+                self.read_scalar(
+                    value,
+                    if matches!(ty.kind(), ty::Float(..)) {
+                        ExpectedKind::Float
+                    } else {
+                        ExpectedKind::Int
+                    },
+                )?;
+                Ok(true)
+            }
+            ty::RawPtr(..) => {
+                let place =
+                    self.ecx.ref_to_mplace(&self.read_immediate(value, ExpectedKind::RawPtr)?)?;
+                if place.layout.is_unsized() {
+                    self.check_wide_ptr_meta(place.meta(), place.layout)?;
+                }
+                Ok(true)
+            }
+            ty::Ref(_, _ty, mutbl) => {
+                self.check_safe_pointer(value, PointerKind::Ref(*mutbl))?;
+                Ok(true)
+            }
+            ty::FnPtr(_sig) => {
+                let value = self.read_scalar(value, ExpectedKind::FnPtr)?;
+
+                // If we check references recursively, also check that this points to a function.
+                if let Some(_) = self.ref_tracking {
+                    let ptr = value.to_pointer(self.ecx)?;
+                    let _fn = try_validation!(
+                        self.ecx.get_ptr_fn(ptr),
+                        self.path,
+                        Ub(DanglingIntPointer(..) | InvalidFunctionPointer(..)) =>
+                            InvalidFnPtr { value: format!("{ptr}") },
+                    );
+                    // FIXME: Check if the signature matches
+                } else {
+                    // Otherwise (for standalone Miri), we still have to check that it is non-null.
+                    if self.ecx.scalar_may_be_null(value)? {
+                        throw_validation_failure!(self.path, NullFnPtr);
+                    }
+                }
+                Ok(true)
+            }
+            ty::Never => throw_validation_failure!(self.path, NeverVal),
+            ty::Foreign(..) | ty::FnDef(..) => {
+                // Nothing to check.
+                Ok(true)
+            }
+            // The above should be all the primitive types. The rest is compound, we
+            // check them by visiting their fields/variants.
+            ty::Adt(..)
+            | ty::Tuple(..)
+            | ty::Array(..)
+            | ty::Slice(..)
+            | ty::Str
+            | ty::Dynamic(..)
+            | ty::Closure(..)
+            | ty::CoroutineClosure(..)
+            | ty::Coroutine(..) => Ok(false),
+            // Some types only occur during typechecking; they have no layout.
+            // We should not see them here, and we could not check them anyway.
+            ty::Error(_)
+            | ty::Infer(..)
+            | ty::Placeholder(..)
+            | ty::Bound(..)
+            | ty::Param(..)
+            | ty::Alias(..)
+            | ty::CoroutineWitness(..) => bug!("Encountered invalid type {:?}", ty),
+        }
+    }
+
+    fn visit_scalar(
+        &mut self,
+        scalar: Scalar<M::Provenance>,
+        scalar_layout: ScalarAbi,
+    ) -> InterpResult<'tcx> {
+        let size = scalar_layout.size(self.ecx);
+        let valid_range = scalar_layout.valid_range(self.ecx);
+        let WrappingRange { start, end } = valid_range;
+        let max_value = size.unsigned_int_max();
+        assert!(end <= max_value);
+        let bits = match scalar.try_to_int() {
+            Ok(int) => int.assert_bits(size),
+            Err(_) => {
+                // So this is a pointer then, and casting to an int failed.
+                // Can only happen during CTFE.
+                // We support 2 kinds of ranges here: full range, and excluding zero.
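+                // (For example, the scalar of a `&T` or `NonNull<T>` has valid range
+                // `1..=max`, so any non-null pointer is acceptable there.)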
+                if start == 1 && end == max_value {
+                    // Only null is the niche. So make sure the ptr is NOT null.
+                    if self.ecx.scalar_may_be_null(scalar)? {
+                        throw_validation_failure!(
+                            self.path,
+                            NullablePtrOutOfRange { range: valid_range, max_value }
+                        )
+                    } else {
+                        return Ok(());
+                    }
+                } else if scalar_layout.is_always_valid(self.ecx) {
+                    // Easy. (This is reachable if `enforce_number_validity` is set.)
+                    return Ok(());
+                } else {
+                    // Conservatively, we reject, because the pointer *could* have a bad
+                    // value.
+                    throw_validation_failure!(
+                        self.path,
+                        PtrOutOfRange { range: valid_range, max_value }
+                    )
+                }
+            }
+        };
+        // Now compare.
+        if valid_range.contains(bits) {
+            Ok(())
+        } else {
+            throw_validation_failure!(
+                self.path,
+                OutOfRange { value: format!("{bits}"), range: valid_range, max_value }
+            )
+        }
+    }
+
+    fn in_mutable_memory(&self, op: &OpTy<'tcx, M::Provenance>) -> bool {
+        if let Some(mplace) = op.as_mplace_or_imm().left() {
+            if let Some(alloc_id) = mplace.ptr().provenance.and_then(|p| p.get_alloc_id()) {
+                let mutability = match self.ecx.tcx.global_alloc(alloc_id) {
+                    GlobalAlloc::Static(_) => {
+                        self.ecx.memory.alloc_map.get(alloc_id).unwrap().1.mutability
+                    }
+                    GlobalAlloc::Memory(alloc) => alloc.inner().mutability,
+                    _ => span_bug!(self.ecx.tcx.span, "not a memory allocation"),
+                };
+                return mutability == Mutability::Mut;
+            }
+        }
+        false
+    }
+}
+
+impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
+    for ValidityVisitor<'rt, 'mir, 'tcx, M>
+{
+    type V = OpTy<'tcx, M::Provenance>;
+
+    #[inline(always)]
+    fn ecx(&self) -> &InterpCx<'mir, 'tcx, M> {
+        self.ecx
+    }
+
+    fn read_discriminant(
+        &mut self,
+        op: &OpTy<'tcx, M::Provenance>,
+    ) -> InterpResult<'tcx, VariantIdx> {
+        self.with_elem(PathElem::EnumTag, move |this| {
+            Ok(try_validation!(
+                this.ecx.read_discriminant(op),
+                this.path,
+                Ub(InvalidTag(val)) => InvalidEnumTag {
+                    value: format!("{val:x}"),
+                },
+                Ub(UninhabitedEnumVariantRead(_)) => UninhabitedEnumVariant,
+                // Uninit / bad provenance are not possible since the field was already previously
+                // checked at its integer type.
+            ))
+        })
+    }
+
+    #[inline]
+    fn visit_field(
+        &mut self,
+        old_op: &OpTy<'tcx, M::Provenance>,
+        field: usize,
+        new_op: &OpTy<'tcx, M::Provenance>,
+    ) -> InterpResult<'tcx> {
+        let elem = self.aggregate_field_path_elem(old_op.layout, field);
+        self.with_elem(elem, move |this| this.visit_value(new_op))
+    }
+
+    #[inline]
+    fn visit_variant(
+        &mut self,
+        old_op: &OpTy<'tcx, M::Provenance>,
+        variant_id: VariantIdx,
+        new_op: &OpTy<'tcx, M::Provenance>,
+    ) -> InterpResult<'tcx> {
+        let name = match old_op.layout.ty.kind() {
+            ty::Adt(adt, _) => PathElem::Variant(adt.variant(variant_id).name),
+            // Coroutines also have variants
+            ty::Coroutine(..) => PathElem::CoroutineState(variant_id),
+            _ => bug!("Unexpected type with variant: {:?}", old_op.layout.ty),
+        };
+        self.with_elem(name, move |this| this.visit_value(new_op))
+    }
+
+    #[inline(always)]
+    fn visit_union(
+        &mut self,
+        op: &OpTy<'tcx, M::Provenance>,
+        _fields: NonZero<usize>,
+    ) -> InterpResult<'tcx> {
+        // Special check for CTFE validation, preventing `UnsafeCell` inside unions in immutable memory.
+        if self.ctfe_mode.is_some_and(|c| !c.allow_immutable_unsafe_cell()) {
+            if !op.layout.is_zst() && !op.layout.ty.is_freeze(*self.ecx.tcx, self.ecx.param_env) {
+                if !self.in_mutable_memory(op) {
+                    throw_validation_failure!(self.path, UnsafeCellInImmutable);
+                }
+            }
+        }
+        Ok(())
+    }
+
+    #[inline]
+    fn visit_box(
+        &mut self,
+        _box_ty: Ty<'tcx>,
+        op: &OpTy<'tcx, M::Provenance>,
+    ) -> InterpResult<'tcx> {
+        self.check_safe_pointer(op, PointerKind::Box)?;
+        Ok(())
+    }
+
+    #[inline]
+    fn visit_value(&mut self, op: &OpTy<'tcx, M::Provenance>) -> InterpResult<'tcx> {
+        trace!("visit_value: {:?}, {:?}", *op, op.layout);
+
+        // Check primitive types -- the leaves of our recursive descent.
+        if self.try_visit_primitive(op)? {
+            return Ok(());
+        }
+
+        // Special check preventing `UnsafeCell` in the inner part of constants
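+        // (Illustration: `const C: Cell<u8> = Cell::new(0)` is fine because the top-level
+        // value is copied at each use site, but an `UnsafeCell` sitting in immutable memory
+        // behind a reference is rejected here.)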
+        if self.ctfe_mode.is_some_and(|c| !c.allow_immutable_unsafe_cell()) {
+            if !op.layout.is_zst()
+                && let Some(def) = op.layout.ty.ty_adt_def()
+                && def.is_unsafe_cell()
+            {
+                if !self.in_mutable_memory(op) {
+                    throw_validation_failure!(self.path, UnsafeCellInImmutable);
+                }
+            }
+        }
+
+        // Recursively walk the value at its type. Apply optimizations for some large types.
+        match op.layout.ty.kind() {
+            ty::Str => {
+                let mplace = op.assert_mem_place(); // strings are unsized and hence never immediate
+                let len = mplace.len(self.ecx)?;
+                try_validation!(
+                    self.ecx.read_bytes_ptr_strip_provenance(mplace.ptr(), Size::from_bytes(len)),
+                    self.path,
+                    Ub(InvalidUninitBytes(..)) => Uninit { expected: ExpectedKind::Str },
+                    Unsup(ReadPointerAsInt(_)) => PointerAsInt { expected: ExpectedKind::Str }
+                );
+            }
+            ty::Array(tys, ..) | ty::Slice(tys)
+                // This optimization applies for types that can hold arbitrary bytes (such as
+                // integer and floating point types) or for structs or tuples with no fields.
+                // FIXME(wesleywiser) This logic could be extended further to arbitrary structs
+                // or tuples made up of integer/floating point types or inhabited ZSTs with no
+                // padding.
+                if matches!(tys.kind(), ty::Int(..) | ty::Uint(..) | ty::Float(..))
+                =>
+            {
+                let expected = if tys.is_integral() { ExpectedKind::Int } else { ExpectedKind::Float };
+                // Optimized handling for arrays of integer/float type.
+
+                // This is the length of the array/slice.
+                let len = op.len(self.ecx)?;
+                // This is the element type size.
+                let layout = self.ecx.layout_of(*tys)?;
+                // This is the size in bytes of the whole array. (This checks for overflow.)
+                let size = layout.size * len;
+                // If the size is 0, there is nothing to check.
+                // (`size` can only be 0 if `len` is 0, and empty arrays are always valid.)
+                if size == Size::ZERO {
+                    return Ok(());
+                }
+                // Now that we definitely have a non-ZST array, we know it lives in memory.
+                let mplace = match op.as_mplace_or_imm() {
+                    Left(mplace) => mplace,
+                    Right(imm) => match *imm {
+                        Immediate::Uninit =>
+                            throw_validation_failure!(self.path, Uninit { expected }),
+                        Immediate::Scalar(..) | Immediate::ScalarPair(..) =>
+                            bug!("arrays/slices can never have Scalar/ScalarPair layout"),
+                    }
+                };
+
+                // Optimization: we just check the entire range at once.
+                // NOTE: Keep this in sync with the handling of integer and float
+                // types above, in `visit_primitive`.
+                // No need for an alignment check here, this is not an actual memory access.
+                let alloc = self.ecx.get_ptr_alloc(mplace.ptr(), size)?.expect("we already excluded size 0");
+
+                match alloc.get_bytes_strip_provenance() {
+                    // In the happy case, we needn't check anything else.
+                    Ok(_) => {}
+                    // Some error happened, try to provide a more detailed description.
+                    Err(err) => {
+                        // For some errors we might be able to provide extra information.
+                        // (This custom logic does not fit the `try_validation!` macro.)
+                        match err.kind() {
+                            Ub(InvalidUninitBytes(Some((_alloc_id, access)))) | Unsup(ReadPointerAsInt(Some((_alloc_id, access)))) => {
+                                // Some byte was uninitialized, determine which
+                                // element that byte belongs to so we can
+                                // provide an index.
+                                let i = usize::try_from(
+                                    access.bad.start.bytes() / layout.size.bytes(),
+                                )
+                                .unwrap();
+                                self.path.push(PathElem::ArrayElem(i));
+
+                                if matches!(err.kind(), Ub(InvalidUninitBytes(_))) {
+                                    throw_validation_failure!(self.path, Uninit { expected })
+                                } else {
+                                    throw_validation_failure!(self.path, PointerAsInt { expected })
+                                }
+                            }
+
+                            // Propagate upwards (that will also check for unexpected errors).
+                            _ => return Err(err),
+                        }
+                    }
+                }
+            }
+            // Fast path for arrays and slices of ZSTs. We only need to check a single ZST element
+            // of an array and not all of them, because there's only a single value of a specific
+            // ZST type, so either validation fails for all elements or none.
+            ty::Array(tys, ..) | ty::Slice(tys) if self.ecx.layout_of(*tys)?.is_zst() => {
+                // Validate just the first element (if any).
+                if op.len(self.ecx)? > 0 {
+                    self.visit_field(op, 0, &self.ecx.project_index(op, 0)?)?;
+                }
+            }
+            _ => {
+                self.walk_value(op)?; // default handler
+            }
+        }
+
+        // *After* all of this, check the ABI. We need to check the ABI to handle
+        // types like `NonNull` where the `Scalar` info is more restrictive than what
+        // the fields say (`rustc_layout_scalar_valid_range_start`).
+        // But in most cases, this will just propagate what the fields say,
+        // and then we want the error to point at the field -- so, first recurse,
+        // then check ABI.
+        //
+        // FIXME: We could avoid some redundant checks here. For newtypes wrapping
+        // scalars, we do the same check on every "level" (e.g., first we check
+        // MyNewtype and then the scalar in there).
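+        //
+        // For illustration: `NonNull<u8>` is layout-wise just a newtype around a raw pointer,
+        // but `rustc_layout_scalar_valid_range_start(1)` gives its `Scalar` layout a valid
+        // range that excludes 0, so it is this ABI check (not the field recursion above) that
+        // rejects a null `NonNull`.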
+        match op.layout.abi {
+            Abi::Uninhabited => {
+                let ty = op.layout.ty;
+                throw_validation_failure!(self.path, UninhabitedVal { ty });
+            }
+            Abi::Scalar(scalar_layout) => {
+                if !scalar_layout.is_uninit_valid() {
+                    // There is something to check here.
+                    let scalar = self.read_scalar(op, ExpectedKind::InitScalar)?;
+                    self.visit_scalar(scalar, scalar_layout)?;
+                }
+            }
+            Abi::ScalarPair(a_layout, b_layout) => {
+                // We can only proceed if *both* scalars need to be initialized.
+                // FIXME: find a way to also check ScalarPair when one side can be uninit but
+                // the other must be init.
+                if !a_layout.is_uninit_valid() && !b_layout.is_uninit_valid() {
+                    let (a, b) =
+                        self.read_immediate(op, ExpectedKind::InitScalar)?.to_scalar_pair();
+                    self.visit_scalar(a, a_layout)?;
+                    self.visit_scalar(b, b_layout)?;
+                }
+            }
+            Abi::Vector { .. } => {
+                // No checks here, we assume layout computation gets this right.
+                // (This is harder to check since Miri does not represent these as `Immediate`. We
+                // also cannot use field projections since this might be a newtype around a vector.)
+            }
+            Abi::Aggregate { .. } => {
+                // Nothing to do.
+            }
+        }
+
+        Ok(())
+    }
+}
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+    fn validate_operand_internal(
+        &self,
+        op: &OpTy<'tcx, M::Provenance>,
+        path: Vec<PathElem>,
+        ref_tracking: Option<&mut RefTracking<MPlaceTy<'tcx, M::Provenance>, Vec<PathElem>>>,
+        ctfe_mode: Option<CtfeValidationMode>,
+    ) -> InterpResult<'tcx> {
+        trace!("validate_operand_internal: {:?}, {:?}", *op, op.layout.ty);
+
+        // Construct a visitor
+        let mut visitor = ValidityVisitor { path, ref_tracking, ctfe_mode, ecx: self };
+
+        // Run it.
+        match self.run_for_validation(|| visitor.visit_value(op)) {
+            Ok(()) => Ok(()),
+            // Pass through validation failures and "invalid program" issues.
+            Err(err)
+                if matches!(
+                    err.kind(),
+                    err_ub!(ValidationError { .. }) | InterpError::InvalidProgram(_)
+                ) =>
+            {
+                Err(err)
+            }
+            // Complain about any other kind of error -- those are bad because we'd like to
+            // report them in a way that shows *where* in the value the issue lies.
+            Err(err) => {
+                bug!(
+                    "Unexpected error during validation: {}",
+                    format_interp_error(self.tcx.dcx(), err)
+                );
+            }
+        }
+    }
+
+    /// This function checks the data at `op` to be const-valid.
+    /// `op` is assumed to cover valid memory if it is an indirect operand.
+    /// It will error if the bits at the destination do not match the ones described by the layout.
+    ///
+    /// `ref_tracking` is used to record references that we encounter so that they
+    /// can be checked recursively by an outside driving loop.
+    ///
+    /// `ctfe_mode` controls whether this must satisfy the rules for constants:
+    /// - no pointers to statics.
+    /// - no `UnsafeCell` or non-ZST `&mut`.
+    #[inline(always)]
+    pub(crate) fn const_validate_operand(
+        &self,
+        op: &OpTy<'tcx, M::Provenance>,
+        path: Vec<PathElem>,
+        ref_tracking: &mut RefTracking<MPlaceTy<'tcx, M::Provenance>, Vec<PathElem>>,
+        ctfe_mode: CtfeValidationMode,
+    ) -> InterpResult<'tcx> {
+        self.validate_operand_internal(op, path, Some(ref_tracking), Some(ctfe_mode))
+    }
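+
+    // A rough sketch of how the CTFE driver is expected to use this together with
+    // `RefTracking` (names like `ecx`, `root_mplace`, and `ctfe_mode` stand in for the
+    // caller's setup): seed the tracker with the root place, then drain the worklist so
+    // that every reference discovered during validation is itself validated:
+    //
+    //     let mut ref_tracking = RefTracking::new(root_mplace);
+    //     while let Some((mplace, path)) = ref_tracking.todo.pop() {
+    //         ecx.const_validate_operand(&mplace.into(), path, &mut ref_tracking, ctfe_mode)?;
+    //     }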
+
+    /// This function checks the data at `op` to be runtime-valid.
+    /// `op` is assumed to cover valid memory if it is an indirect operand.
+    /// It will error if the bits at the destination do not match the ones described by the layout.
+    #[inline(always)]
+    pub fn validate_operand(&self, op: &OpTy<'tcx, M::Provenance>) -> InterpResult<'tcx> {
+        // Note that we *could* actually be in CTFE here with `-Zextra-const-ub-checks`, but it's
+        // still correct to not use `ctfe_mode`: that mode is for validation of the final constant
+        // value, it rules out things like `UnsafeCell` in awkward places. It can also make checking
+        // recurse through references, which, for now, we don't want here either.
+        self.validate_operand_internal(op, vec![], None, None)
+    }
+}
diff --git a/compiler/rustc_const_eval/src/interpret/visitor.rs b/compiler/rustc_const_eval/src/interpret/visitor.rs
new file mode 100644
index 00000000000..0e824f3f592
--- /dev/null
+++ b/compiler/rustc_const_eval/src/interpret/visitor.rs
@@ -0,0 +1,213 @@
+//! Visitor for a run-time value with a given layout: Traverse enums, structs and other compound
+//! types until we arrive at the leaves, with custom handling for primitive types.
+
+use rustc_index::IndexVec;
+use rustc_middle::mir::interpret::InterpResult;
+use rustc_middle::ty::{self, Ty};
+use rustc_target::abi::FieldIdx;
+use rustc_target::abi::{FieldsShape, VariantIdx, Variants};
+
+use std::num::NonZero;
+
+use super::{InterpCx, MPlaceTy, Machine, Projectable};
+
+/// How to traverse a value and what to do when we are at the leaves.
+pub trait ValueVisitor<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>>: Sized {
+    type V: Projectable<'tcx, M::Provenance> + From<MPlaceTy<'tcx, M::Provenance>>;
+
+    /// The visitor must have an `InterpCx` in it.
+    fn ecx(&self) -> &InterpCx<'mir, 'tcx, M>;
+
+    /// `read_discriminant` can be hooked for better error messages.
+    #[inline(always)]
+    fn read_discriminant(&mut self, v: &Self::V) -> InterpResult<'tcx, VariantIdx> {
+        self.ecx().read_discriminant(&v.to_op(self.ecx())?)
+    }
+
+    /// This function provides the chance to change the order in which fields are visited for
+    /// `FieldsShape::Arbitrary`: the fields will be visited in the order
+    /// `(0..num_fields).map(|i| aggregate_field_order(memory_index, i))`.
+    ///
+    /// The default means we iterate in source declaration order; alternatively, this can do an
+    /// inverse lookup in `memory_index` to visit fields in memory order instead.
+    #[inline(always)]
+    fn aggregate_field_order(_memory_index: &IndexVec<FieldIdx, u32>, idx: usize) -> usize {
+        idx
+    }
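+    //
+    // A minimal sketch of that alternative (same signature as above; the linear scan is
+    // just for illustration): invert `memory_index`, which maps source field order to
+    // memory order, so fields are visited in the order they are laid out in memory:
+    //
+    //     fn aggregate_field_order(memory_index: &IndexVec<FieldIdx, u32>, idx: usize) -> usize {
+    //         memory_index
+    //             .iter_enumerated()
+    //             .find(|&(_, &mem_pos)| mem_pos as usize == idx)
+    //             .map(|(field, _)| field.as_usize())
+    //             .expect("`memory_index` is a permutation of 0..num_fields")
+    //     }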
+
+    // Recursive actions, ready to be overloaded.
+    /// Visits the given value, dispatching as appropriate to more specialized visitors.
+    #[inline(always)]
+    fn visit_value(&mut self, v: &Self::V) -> InterpResult<'tcx> {
+        self.walk_value(v)
+    }
+    /// Visits the given value as a union. No automatic recursion can happen here.
+    #[inline(always)]
+    fn visit_union(&mut self, _v: &Self::V, _fields: NonZero<usize>) -> InterpResult<'tcx> {
+        Ok(())
+    }
+    /// Visits the given value as the pointer of a `Box`. There is nothing to recurse into.
+    /// The type of `v` will be a raw pointer to `T`, but this is a field of `Box<T>` and the
+    /// pointee type is the actual `T`. `box_ty` provides the full type of the `Box` itself.
+    #[inline(always)]
+    fn visit_box(&mut self, _box_ty: Ty<'tcx>, _v: &Self::V) -> InterpResult<'tcx> {
+        Ok(())
+    }
+
+    /// Called each time we recurse down to a field of a "product-like" aggregate
+    /// (structs, tuples, arrays and the like, but not enums), passing in old (outer)
+    /// and new (inner) value.
+    /// This gives the visitor the chance to track the stack of nested fields that
+    /// we are descending through.
+    #[inline(always)]
+    fn visit_field(
+        &mut self,
+        _old_val: &Self::V,
+        _field: usize,
+        new_val: &Self::V,
+    ) -> InterpResult<'tcx> {
+        self.visit_value(new_val)
+    }
+    /// Called when recursing into an enum variant.
+    /// This gives the visitor the chance to track the stack of nested fields that
+    /// we are descending through.
+    #[inline(always)]
+    fn visit_variant(
+        &mut self,
+        _old_val: &Self::V,
+        _variant: VariantIdx,
+        new_val: &Self::V,
+    ) -> InterpResult<'tcx> {
+        self.visit_value(new_val)
+    }
+
+    fn walk_value(&mut self, v: &Self::V) -> InterpResult<'tcx> {
+        let ty = v.layout().ty;
+        trace!("walk_value: type: {ty}");
+
+        // Special treatment for special types, where the (static) layout is not sufficient.
+        match *ty.kind() {
+            // If it is a trait object, switch to the real type that was used to create it.
+            ty::Dynamic(_, _, ty::Dyn) => {
+                // Dyn types. This is unsized, and the actual dynamic type of the data is given by the
+                // vtable stored in the place metadata.
+                // unsized values are never immediate, so we can assert_mem_place
+                let op = v.to_op(self.ecx())?;
+                let dest = op.assert_mem_place();
+                let inner_mplace = self.ecx().unpack_dyn_trait(&dest)?.0;
+                trace!("walk_value: dyn object layout: {:#?}", inner_mplace.layout);
+                // recurse with the inner type
+                return self.visit_field(v, 0, &inner_mplace.into());
+            }
+            ty::Dynamic(_, _, ty::DynStar) => {
+                // DynStar types. Very different from a dyn type (but strangely part of the
+                // same variant in `TyKind`): These are pairs where the 2nd component is the
+                // vtable, and the first component is the data (which must be ptr-sized).
+                let data = self.ecx().unpack_dyn_star(v)?.0;
+                return self.visit_field(v, 0, &data);
+            }
+            // Slices do not need special handling here: they have `Array` field
+            // placement with length 0, so we enter the `Array` case below which
+            // indirectly uses the metadata to determine the actual length.
+
+            // However, `Box`... let's talk about `Box`.
+            ty::Adt(def, ..) if def.is_box() => {
+                // `Box` is a hybrid primitive-library-defined type that on the one hand is
+                // a dereferenceable pointer and on the other hand has *basically arbitrary
+                // user-defined layout* since the user controls the 'allocator' field. So it
+                // cannot be treated like a normal pointer, since it does not fit into an
+                // `Immediate`. Yeah, it is quite terrible. But many visitors want to do
+                // something with "all boxed pointers", so we handle this mess for them.
+                //
+                // When we hit a `Box`, we do not do the usual field recursion; instead,
+                // we (a) call `visit_box` on the pointer value, and (b) recurse on the
+                // allocator field. We also assert tons of things to ensure we do not miss
+                // any other fields.
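+                //
+                // Schematically (matching the asserts below):
+                //   Box<T, A> = { Unique<T> { NonNull<T> { *const T }, PhantomData<T> }, A }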
+
+                // `Box` has two fields: the pointer we care about, and the allocator.
+                assert_eq!(v.layout().fields.count(), 2, "`Box` must have exactly 2 fields");
+                let (unique_ptr, alloc) =
+                    (self.ecx().project_field(v, 0)?, self.ecx().project_field(v, 1)?);
+                // Unfortunately there is some type junk in the way here: `unique_ptr` is a `Unique`...
+                // (which means another 2 fields, the second of which is a `PhantomData`)
+                assert_eq!(unique_ptr.layout().fields.count(), 2);
+                let (nonnull_ptr, phantom) = (
+                    self.ecx().project_field(&unique_ptr, 0)?,
+                    self.ecx().project_field(&unique_ptr, 1)?,
+                );
+                assert!(
+                    phantom.layout().ty.ty_adt_def().is_some_and(|adt| adt.is_phantom_data()),
+                    "2nd field of `Unique` should be PhantomData but is {:?}",
+                    phantom.layout().ty,
+                );
+                // ... that contains a `NonNull`... (gladly, only a single field here)
+                assert_eq!(nonnull_ptr.layout().fields.count(), 1);
+                let raw_ptr = self.ecx().project_field(&nonnull_ptr, 0)?; // the actual raw ptr
+                // ... whose only field finally is a raw ptr we can dereference.
+                self.visit_box(ty, &raw_ptr)?;
+
+                // The second `Box` field is the allocator, which we recursively check for validity
+                // like in regular structs.
+                self.visit_field(v, 1, &alloc)?;
+
+                // We visited all parts of this one.
+                return Ok(());
+            }
+
+            // Non-normalized types should never show up here.
+            ty::Param(..)
+            | ty::Alias(..)
+            | ty::Bound(..)
+            | ty::Placeholder(..)
+            | ty::Infer(..)
+            | ty::Error(..) => throw_inval!(TooGeneric),
+
+            // The rest is handled below.
+            _ => {}
+        };
+
+        // Visit the fields of this value.
+        match &v.layout().fields {
+            FieldsShape::Primitive => {}
+            &FieldsShape::Union(fields) => {
+                self.visit_union(v, fields)?;
+            }
+            FieldsShape::Arbitrary { offsets, memory_index } => {
+                for idx in 0..offsets.len() {
+                    let idx = Self::aggregate_field_order(memory_index, idx);
+                    let field = self.ecx().project_field(v, idx)?;
+                    self.visit_field(v, idx, &field)?;
+                }
+            }
+            FieldsShape::Array { .. } => {
+                let mut iter = self.ecx().project_array_fields(v)?;
+                while let Some((idx, field)) = iter.next(self.ecx())? {
+                    self.visit_field(v, idx.try_into().unwrap(), &field)?;
+                }
+            }
+        }
+
+        match v.layout().variants {
+            // If this is a multi-variant layout, find the right variant and proceed
+            // with *its* fields.
+            Variants::Multiple { .. } => {
+                let idx = self.read_discriminant(v)?;
+                // There are 3 cases where downcasts can turn a Scalar/ScalarPair into a different ABI which
+                // could be a problem for `ImmTy` (see layout_sanity_check):
+                // - variant.size == Size::ZERO: works fine because `ImmTy::offset` has a special case for
+                //   zero-sized layouts.
+                // - variant.fields.count() == 0: works fine because `ImmTy::offset` has a special case for
+                //   zero-field aggregates.
+                // - variant.abi.is_uninhabited(): triggers UB in `read_discriminant` so we never get here.
+                let inner = self.ecx().project_downcast(v, idx)?;
+                trace!("walk_value: variant layout: {:#?}", inner.layout());
+                // recurse with the inner type
+                self.visit_variant(v, idx, &inner)?;
+            }
+            // For single-variant layouts, we already did anything there is to do.
+            Variants::Single { .. } => {}
+        }
+
+        Ok(())
+    }
+}
diff --git a/compiler/rustc_const_eval/src/lib.rs b/compiler/rustc_const_eval/src/lib.rs
new file mode 100644
index 00000000000..1e7ee208af1
--- /dev/null
+++ b/compiler/rustc_const_eval/src/lib.rs
@@ -0,0 +1,59 @@
+/*!
+
+Rust MIR: a lowered representation of Rust.
+
+*/
+
+#![allow(internal_features)]
+#![allow(rustc::diagnostic_outside_of_impl)]
+#![feature(rustdoc_internals)]
+#![doc(rust_logo)]
+#![feature(assert_matches)]
+#![feature(box_patterns)]
+#![feature(decl_macro)]
+#![feature(generic_nonzero)]
+#![feature(let_chains)]
+#![feature(slice_ptr_get)]
+#![feature(strict_provenance)]
+#![feature(never_type)]
+#![feature(trait_alias)]
+#![feature(try_blocks)]
+#![feature(yeet_expr)]
+#![feature(if_let_guard)]
+
+#[macro_use]
+extern crate tracing;
+#[macro_use]
+extern crate rustc_middle;
+
+pub mod const_eval;
+mod errors;
+pub mod interpret;
+pub mod transform;
+pub mod util;
+
+pub use errors::ReportErrorExt;
+
+use rustc_middle::{ty, util::Providers};
+
+rustc_fluent_macro::fluent_messages! { "../messages.ftl" }
+
+pub fn provide(providers: &mut Providers) {
+    const_eval::provide(providers);
+    providers.eval_to_const_value_raw = const_eval::eval_to_const_value_raw_provider;
+    providers.eval_to_allocation_raw = const_eval::eval_to_allocation_raw_provider;
+    providers.eval_static_initializer = const_eval::eval_static_initializer_provider;
+    providers.hooks.const_caller_location = util::caller_location::const_caller_location_provider;
+    providers.eval_to_valtree = |tcx, param_env_and_value| {
+        let (param_env, raw) = param_env_and_value.into_parts();
+        const_eval::eval_to_valtree(tcx, param_env, raw)
+    };
+    providers.hooks.try_destructure_mir_constant_for_user_output =
+        const_eval::try_destructure_mir_constant_for_user_output;
+    providers.valtree_to_const_val = |tcx, (ty, valtree)| {
+        const_eval::valtree_to_const_value(tcx, ty::ParamEnv::empty().and(ty), valtree)
+    };
+    providers.check_validity_requirement = |tcx, (init_kind, param_env_and_ty)| {
+        util::check_validity_requirement(tcx, init_kind, param_env_and_ty)
+    };
+}
diff --git a/compiler/rustc_const_eval/src/transform/check_consts/check.rs b/compiler/rustc_const_eval/src/transform/check_consts/check.rs
new file mode 100644
index 00000000000..a93e8138aa4
--- /dev/null
+++ b/compiler/rustc_const_eval/src/transform/check_consts/check.rs
@@ -0,0 +1,1006 @@
+//! The `Visitor` responsible for actually checking a `mir::Body` for invalid operations.
+
+use rustc_errors::{Diag, ErrorGuaranteed};
+use rustc_hir as hir;
+use rustc_hir::def_id::DefId;
+use rustc_index::bit_set::BitSet;
+use rustc_infer::infer::TyCtxtInferExt;
+use rustc_infer::traits::ObligationCause;
+use rustc_middle::mir::visit::{MutatingUseContext, NonMutatingUseContext, PlaceContext, Visitor};
+use rustc_middle::mir::*;
+use rustc_middle::ty::{self, adjustment::PointerCoercion, Ty, TyCtxt};
+use rustc_middle::ty::{Instance, InstanceDef, TypeVisitableExt};
+use rustc_mir_dataflow::Analysis;
+use rustc_span::{sym, Span, Symbol};
+use rustc_trait_selection::traits::error_reporting::TypeErrCtxtExt as _;
+use rustc_trait_selection::traits::{self, ObligationCauseCode, ObligationCtxt};
+use rustc_type_ir::visit::{TypeSuperVisitable, TypeVisitor};
+
+use std::mem;
+use std::ops::Deref;
+
+use super::ops::{self, NonConstOp, Status};
+use super::qualifs::{self, HasMutInterior, NeedsDrop, NeedsNonConstDrop};
+use super::resolver::FlowSensitiveAnalysis;
+use super::{ConstCx, Qualif};
+use crate::const_eval::is_unstable_const_fn;
+use crate::errors::UnstableInStable;
+
+type QualifResults<'mir, 'tcx, Q> =
+    rustc_mir_dataflow::ResultsCursor<'mir, 'tcx, FlowSensitiveAnalysis<'mir, 'mir, 'tcx, Q>>;
+
+#[derive(Default)]
+pub(crate) struct Qualifs<'mir, 'tcx> {
+    has_mut_interior: Option<QualifResults<'mir, 'tcx, HasMutInterior>>,
+    needs_drop: Option<QualifResults<'mir, 'tcx, NeedsDrop>>,
+    needs_non_const_drop: Option<QualifResults<'mir, 'tcx, NeedsNonConstDrop>>,
+}
+
+impl<'mir, 'tcx> Qualifs<'mir, 'tcx> {
+    /// Returns `true` if `local` is `NeedsDrop` at the given `Location`.
+    ///
+    /// Only updates the cursor if absolutely necessary.
+    pub fn needs_drop(
+        &mut self,
+        ccx: &'mir ConstCx<'mir, 'tcx>,
+        local: Local,
+        location: Location,
+    ) -> bool {
+        let ty = ccx.body.local_decls[local].ty;
+        // Peeking into opaque types causes cycles if the current function declares said opaque
+        // type. Thus we avoid short circuiting on the type and instead run the more expensive
+        // analysis that looks at the actual usage within this function
+        if !ty.has_opaque_types() && !NeedsDrop::in_any_value_of_ty(ccx, ty) {
+            return false;
+        }
+
+        let needs_drop = self.needs_drop.get_or_insert_with(|| {
+            let ConstCx { tcx, body, .. } = *ccx;
+
+            FlowSensitiveAnalysis::new(NeedsDrop, ccx)
+                .into_engine(tcx, body)
+                .iterate_to_fixpoint()
+                .into_results_cursor(body)
+        });
+
+        needs_drop.seek_before_primary_effect(location);
+        needs_drop.get().contains(local)
+    }
+
+    /// Returns `true` if `local` is `NeedsNonConstDrop` at the given `Location`.
+    ///
+    /// Only updates the cursor if absolutely necessary.
+    pub fn needs_non_const_drop(
+        &mut self,
+        ccx: &'mir ConstCx<'mir, 'tcx>,
+        local: Local,
+        location: Location,
+    ) -> bool {
+        let ty = ccx.body.local_decls[local].ty;
+        // Peeking into opaque types causes cycles if the current function declares said opaque
+        // type. Thus we avoid short circuiting on the type and instead run the more expensive
+        // analysis that looks at the actual usage within this function
+        if !ty.has_opaque_types() && !NeedsNonConstDrop::in_any_value_of_ty(ccx, ty) {
+            return false;
+        }
+
+        let needs_non_const_drop = self.needs_non_const_drop.get_or_insert_with(|| {
+            let ConstCx { tcx, body, .. } = *ccx;
+
+            FlowSensitiveAnalysis::new(NeedsNonConstDrop, ccx)
+                .into_engine(tcx, body)
+                .iterate_to_fixpoint()
+                .into_results_cursor(body)
+        });
+
+        needs_non_const_drop.seek_before_primary_effect(location);
+        needs_non_const_drop.get().contains(local)
+    }
+
+    /// Returns `true` if `local` is `HasMutInterior` at the given `Location`.
+    ///
+    /// Only updates the cursor if absolutely necessary.
+    pub fn has_mut_interior(
+        &mut self,
+        ccx: &'mir ConstCx<'mir, 'tcx>,
+        local: Local,
+        location: Location,
+    ) -> bool {
+        let ty = ccx.body.local_decls[local].ty;
+        // Peeking into opaque types causes cycles if the current function declares said opaque
+        // type. Thus we avoid short circuiting on the type and instead run the more expensive
+        // analysis that looks at the actual usage within this function
+        if !ty.has_opaque_types() && !HasMutInterior::in_any_value_of_ty(ccx, ty) {
+            return false;
+        }
+
+        let has_mut_interior = self.has_mut_interior.get_or_insert_with(|| {
+            let ConstCx { tcx, body, .. } = *ccx;
+
+            FlowSensitiveAnalysis::new(HasMutInterior, ccx)
+                .into_engine(tcx, body)
+                .iterate_to_fixpoint()
+                .into_results_cursor(body)
+        });
+
+        has_mut_interior.seek_before_primary_effect(location);
+        has_mut_interior.get().contains(local)
+    }
+
+    fn in_return_place(
+        &mut self,
+        ccx: &'mir ConstCx<'mir, 'tcx>,
+        tainted_by_errors: Option<ErrorGuaranteed>,
+    ) -> ConstQualifs {
+        // Find the `Return` terminator if one exists.
+        //
+        // If no `Return` terminator exists, this MIR is divergent. Just return the conservative
+        // qualifs for the return type.
+        let return_block = ccx
+            .body
+            .basic_blocks
+            .iter_enumerated()
+            .find(|(_, block)| matches!(block.terminator().kind, TerminatorKind::Return))
+            .map(|(bb, _)| bb);
+
+        let Some(return_block) = return_block else {
+            return qualifs::in_any_value_of_ty(ccx, ccx.body.return_ty(), tainted_by_errors);
+        };
+
+        let return_loc = ccx.body.terminator_loc(return_block);
+
+        ConstQualifs {
+            needs_drop: self.needs_drop(ccx, RETURN_PLACE, return_loc),
+            needs_non_const_drop: self.needs_non_const_drop(ccx, RETURN_PLACE, return_loc),
+            has_mut_interior: self.has_mut_interior(ccx, RETURN_PLACE, return_loc),
+            tainted_by_errors,
+        }
+    }
+}
+
+struct LocalReturnTyVisitor<'ck, 'mir, 'tcx> {
+    kind: LocalKind,
+    checker: &'ck mut Checker<'mir, 'tcx>,
+}
+
+impl<'ck, 'mir, 'tcx> TypeVisitor<TyCtxt<'tcx>> for LocalReturnTyVisitor<'ck, 'mir, 'tcx> {
+    fn visit_ty(&mut self, t: Ty<'tcx>) {
+        match t.kind() {
+            ty::FnPtr(_) => {}
+            ty::Ref(_, _, hir::Mutability::Mut) => {
+                self.checker.check_op(ops::ty::MutRef(self.kind));
+                t.super_visit_with(self)
+            }
+            _ => t.super_visit_with(self),
+        }
+    }
+}
+
+pub struct Checker<'mir, 'tcx> {
+    ccx: &'mir ConstCx<'mir, 'tcx>,
+    qualifs: Qualifs<'mir, 'tcx>,
+
+    /// The span of the current statement.
+    span: Span,
+
+    /// A set that stores for each local whether it has a `StorageDead` for it somewhere.
+    local_has_storage_dead: Option<BitSet<Local>>,
+
+    error_emitted: Option<ErrorGuaranteed>,
+    secondary_errors: Vec<Diag<'tcx>>,
+}
+
+impl<'mir, 'tcx> Deref for Checker<'mir, 'tcx> {
+    type Target = ConstCx<'mir, 'tcx>;
+
+    fn deref(&self) -> &Self::Target {
+        self.ccx
+    }
+}
+
+impl<'mir, 'tcx> Checker<'mir, 'tcx> {
+    pub fn new(ccx: &'mir ConstCx<'mir, 'tcx>) -> Self {
+        Checker {
+            span: ccx.body.span,
+            ccx,
+            qualifs: Default::default(),
+            local_has_storage_dead: None,
+            error_emitted: None,
+            secondary_errors: Vec::new(),
+        }
+    }
+
+    pub fn check_body(&mut self) {
+        let ConstCx { tcx, body, .. } = *self.ccx;
+        let def_id = self.ccx.def_id();
+
+        // `async` functions cannot be `const fn`. This is checked during AST lowering, so there's
+        // no need to emit duplicate errors here.
+        if self.ccx.is_async() || body.coroutine.is_some() {
+            tcx.dcx().span_delayed_bug(body.span, "`async` functions cannot be `const fn`");
+            return;
+        }
+
+        // The local type and predicate checks are not free and only relevant for `const fn`s.
+        if self.const_kind() == hir::ConstContext::ConstFn {
+            for (idx, local) in body.local_decls.iter_enumerated() {
+                // Handle the return place below.
+                if idx == RETURN_PLACE {
+                    continue;
+                }
+
+                self.span = local.source_info.span;
+                self.check_local_or_return_ty(local.ty, idx);
+            }
+
+            // impl trait is gone in MIR, so check the return type of a const fn by its signature
+            // instead of the type of the return place.
+            self.span = body.local_decls[RETURN_PLACE].source_info.span;
+            let return_ty = self.ccx.fn_sig().output();
+            self.check_local_or_return_ty(return_ty.skip_binder(), RETURN_PLACE);
+        }
+
+        if !tcx.has_attr(def_id, sym::rustc_do_not_const_check) {
+            self.visit_body(body);
+        }
+
+        // If we got through const-checking without emitting any "primary" errors, emit any
+        // "secondary" errors if they occurred. Otherwise, cancel the "secondary" errors.
+        let secondary_errors = mem::take(&mut self.secondary_errors);
+        if self.error_emitted.is_none() {
+            for error in secondary_errors {
+                self.error_emitted = Some(error.emit());
+            }
+        } else {
+            assert!(self.tcx.dcx().has_errors().is_some());
+            for error in secondary_errors {
+                error.cancel();
+            }
+        }
+    }
+
+    fn local_has_storage_dead(&mut self, local: Local) -> bool {
+        let ccx = self.ccx;
+        self.local_has_storage_dead
+            .get_or_insert_with(|| {
+                struct StorageDeads {
+                    locals: BitSet<Local>,
+                }
+                impl<'tcx> Visitor<'tcx> for StorageDeads {
+                    fn visit_statement(&mut self, stmt: &Statement<'tcx>, _: Location) {
+                        if let StatementKind::StorageDead(l) = stmt.kind {
+                            self.locals.insert(l);
+                        }
+                    }
+                }
+                let mut v = StorageDeads { locals: BitSet::new_empty(ccx.body.local_decls.len()) };
+                v.visit_body(ccx.body);
+                v.locals
+            })
+            .contains(local)
+    }
+
+    pub fn qualifs_in_return_place(&mut self) -> ConstQualifs {
+        self.qualifs.in_return_place(self.ccx, self.error_emitted)
+    }
+
+    /// Emits an error if an expression cannot be evaluated in the current context.
+    pub fn check_op(&mut self, op: impl NonConstOp<'tcx>) {
+        self.check_op_spanned(op, self.span);
+    }
+
+    /// Emits an error at the given `span` if an expression cannot be evaluated in the current
+    /// context.
+    pub fn check_op_spanned<O: NonConstOp<'tcx>>(&mut self, op: O, span: Span) {
+        let gate = match op.status_in_item(self.ccx) {
+            Status::Allowed => return,
+
+            Status::Unstable(gate) if self.tcx.features().active(gate) => {
+                let unstable_in_stable = self.ccx.is_const_stable_const_fn()
+                    && !super::rustc_allow_const_fn_unstable(self.tcx, self.def_id(), gate);
+                if unstable_in_stable {
+                    emit_unstable_in_stable_error(self.ccx, span, gate);
+                }
+
+                return;
+            }
+
+            Status::Unstable(gate) => Some(gate),
+            Status::Forbidden => None,
+        };
+
+        if self.tcx.sess.opts.unstable_opts.unleash_the_miri_inside_of_you {
+            self.tcx.sess.miri_unleashed_feature(span, gate);
+            return;
+        }
+
+        let err = op.build_error(self.ccx, span);
+        assert!(err.is_error());
+
+        match op.importance() {
+            ops::DiagImportance::Primary => {
+                let reported = err.emit();
+                self.error_emitted = Some(reported);
+            }
+
+            ops::DiagImportance::Secondary => self.secondary_errors.push(err),
+        }
+    }
+
+    fn check_static(&mut self, def_id: DefId, span: Span) {
+        if self.tcx.is_thread_local_static(def_id) {
+            self.tcx.dcx().span_bug(span, "tls access is checked in `Rvalue::ThreadLocalRef`");
+        }
+        self.check_op_spanned(ops::StaticAccess, span)
+    }
+
+    fn check_local_or_return_ty(&mut self, ty: Ty<'tcx>, local: Local) {
+        let kind = self.body.local_kind(local);
+
+        let mut visitor = LocalReturnTyVisitor { kind, checker: self };
+
+        visitor.visit_ty(ty);
+    }
+
+    fn check_mut_borrow(&mut self, place: &Place<'_>, kind: hir::BorrowKind) {
+        match self.const_kind() {
+            // In a const fn all borrows are transient or point to the places given via
+            // references in the arguments (so we already checked them with
+            // TransientMutBorrow/MutBorrow as appropriate).
+            // The borrow checker guarantees that no new non-transient borrows are created.
+            // NOTE: Once we have heap allocations during CTFE we need to figure out
+            // how to prevent `const fn` from creating long-lived allocations that point
+            // to mutable memory.
+            hir::ConstContext::ConstFn => self.check_op(ops::TransientMutBorrow(kind)),
+            _ => {
+                // For indirect places, we are not creating a new permanent borrow; it's just as
+                // transient as the already existing one. For reborrowing references, this is handled
+                // at the top of `visit_rvalue`, but for raw pointers we handle it here.
+                // Pointers/references to `static mut` and cases where the `*` is not the first
+                // projection also end up here.
+                // Locals with StorageDead do not live beyond the evaluation and can
+                // thus safely be borrowed without being able to be leaked to the final
+                // value of the constant.
+                // Note: This is only sound if every local that has a `StorageDead` has a
+                // `StorageDead` in every control flow path leading to a `return` terminator.
+                // The good news is that interning will detect if any unexpected mutable
+                // pointer slips through.
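+                // For illustration: in `const C: i32 = { let mut x = 0; let r = &mut x; *r = 1; x };`
+                // the local `x` has a `StorageDead` before the constant's value is produced, so the
+                // `&mut x` borrow cannot leak into the final value and counts as transient here.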
+                if place.is_indirect() || self.local_has_storage_dead(place.local) {
+                    self.check_op(ops::TransientMutBorrow(kind));
+                } else {
+                    self.check_op(ops::MutBorrow(kind));
+                }
+            }
+        }
+    }
+}
+
+impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
+    fn visit_basic_block_data(&mut self, bb: BasicBlock, block: &BasicBlockData<'tcx>) {
+        trace!("visit_basic_block_data: bb={:?} is_cleanup={:?}", bb, block.is_cleanup);
+
+        // We don't const-check basic blocks on the cleanup path since we never unwind during
+        // const-eval: a panic causes an immediate compile error. In other words, cleanup blocks
+        // are unreachable during const-eval.
+        //
+        // We can't be more conservative (e.g., by const-checking cleanup blocks anyways) because
+        // locals that would never be dropped during normal execution are sometimes dropped during
+        // unwinding, which means backwards-incompatible live-drop errors.
+        if block.is_cleanup {
+            return;
+        }
+
+        self.super_basic_block_data(bb, block);
+    }
+
+    fn visit_rvalue(&mut self, rvalue: &Rvalue<'tcx>, location: Location) {
+        trace!("visit_rvalue: rvalue={:?} location={:?}", rvalue, location);
+
+        // Special-case reborrows to be more like a copy of a reference.
+        // FIXME: this does not actually handle all reborrows. It only detects cases where `*` is the outermost
+        // projection of the borrowed place; it skips deref'ing raw pointers and it skips `static`.
+        // All those cases are handled below with shared/mutable borrows.
+        // Once `const_mut_refs` is stable, we should be able to entirely remove this special case.
+        // (`const_refs_to_cell` is not needed, we already allow all borrows of indirect places anyway.)
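+        // For illustration: given `r: &mut T`, the expression `&mut *r` is such a reborrow
+        // (the `*` is the outermost projection of the borrowed place), so it is treated more
+        // like a copy of `r` than like a fresh mutable borrow.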
+        match *rvalue {
+            Rvalue::Ref(_, kind, place) => {
+                if let Some(reborrowed_place_ref) = place_as_reborrow(self.tcx, self.body, place) {
+                    let ctx = match kind {
+                        BorrowKind::Shared => {
+                            PlaceContext::NonMutatingUse(NonMutatingUseContext::SharedBorrow)
+                        }
+                        BorrowKind::Fake => {
+                            PlaceContext::NonMutatingUse(NonMutatingUseContext::FakeBorrow)
+                        }
+                        BorrowKind::Mut { .. } => {
+                            PlaceContext::MutatingUse(MutatingUseContext::Borrow)
+                        }
+                    };
+                    self.visit_local(reborrowed_place_ref.local, ctx, location);
+                    self.visit_projection(reborrowed_place_ref, ctx, location);
+                    return;
+                }
+            }
+            Rvalue::AddressOf(mutbl, place) => {
+                if let Some(reborrowed_place_ref) = place_as_reborrow(self.tcx, self.body, place) {
+                    let ctx = match mutbl {
+                        Mutability::Not => {
+                            PlaceContext::NonMutatingUse(NonMutatingUseContext::AddressOf)
+                        }
+                        Mutability::Mut => PlaceContext::MutatingUse(MutatingUseContext::AddressOf),
+                    };
+                    self.visit_local(reborrowed_place_ref.local, ctx, location);
+                    self.visit_projection(reborrowed_place_ref, ctx, location);
+                    return;
+                }
+            }
+            _ => {}
+        }
+
+        self.super_rvalue(rvalue, location);
+
+        match rvalue {
+            Rvalue::ThreadLocalRef(_) => self.check_op(ops::ThreadLocalAccess),
+
+            Rvalue::Use(_)
+            | Rvalue::CopyForDeref(..)
+            | Rvalue::Repeat(..)
+            | Rvalue::Discriminant(..)
+            | Rvalue::Len(_) => {}
+
+            Rvalue::Aggregate(kind, ..) => {
+                if let AggregateKind::Coroutine(def_id, ..) = kind.as_ref()
+                    && let Some(
+                        coroutine_kind @ hir::CoroutineKind::Desugared(
+                            hir::CoroutineDesugaring::Async,
+                            _,
+                        ),
+                    ) = self.tcx.coroutine_kind(def_id)
+                {
+                    self.check_op(ops::Coroutine(coroutine_kind));
+                }
+            }
+
+            Rvalue::Ref(_, BorrowKind::Mut { .. }, place)
+            | Rvalue::AddressOf(Mutability::Mut, place) => {
+                // Inside mutable statics, we allow arbitrary mutable references.
+                // We've allowed `static mut FOO = &mut [elements];` for a long time (the exact
+                // reasons why are lost to history), and there is no reason to restrict that to
+                // arrays and slices.
+                let is_allowed =
+                    self.const_kind() == hir::ConstContext::Static(hir::Mutability::Mut);
+
+                if !is_allowed {
+                    self.check_mut_borrow(
+                        place,
+                        if matches!(rvalue, Rvalue::Ref(..)) {
+                            hir::BorrowKind::Ref
+                        } else {
+                            hir::BorrowKind::Raw
+                        },
+                    );
+                }
+            }
+
+            Rvalue::Ref(_, BorrowKind::Shared | BorrowKind::Fake, place)
+            | Rvalue::AddressOf(Mutability::Not, place) => {
+                let borrowed_place_has_mut_interior = qualifs::in_place::<HasMutInterior, _>(
+                    self.ccx,
+                    &mut |local| self.qualifs.has_mut_interior(self.ccx, local, location),
+                    place.as_ref(),
+                );
+
+                // If the place is indirect, this is basically a reborrow. We have a reborrow
+                // special case above, but for raw pointers and pointers/references to `static` and
+                // when the `*` is not the first projection, `place_as_reborrow` does not recognize
+                // them as such, so we end up here. This should probably be considered a
+                // `TransientCellBorrow` (we consider the equivalent mutable case a
+                // `TransientMutBorrow`), but such reborrows got accidentally stabilized already and
+                // it is too much of a breaking change to take back.
+                if borrowed_place_has_mut_interior && !place.is_indirect() {
+                    match self.const_kind() {
+                        // In a const fn all borrows are transient or point to the places given via
+                        // references in the arguments (so we already checked them with
+                        // TransientCellBorrow/CellBorrow as appropriate).
+                        // The borrow checker guarantees that no new non-transient borrows are created.
+                        // NOTE: Once we have heap allocations during CTFE we need to figure out
+                        // how to prevent `const fn` from creating long-lived allocations that point
+                        // to (interior) mutable memory.
+                        hir::ConstContext::ConstFn => self.check_op(ops::TransientCellBorrow),
+                        _ => {
+                            // Locals with StorageDead are definitely not part of the final constant value, and
+                            // it is thus inherently safe to permit such locals to have their
+                            // address taken as we can't end up with a reference to them in the
+                            // final value.
+                            // Note: This is only sound if every local that has a `StorageDead` has a
+                            // `StorageDead` in every control flow path leading to a `return` terminator.
+                            // The good news is that interning will detect if any unexpected mutable
+                            // pointer slips through.
+                            if self.local_has_storage_dead(place.local) {
+                                self.check_op(ops::TransientCellBorrow);
+                            } else {
+                                self.check_op(ops::CellBorrow);
+                            }
+                        }
+                    }
+                }
+            }
+
+            Rvalue::Cast(
+                CastKind::PointerCoercion(
+                    PointerCoercion::MutToConstPointer
+                    | PointerCoercion::ArrayToPointer
+                    | PointerCoercion::UnsafeFnPointer
+                    | PointerCoercion::ClosureFnPointer(_)
+                    | PointerCoercion::ReifyFnPointer,
+                ),
+                _,
+                _,
+            ) => {
+                // These are all okay; they only change the type, not the data.
+            }
+
+            Rvalue::Cast(CastKind::PointerCoercion(PointerCoercion::Unsize), _, _) => {
+                // Unsizing is implemented for CTFE.
+            }
+
+            Rvalue::Cast(CastKind::PointerExposeAddress, _, _) => {
+                self.check_op(ops::RawPtrToIntCast);
+            }
+            Rvalue::Cast(CastKind::PointerFromExposedAddress, _, _) => {
+                // Since no pointer can ever get exposed (rejected above), this is easy to support.
+            }
+
+            Rvalue::Cast(CastKind::DynStar, _, _) => {
+                // `dyn*` coercion is implemented for CTFE.
+            }
+
+            Rvalue::Cast(_, _, _) => {}
+
+            Rvalue::NullaryOp(
+                NullOp::SizeOf | NullOp::AlignOf | NullOp::OffsetOf(_) | NullOp::UbCheck(_),
+                _,
+            ) => {}
+            Rvalue::ShallowInitBox(_, _) => {}
+
+            Rvalue::UnaryOp(_, operand) => {
+                let ty = operand.ty(self.body, self.tcx);
+                if is_int_bool_or_char(ty) {
+                    // Int, bool, and char operations are fine.
+                } else if ty.is_floating_point() {
+                    self.check_op(ops::FloatingPointOp);
+                } else {
+                    span_bug!(self.span, "non-primitive type in `Rvalue::UnaryOp`: {:?}", ty);
+                }
+            }
+
+            Rvalue::BinaryOp(op, box (lhs, rhs)) | Rvalue::CheckedBinaryOp(op, box (lhs, rhs)) => {
+                let lhs_ty = lhs.ty(self.body, self.tcx);
+                let rhs_ty = rhs.ty(self.body, self.tcx);
+
+                if is_int_bool_or_char(lhs_ty) && is_int_bool_or_char(rhs_ty) {
+                    // Int, bool, and char operations are fine.
+                } else if lhs_ty.is_fn_ptr() || lhs_ty.is_unsafe_ptr() {
+                    assert!(matches!(
+                        op,
+                        BinOp::Eq
+                            | BinOp::Ne
+                            | BinOp::Le
+                            | BinOp::Lt
+                            | BinOp::Ge
+                            | BinOp::Gt
+                            | BinOp::Offset
+                    ));
+
+                    self.check_op(ops::RawPtrComparison);
+                } else if lhs_ty.is_floating_point() || rhs_ty.is_floating_point() {
+                    self.check_op(ops::FloatingPointOp);
+                } else {
+                    span_bug!(
+                        self.span,
+                        "non-primitive type in `Rvalue::BinaryOp`: {:?} ⚬ {:?}",
+                        lhs_ty,
+                        rhs_ty
+                    );
+                }
+            }
+        }
+    }
+
+    fn visit_operand(&mut self, op: &Operand<'tcx>, location: Location) {
+        self.super_operand(op, location);
+        if let Operand::Constant(c) = op {
+            if let Some(def_id) = c.check_static_ptr(self.tcx) {
+                self.check_static(def_id, self.span);
+            }
+        }
+    }
+    fn visit_projection_elem(
+        &mut self,
+        place_ref: PlaceRef<'tcx>,
+        elem: PlaceElem<'tcx>,
+        context: PlaceContext,
+        location: Location,
+    ) {
+        trace!(
+            "visit_projection_elem: place_ref={:?} elem={:?} \
+            context={:?} location={:?}",
+            place_ref,
+            elem,
+            context,
+            location,
+        );
+
+        self.super_projection_elem(place_ref, elem, context, location);
+
+        match elem {
+            ProjectionElem::Deref => {
+                let base_ty = place_ref.ty(self.body, self.tcx).ty;
+                if base_ty.is_unsafe_ptr() {
+                    if place_ref.projection.is_empty() {
+                        let decl = &self.body.local_decls[place_ref.local];
+                        // If this is a static, then this is not really dereferencing a pointer,
+                        // just directly accessing a static. That is not subject to any feature
+                        // gates (except for the one about whether statics can even be used, but
+                        // that is checked already by `visit_operand`).
+                        if let LocalInfo::StaticRef { .. } = *decl.local_info() {
+                            return;
+                        }
+                    }
+
+                    // `*const T` is stable, `*mut T` is not
+                    if !base_ty.is_mutable_ptr() {
+                        return;
+                    }
+
+                    self.check_op(ops::RawMutPtrDeref);
+                }
+
+                if context.is_mutating_use() {
+                    self.check_op(ops::MutDeref);
+                }
+            }
+
+            ProjectionElem::ConstantIndex { .. }
+            | ProjectionElem::Downcast(..)
+            | ProjectionElem::OpaqueCast(..)
+            | ProjectionElem::Subslice { .. }
+            | ProjectionElem::Subtype(..)
+            | ProjectionElem::Field(..)
+            | ProjectionElem::Index(_) => {}
+        }
+    }
+
+    fn visit_source_info(&mut self, source_info: &SourceInfo) {
+        trace!("visit_source_info: source_info={:?}", source_info);
+        self.span = source_info.span;
+    }
+
+    fn visit_statement(&mut self, statement: &Statement<'tcx>, location: Location) {
+        trace!("visit_statement: statement={:?} location={:?}", statement, location);
+
+        self.super_statement(statement, location);
+
+        match statement.kind {
+            StatementKind::Assign(..)
+            | StatementKind::SetDiscriminant { .. }
+            | StatementKind::Deinit(..)
+            | StatementKind::FakeRead(..)
+            | StatementKind::StorageLive(_)
+            | StatementKind::StorageDead(_)
+            | StatementKind::Retag { .. }
+            | StatementKind::PlaceMention(..)
+            | StatementKind::AscribeUserType(..)
+            | StatementKind::Coverage(..)
+            | StatementKind::Intrinsic(..)
+            | StatementKind::ConstEvalCounter
+            | StatementKind::Nop => {}
+        }
+    }
+
+    #[instrument(level = "debug", skip(self))]
+    fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, location: Location) {
+        self.super_terminator(terminator, location);
+
+        match &terminator.kind {
+            TerminatorKind::Call { func, args, fn_span, call_source, .. } => {
+                let ConstCx { tcx, body, param_env, .. } = *self.ccx;
+                let caller = self.def_id();
+
+                let fn_ty = func.ty(body, tcx);
+
+                let (mut callee, mut fn_args) = match *fn_ty.kind() {
+                    ty::FnDef(def_id, fn_args) => (def_id, fn_args),
+
+                    ty::FnPtr(_) => {
+                        self.check_op(ops::FnCallIndirect);
+                        return;
+                    }
+                    _ => {
+                        span_bug!(terminator.source_info.span, "invalid callee of type {:?}", fn_ty)
+                    }
+                };
+
+                // Check that all trait bounds that are marked as `~const` can be satisfied.
+                //
+                // Typeck only does a "non-const" check since it operates on HIR and cannot distinguish
+                // which path expressions are actually being called and which are only used as
+                // function pointers. Re-checking these bounds here on MIR is therefore required
+                // for correctness.
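+                // For illustration: for a call to `const fn f<T: ~const Default>() -> T`, the
+                // `~const Default` bound must hold in this const context, not merely in a
+                // non-const one.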
+                let infcx = tcx.infer_ctxt().build();
+                let ocx = ObligationCtxt::new(&infcx);
+
+                let predicates = tcx.predicates_of(callee).instantiate(tcx, fn_args);
+                let cause = ObligationCause::new(
+                    terminator.source_info.span,
+                    self.body.source.def_id().expect_local(),
+                    ObligationCauseCode::ItemObligation(callee),
+                );
+                let normalized_predicates = ocx.normalize(&cause, param_env, predicates);
+                ocx.register_obligations(traits::predicates_for_generics(
+                    |_, _| cause.clone(),
+                    self.param_env,
+                    normalized_predicates,
+                ));
+
+                let errors = ocx.select_all_or_error();
+                if !errors.is_empty() {
+                    infcx.err_ctxt().report_fulfillment_errors(errors);
+                }
+
+                let mut is_trait = false;
+                // Attempting to call a trait method?
+                if tcx.trait_of_item(callee).is_some() {
+                    trace!("attempting to call a trait method");
+                    // Trait method calls are only permitted when `effects` is enabled.
+                    // We don't error here, since that is handled by typeck. We try to resolve
+                    // the trait into the concrete method and use that for the const stability
+                    // checks.
+                    // FIXME(effects) we might consider moving const stability checks to typeck as well.
+                    if tcx.features().effects {
+                        is_trait = true;
+
+                        if let Ok(Some(instance)) =
+                            Instance::resolve(tcx, param_env, callee, fn_args)
+                            && let InstanceDef::Item(def) = instance.def
+                        {
+                            // Resolve a trait method call to its concrete implementation, which may be in a
+                            // `const` trait impl. This is only used for the const stability check below, since
+                            // we want to look at the concrete impl's stability.
+                            fn_args = instance.args;
+                            callee = def;
+                        }
+                    } else {
+                        self.check_op(ops::FnCallNonConst {
+                            caller,
+                            callee,
+                            args: fn_args,
+                            span: *fn_span,
+                            call_source: *call_source,
+                            feature: Some(if tcx.features().const_trait_impl {
+                                sym::effects
+                            } else {
+                                sym::const_trait_impl
+                            }),
+                        });
+                        return;
+                    }
+                }
+
+                // At this point, we are calling a function, `callee`, whose `DefId` is known...
+
+                // `begin_panic` and `#[rustc_const_panic_str]` functions accept arguments of
+                // generic types other than `str`. Check here to enforce that only `str` can be
+                // used in const-eval.
+
+                // const-eval of the `begin_panic` fn assumes the argument is `&str`
+                if Some(callee) == tcx.lang_items().begin_panic_fn() {
+                    match args[0].node.ty(&self.ccx.body.local_decls, tcx).kind() {
+                        ty::Ref(_, ty, _) if ty.is_str() => return,
+                        _ => self.check_op(ops::PanicNonStr),
+                    }
+                }
+
+                // const-eval of `#[rustc_const_panic_str]` functions assumes the argument is `&&str`
+                if tcx.has_attr(callee, sym::rustc_const_panic_str) {
+                    match args[0].node.ty(&self.ccx.body.local_decls, tcx).kind() {
+                        ty::Ref(_, ty, _) if matches!(ty.kind(), ty::Ref(_, ty, _) if ty.is_str()) =>
+                        {
+                            return;
+                        }
+                        _ => self.check_op(ops::PanicNonStr),
+                    }
+                }
+
+                if Some(callee) == tcx.lang_items().exchange_malloc_fn() {
+                    self.check_op(ops::HeapAllocation);
+                    return;
+                }
+
+                if !tcx.is_const_fn_raw(callee) && !is_trait {
+                    self.check_op(ops::FnCallNonConst {
+                        caller,
+                        callee,
+                        args: fn_args,
+                        span: *fn_span,
+                        call_source: *call_source,
+                        feature: None,
+                    });
+                    return;
+                }
+
+                // If the `const fn` we are trying to call is not const-stable, ensure that we have
+                // the proper feature gate enabled.
+                if let Some((gate, implied_by)) = is_unstable_const_fn(tcx, callee) {
+                    trace!(?gate, "calling unstable const fn");
+                    if self.span.allows_unstable(gate) {
+                        return;
+                    }
+                    if let Some(implied_by_gate) = implied_by
+                        && self.span.allows_unstable(implied_by_gate)
+                    {
+                        return;
+                    }
+
+                    // Calling an unstable function *always* requires that the corresponding gate
+                    // (or implied gate) be enabled, even if the function has
+                    // `#[rustc_allow_const_fn_unstable(the_gate)]`.
+                    let gate_declared = |gate| {
+                        tcx.features().declared_lib_features.iter().any(|&(sym, _)| sym == gate)
+                    };
+                    let feature_gate_declared = gate_declared(gate);
+                    let implied_gate_declared = implied_by.is_some_and(gate_declared);
+                    if !feature_gate_declared && !implied_gate_declared {
+                        self.check_op(ops::FnCallUnstable(callee, Some(gate)));
+                        return;
+                    }
+
+                    // If this crate is not using stability attributes, or the caller is not claiming to be a
+                    // stable `const fn`, that is all that is required.
+                    if !self.ccx.is_const_stable_const_fn() {
+                        trace!("crate not using stability attributes or caller not stably const");
+                        return;
+                    }
+
+                    // Otherwise, we are something const-stable calling a const-unstable fn.
+                    if super::rustc_allow_const_fn_unstable(tcx, caller, gate) {
+                        trace!("rustc_allow_const_fn_unstable gate active");
+                        return;
+                    }
+
+                    self.check_op(ops::FnCallUnstable(callee, Some(gate)));
+                    return;
+                }
+
+                // FIXME(ecstaticmorse): For compatibility, we consider `unstable` callees that
+                // have no `rustc_const_stable` attributes to be const-unstable as well. This
+                // should be fixed later.
+                let callee_is_unstable_unmarked = tcx.lookup_const_stability(callee).is_none()
+                    && tcx.lookup_stability(callee).is_some_and(|s| s.is_unstable());
+                if callee_is_unstable_unmarked {
+                    trace!("callee_is_unstable_unmarked");
+                    // We do not use `const` modifiers for intrinsic "functions", as intrinsics are
+                    // `extern` functions, and these have no way to get marked `const`. So instead we
+                    // use `rustc_const_(un)stable` attributes to mean that the intrinsic is `const`.
+                    if self.ccx.is_const_stable_const_fn() || tcx.intrinsic(callee).is_some() {
+                        self.check_op(ops::FnCallUnstable(callee, None));
+                        return;
+                    }
+                }
+                trace!("permitting call");
+            }
+
+            // Forbid all `Drop` terminators unless the place being dropped is a projection-free
+            // local that the qualif analysis can show does not need a non-const drop.
+            TerminatorKind::Drop { place: dropped_place, .. } => {
+                // If we are checking live drops after drop-elaboration, don't emit duplicate
+                // errors here.
+                if super::post_drop_elaboration::checking_enabled(self.ccx) {
+                    return;
+                }
+
+                let mut err_span = self.span;
+                let ty_of_dropped_place = dropped_place.ty(self.body, self.tcx).ty;
+
+                let ty_needs_non_const_drop =
+                    qualifs::NeedsNonConstDrop::in_any_value_of_ty(self.ccx, ty_of_dropped_place);
+
+                debug!(?ty_of_dropped_place, ?ty_needs_non_const_drop);
+
+                if !ty_needs_non_const_drop {
+                    return;
+                }
+
+                let needs_non_const_drop = if let Some(local) = dropped_place.as_local() {
+                    // Use the span where the local was declared as the span of the drop error.
+                    err_span = self.body.local_decls[local].source_info.span;
+                    self.qualifs.needs_non_const_drop(self.ccx, local, location)
+                } else {
+                    true
+                };
+
+                if needs_non_const_drop {
+                    self.check_op_spanned(
+                        ops::LiveDrop {
+                            dropped_at: Some(terminator.source_info.span),
+                            dropped_ty: ty_of_dropped_place,
+                        },
+                        err_span,
+                    );
+                }
+            }
+
+            TerminatorKind::InlineAsm { .. } => self.check_op(ops::InlineAsm),
+
+            TerminatorKind::Yield { .. } => self.check_op(ops::Coroutine(
+                self.tcx
+                    .coroutine_kind(self.body.source.def_id())
+                    .expect("Only expected to have a yield in a coroutine"),
+            )),
+
+            TerminatorKind::CoroutineDrop => {
+                span_bug!(
+                    self.body.source_info(location).span,
+                    "We should not encounter TerminatorKind::CoroutineDrop after coroutine transform"
+                );
+            }
+
+            TerminatorKind::UnwindTerminate(_) => {
+                // Cleanup blocks are skipped for const checking (see `visit_basic_block_data`).
+                span_bug!(self.span, "`Terminate` terminator outside of cleanup block")
+            }
+
+            TerminatorKind::Assert { .. }
+            | TerminatorKind::FalseEdge { .. }
+            | TerminatorKind::FalseUnwind { .. }
+            | TerminatorKind::Goto { .. }
+            | TerminatorKind::UnwindResume
+            | TerminatorKind::Return
+            | TerminatorKind::SwitchInt { .. }
+            | TerminatorKind::Unreachable => {}
+        }
+    }
+}
+
+fn place_as_reborrow<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    body: &Body<'tcx>,
+    place: Place<'tcx>,
+) -> Option<PlaceRef<'tcx>> {
+    match place.as_ref().last_projection() {
+        Some((place_base, ProjectionElem::Deref)) => {
+            // FIXME: why do statics and raw pointers get excluded here? This makes
+            // some code involving mutable pointers unstable, but it is unclear
+            // why that code is treated differently from mutable references.
+            // Once TransientMutBorrow and TransientCellBorrow are stable,
+            // this can probably be cleaned up without any behavioral changes.
+
+            // A borrow of a `static` also looks like `&(*_1)` in the MIR, but `_1` is a `const`
+            // that points to the allocation for the static. Don't treat these as reborrows.
+            if body.local_decls[place_base.local].is_ref_to_static() {
+                None
+            } else {
+                // Ensure the type being dereferenced is a reference and not a raw pointer.
+                // This is sufficient to prevent an access to a `static mut` from being marked as a
+                // reborrow, even if the check above were to disappear.
+                let inner_ty = place_base.ty(body, tcx).ty;
+
+                if let ty::Ref(..) = inner_ty.kind() {
+                    Some(place_base)
+                } else {
+                    None
+                }
+            }
+        }
+        _ => None,
+    }
+}
+
+fn is_int_bool_or_char(ty: Ty<'_>) -> bool {
+    ty.is_bool() || ty.is_integral() || ty.is_char()
+}
+
+fn emit_unstable_in_stable_error(ccx: &ConstCx<'_, '_>, span: Span, gate: Symbol) {
+    let attr_span = ccx.tcx.def_span(ccx.def_id()).shrink_to_lo();
+
+    ccx.dcx().emit_err(UnstableInStable { gate: gate.to_string(), span, attr_span });
+}
diff --git a/compiler/rustc_const_eval/src/transform/check_consts/mod.rs b/compiler/rustc_const_eval/src/transform/check_consts/mod.rs
new file mode 100644
index 00000000000..12e7ec15e32
--- /dev/null
+++ b/compiler/rustc_const_eval/src/transform/check_consts/mod.rs
@@ -0,0 +1,140 @@
+//! Check the bodies of `const`s, `static`s and `const fn`s for illegal operations.
+//!
+//! This module will eventually replace the parts of `qualify_consts.rs` that check whether a local
+//! has interior mutability or needs to be dropped, as well as the visitor that emits errors when
+//! it finds operations that are invalid in a certain context.
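+//!
+//! For illustration only (not exercised in this module itself), the kind of code these checks
+//! reject includes e.g. calling a non-`const` function from a `const fn`:
+//!
+//! ```ignore (illustrative)
+//! const fn greeting() -> String {
+//!     String::from("hi") // error: cannot call a non-const fn in a constant function
+//! }
+//! ```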
+
+use rustc_attr as attr;
+use rustc_errors::DiagCtxt;
+use rustc_hir as hir;
+use rustc_hir::def_id::{DefId, LocalDefId};
+use rustc_middle::mir;
+use rustc_middle::ty::{self, PolyFnSig, TyCtxt};
+use rustc_span::Symbol;
+
+pub use self::qualifs::Qualif;
+
+pub mod check;
+mod ops;
+pub mod post_drop_elaboration;
+pub mod qualifs;
+mod resolver;
+
+/// Information about the item currently being const-checked, as well as a reference to the global
+/// context.
+pub struct ConstCx<'mir, 'tcx> {
+    pub body: &'mir mir::Body<'tcx>,
+    pub tcx: TyCtxt<'tcx>,
+    pub param_env: ty::ParamEnv<'tcx>,
+    pub const_kind: Option<hir::ConstContext>,
+}
+
+impl<'mir, 'tcx> ConstCx<'mir, 'tcx> {
+    pub fn new(tcx: TyCtxt<'tcx>, body: &'mir mir::Body<'tcx>) -> Self {
+        let def_id = body.source.def_id().expect_local();
+        let param_env = tcx.param_env(def_id);
+        Self::new_with_param_env(tcx, body, param_env)
+    }
+
+    pub fn new_with_param_env(
+        tcx: TyCtxt<'tcx>,
+        body: &'mir mir::Body<'tcx>,
+        param_env: ty::ParamEnv<'tcx>,
+    ) -> Self {
+        let const_kind = tcx.hir().body_const_context(body.source.def_id().expect_local());
+        ConstCx { body, tcx, param_env, const_kind }
+    }
+
+    pub(crate) fn dcx(&self) -> &'tcx DiagCtxt {
+        self.tcx.dcx()
+    }
+
+    pub fn def_id(&self) -> LocalDefId {
+        self.body.source.def_id().expect_local()
+    }
+
+    /// Returns the kind of const context this `Item` represents (`const`, `static`, etc.).
+    ///
+    /// Panics if this `Item` is not const.
+    pub fn const_kind(&self) -> hir::ConstContext {
+        self.const_kind.expect("`const_kind` must not be called on a non-const fn")
+    }
+
+    pub fn is_const_stable_const_fn(&self) -> bool {
+        self.const_kind == Some(hir::ConstContext::ConstFn)
+            && self.tcx.features().staged_api
+            && is_const_stable_const_fn(self.tcx, self.def_id().to_def_id())
+    }
+
+    fn is_async(&self) -> bool {
+        self.tcx.asyncness(self.def_id()).is_async()
+    }
+
+    pub fn fn_sig(&self) -> PolyFnSig<'tcx> {
+        let did = self.def_id().to_def_id();
+        if self.tcx.is_closure_like(did) {
+            let ty = self.tcx.type_of(did).instantiate_identity();
+            let ty::Closure(_, args) = ty.kind() else { bug!("type_of closure not ty::Closure") };
+            args.as_closure().sig()
+        } else {
+            self.tcx.fn_sig(did).instantiate_identity()
+        }
+    }
+}
+
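+/// Returns `true` if `def_id` is annotated with `#[rustc_allow_const_fn_unstable(..)]` listing
+/// the given `feature_gate`.
+///
+/// For illustration only, such an annotation is written roughly like this on a const-stable
+/// function that needs an unstable const feature internally:
+///
+/// ```ignore (illustrative)
+/// #[rustc_allow_const_fn_unstable(const_mut_refs)]
+/// pub const fn some_const_stable_fn() { /* ... */ }
+/// ```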
+pub fn rustc_allow_const_fn_unstable(
+    tcx: TyCtxt<'_>,
+    def_id: LocalDefId,
+    feature_gate: Symbol,
+) -> bool {
+    let attrs = tcx.hir().attrs(tcx.local_def_id_to_hir_id(def_id));
+    attr::rustc_allow_const_fn_unstable(tcx.sess, attrs).any(|name| name == feature_gate)
+}
+
+/// Returns `true` if the given `const fn` is "const-stable".
+///
+/// Panics if the given `DefId` does not refer to a `const fn`.
+///
+/// Const-stability is only relevant for `const fn` within a `staged_api` crate. Only "const-stable"
+/// functions can be called in a const-context by users of the stable compiler. "const-stable"
+/// functions are subject to more stringent restrictions than "const-unstable" functions: They
+/// cannot use unstable features and can only call other "const-stable" functions.
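+///
+/// For illustration only, a "const-stable" function in a `staged_api` crate is typically
+/// declared roughly like this:
+///
+/// ```ignore (illustrative)
+/// #[stable(feature = "example", since = "1.0.0")]
+/// #[rustc_const_stable(feature = "const_example", since = "1.0.0")]
+/// pub const fn example() {}
+/// ```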
+pub fn is_const_stable_const_fn(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
+    // A default body in a `#[const_trait]` is not const-stable because const trait fns currently
+    // cannot be const-stable, so we shouldn't restrict their default bodies to calling only
+    // const-stable functions.
+    if tcx.is_const_default_method(def_id) {
+        return false;
+    }
+
+    // Const-stability is only relevant for `const fn`.
+    assert!(tcx.is_const_fn_raw(def_id));
+
+    // A function is only const-stable if it has `#[rustc_const_stable]` or the trait it belongs
+    // to is const-stable.
+    match tcx.lookup_const_stability(def_id) {
+        Some(stab) => stab.is_const_stable(),
+        None if is_parent_const_stable_trait(tcx, def_id) => {
+            // Remove this delayed bug and return `true` unconditionally once
+            // `#![feature(const_trait_impl)]` is stabilized.
+            tcx.dcx().span_delayed_bug(
+                tcx.def_span(def_id),
+                "trait implementations cannot be const stable yet",
+            );
+            true
+        }
+        None => false, // By default, items are not const stable.
+    }
+}
+
+fn is_parent_const_stable_trait(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
+    let local_def_id = def_id.expect_local();
+    let hir_id = tcx.local_def_id_to_hir_id(local_def_id);
+
+    let parent_owner_id = tcx.parent_hir_id(hir_id).owner;
+    if !tcx.is_const_trait_impl_raw(parent_owner_id.to_def_id()) {
+        return false;
+    }
+
+    tcx.lookup_const_stability(parent_owner_id).is_some_and(|stab| stab.is_const_stable())
+}
diff --git a/compiler/rustc_const_eval/src/transform/check_consts/ops.rs b/compiler/rustc_const_eval/src/transform/check_consts/ops.rs
new file mode 100644
index 00000000000..15720f25c5c
--- /dev/null
+++ b/compiler/rustc_const_eval/src/transform/check_consts/ops.rs
@@ -0,0 +1,648 @@
+//! Concrete error types for all operations which may be invalid in a certain const context.
+
+use hir::def_id::LocalDefId;
+use hir::{ConstContext, LangItem};
+use rustc_errors::{codes::*, Diag};
+use rustc_hir as hir;
+use rustc_hir::def_id::DefId;
+use rustc_infer::infer::TyCtxtInferExt;
+use rustc_infer::traits::{ImplSource, Obligation, ObligationCause};
+use rustc_middle::mir::{self, CallSource};
+use rustc_middle::ty::print::with_no_trimmed_paths;
+use rustc_middle::ty::TraitRef;
+use rustc_middle::ty::{suggest_constraining_type_param, Adt, Closure, FnDef, FnPtr, Param, Ty};
+use rustc_middle::ty::{GenericArgKind, GenericArgsRef};
+use rustc_middle::util::{call_kind, CallDesugaringKind, CallKind};
+use rustc_session::parse::feature_err;
+use rustc_span::symbol::sym;
+use rustc_span::{BytePos, Pos, Span, Symbol};
+use rustc_trait_selection::traits::SelectionContext;
+
+use super::ConstCx;
+use crate::errors;
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+pub enum Status {
+    Allowed,
+    Unstable(Symbol),
+    Forbidden,
+}
+
+#[derive(Clone, Copy)]
+pub enum DiagImportance {
+    /// An operation that must be removed for const-checking to pass.
+    Primary,
+
+    /// An operation that causes const-checking to fail, but is usually a side-effect of a `Primary` operation elsewhere.
+    Secondary,
+}
+
+/// An operation that is not *always* allowed in a const context.
+pub trait NonConstOp<'tcx>: std::fmt::Debug {
+    /// Returns an enum indicating whether this operation is allowed within the given item.
+    fn status_in_item(&self, _ccx: &ConstCx<'_, 'tcx>) -> Status {
+        Status::Forbidden
+    }
+
+    fn importance(&self) -> DiagImportance {
+        DiagImportance::Primary
+    }
+
+    fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> Diag<'tcx>;
+}
+
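+/// Floating-point arithmetic in a const context, e.g. (illustrative only):
+///
+/// ```ignore (illustrative)
+/// const fn half(x: f64) -> f64 {
+///     x / 2.0 // unstable in `const fn`, behind `const_fn_floating_point_arithmetic`
+/// }
+/// ```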
+#[derive(Debug)]
+pub struct FloatingPointOp;
+impl<'tcx> NonConstOp<'tcx> for FloatingPointOp {
+    fn status_in_item(&self, ccx: &ConstCx<'_, 'tcx>) -> Status {
+        if ccx.const_kind() == hir::ConstContext::ConstFn {
+            Status::Unstable(sym::const_fn_floating_point_arithmetic)
+        } else {
+            Status::Allowed
+        }
+    }
+
+    #[allow(rustc::untranslatable_diagnostic)] // FIXME: make this translatable
+    fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> Diag<'tcx> {
+        feature_err(
+            &ccx.tcx.sess,
+            sym::const_fn_floating_point_arithmetic,
+            span,
+            format!("floating point arithmetic is not allowed in {}s", ccx.const_kind()),
+        )
+    }
+}
+
+/// A function call where the callee is a pointer.
+#[derive(Debug)]
+pub struct FnCallIndirect;
+impl<'tcx> NonConstOp<'tcx> for FnCallIndirect {
+    fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> Diag<'tcx> {
+        ccx.dcx().create_err(errors::UnallowedFnPointerCall { span, kind: ccx.const_kind() })
+    }
+}
+
+/// A function call where the callee is not marked as `const`.
+#[derive(Debug, Clone, Copy)]
+pub struct FnCallNonConst<'tcx> {
+    pub caller: LocalDefId,
+    pub callee: DefId,
+    pub args: GenericArgsRef<'tcx>,
+    pub span: Span,
+    pub call_source: CallSource,
+    pub feature: Option<Symbol>,
+}
+
+impl<'tcx> NonConstOp<'tcx> for FnCallNonConst<'tcx> {
+    // FIXME: make this translatable
+    #[allow(rustc::diagnostic_outside_of_impl)]
+    #[allow(rustc::untranslatable_diagnostic)]
+    fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, _: Span) -> Diag<'tcx> {
+        let FnCallNonConst { caller, callee, args, span, call_source, feature } = *self;
+        let ConstCx { tcx, param_env, .. } = *ccx;
+
+        let diag_trait = |err, self_ty: Ty<'_>, trait_id| {
+            let trait_ref = TraitRef::from_method(tcx, trait_id, args);
+
+            match self_ty.kind() {
+                Param(param_ty) => {
+                    debug!(?param_ty);
+                    if let Some(generics) = tcx.hir_node_by_def_id(caller).generics() {
+                        let constraint = with_no_trimmed_paths!(format!(
+                            "~const {}",
+                            trait_ref.print_only_trait_path()
+                        ));
+                        suggest_constraining_type_param(
+                            tcx,
+                            generics,
+                            err,
+                            param_ty.name.as_str(),
+                            &constraint,
+                            None,
+                            None,
+                        );
+                    }
+                }
+                Adt(..) => {
+                    let obligation =
+                        Obligation::new(tcx, ObligationCause::dummy(), param_env, trait_ref);
+
+                    let infcx = tcx.infer_ctxt().build();
+                    let mut selcx = SelectionContext::new(&infcx);
+                    let implsrc = selcx.select(&obligation);
+
+                    if let Ok(Some(ImplSource::UserDefined(data))) = implsrc {
+                        // FIXME(effects) revisit this
+                        if !tcx.is_const_trait_impl_raw(data.impl_def_id) {
+                            let span = tcx.def_span(data.impl_def_id);
+                            err.subdiagnostic(tcx.dcx(), errors::NonConstImplNote { span });
+                        }
+                    }
+                }
+                _ => {}
+            }
+        };
+
+        let call_kind =
+            call_kind(tcx, ccx.param_env, callee, args, span, call_source.from_hir_call(), None);
+
+        debug!(?call_kind);
+
+        let mut err = match call_kind {
+            CallKind::Normal { desugaring: Some((kind, self_ty)), .. } => {
+                macro_rules! error {
+                    ($err:ident) => {
+                        tcx.dcx().create_err(errors::$err {
+                            span,
+                            ty: self_ty,
+                            kind: ccx.const_kind(),
+                        })
+                    };
+                }
+
+                let mut err = match kind {
+                    CallDesugaringKind::ForLoopIntoIter => {
+                        error!(NonConstForLoopIntoIter)
+                    }
+                    CallDesugaringKind::QuestionBranch => {
+                        error!(NonConstQuestionBranch)
+                    }
+                    CallDesugaringKind::QuestionFromResidual => {
+                        error!(NonConstQuestionFromResidual)
+                    }
+                    CallDesugaringKind::TryBlockFromOutput => {
+                        error!(NonConstTryBlockFromOutput)
+                    }
+                    CallDesugaringKind::Await => {
+                        error!(NonConstAwait)
+                    }
+                };
+
+                diag_trait(&mut err, self_ty, kind.trait_def_id(tcx));
+                err
+            }
+            CallKind::FnCall { fn_trait_id, self_ty } => {
+                let note = match self_ty.kind() {
+                    FnDef(def_id, ..) => {
+                        let span = tcx.def_span(*def_id);
+                        if ccx.tcx.is_const_fn_raw(*def_id) {
+                            span_bug!(span, "calling const FnDef errored when it shouldn't");
+                        }
+
+                        Some(errors::NonConstClosureNote::FnDef { span })
+                    }
+                    FnPtr(..) => Some(errors::NonConstClosureNote::FnPtr),
+                    Closure(..) => Some(errors::NonConstClosureNote::Closure),
+                    _ => None,
+                };
+
+                let mut err = tcx.dcx().create_err(errors::NonConstClosure {
+                    span,
+                    kind: ccx.const_kind(),
+                    note,
+                });
+
+                diag_trait(&mut err, self_ty, fn_trait_id);
+                err
+            }
+            CallKind::Operator { trait_id, self_ty, .. } => {
+                let mut err = if let CallSource::MatchCmp = call_source {
+                    tcx.dcx().create_err(errors::NonConstMatchEq {
+                        span,
+                        kind: ccx.const_kind(),
+                        ty: self_ty,
+                    })
+                } else {
+                    let mut sugg = None;
+
+                    if Some(trait_id) == ccx.tcx.lang_items().eq_trait() {
+                        match (args[0].unpack(), args[1].unpack()) {
+                            (GenericArgKind::Type(self_ty), GenericArgKind::Type(rhs_ty))
+                                if self_ty == rhs_ty
+                                    && self_ty.is_ref()
+                                    && self_ty.peel_refs().is_primitive() =>
+                            {
+                                let mut num_refs = 0;
+                                let mut tmp_ty = self_ty;
+                                while let rustc_middle::ty::Ref(_, inner_ty, _) = tmp_ty.kind() {
+                                    num_refs += 1;
+                                    tmp_ty = *inner_ty;
+                                }
+                                let deref = "*".repeat(num_refs);
+
+                                if let Ok(call_str) =
+                                    ccx.tcx.sess.source_map().span_to_snippet(span)
+                                {
+                                    if let Some(eq_idx) = call_str.find("==") {
+                                        if let Some(rhs_idx) = call_str[(eq_idx + 2)..]
+                                            .find(|c: char| !c.is_whitespace())
+                                        {
+                                            let rhs_pos = span.lo()
+                                                + BytePos::from_usize(eq_idx + 2 + rhs_idx);
+                                            let rhs_span = span.with_lo(rhs_pos).with_hi(rhs_pos);
+                                            sugg = Some(errors::ConsiderDereferencing {
+                                                deref,
+                                                span: span.shrink_to_lo(),
+                                                rhs_span,
+                                            });
+                                        }
+                                    }
+                                }
+                            }
+                            _ => {}
+                        }
+                    }
+                    tcx.dcx().create_err(errors::NonConstOperator {
+                        span,
+                        kind: ccx.const_kind(),
+                        sugg,
+                    })
+                };
+
+                diag_trait(&mut err, self_ty, trait_id);
+                err
+            }
+            CallKind::DerefCoercion { deref_target, deref_target_ty, self_ty } => {
+                // Check first whether the source is accessible (issue #87060)
+                let target = if tcx.sess.source_map().is_span_accessible(deref_target) {
+                    Some(deref_target)
+                } else {
+                    None
+                };
+
+                let mut err = tcx.dcx().create_err(errors::NonConstDerefCoercion {
+                    span,
+                    ty: self_ty,
+                    kind: ccx.const_kind(),
+                    target_ty: deref_target_ty,
+                    deref_target: target,
+                });
+
+                diag_trait(&mut err, self_ty, tcx.require_lang_item(LangItem::Deref, Some(span)));
+                err
+            }
+            _ if tcx.opt_parent(callee) == tcx.get_diagnostic_item(sym::ArgumentMethods) => {
+                ccx.dcx().create_err(errors::NonConstFmtMacroCall { span, kind: ccx.const_kind() })
+            }
+            _ => ccx.dcx().create_err(errors::NonConstFnCall {
+                span,
+                def_path_str: ccx.tcx.def_path_str_with_args(callee, args),
+                kind: ccx.const_kind(),
+            }),
+        };
+
+        err.note(format!(
+            "calls in {}s are limited to constant functions, \
+             tuple structs and tuple variants",
+            ccx.const_kind(),
+        ));
+
+        if let Some(feature) = feature
+            && ccx.tcx.sess.is_nightly_build()
+        {
+            err.help(format!("add `#![feature({feature})]` to the crate attributes to enable",));
+        }
+
+        if let ConstContext::Static(_) = ccx.const_kind() {
+            err.note("consider wrapping this expression in `Lazy::new(|| ...)` from the `once_cell` crate: https://crates.io/crates/once_cell");
+        }
+
+        err
+    }
+}
+
+/// A call to an `#[unstable]` const fn or `#[rustc_const_unstable]` function.
+///
+/// Contains the name of the feature that would allow the use of this function.
+#[derive(Debug)]
+pub struct FnCallUnstable(pub DefId, pub Option<Symbol>);
+
+impl<'tcx> NonConstOp<'tcx> for FnCallUnstable {
+    fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> Diag<'tcx> {
+        let FnCallUnstable(def_id, feature) = *self;
+
+        let mut err = ccx
+            .dcx()
+            .create_err(errors::UnstableConstFn { span, def_path: ccx.tcx.def_path_str(def_id) });
+
+        // FIXME: make this translatable
+        #[allow(rustc::untranslatable_diagnostic)]
+        if ccx.is_const_stable_const_fn() {
+            err.help("const-stable functions can only call other const-stable functions");
+        } else if ccx.tcx.sess.is_nightly_build() {
+            if let Some(feature) = feature {
+                err.help(format!("add `#![feature({feature})]` to the crate attributes to enable"));
+            }
+        }
+
+        err
+    }
+}
+
+#[derive(Debug)]
+pub struct Coroutine(pub hir::CoroutineKind);
+impl<'tcx> NonConstOp<'tcx> for Coroutine {
+    fn status_in_item(&self, _: &ConstCx<'_, 'tcx>) -> Status {
+        if let hir::CoroutineKind::Desugared(
+            hir::CoroutineDesugaring::Async,
+            hir::CoroutineSource::Block,
+        ) = self.0
+        {
+            Status::Unstable(sym::const_async_blocks)
+        } else {
+            Status::Forbidden
+        }
+    }
+
+    fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> Diag<'tcx> {
+        let msg = format!("{:#}s are not allowed in {}s", self.0, ccx.const_kind());
+        if let hir::CoroutineKind::Desugared(
+            hir::CoroutineDesugaring::Async,
+            hir::CoroutineSource::Block,
+        ) = self.0
+        {
+            ccx.tcx.sess.create_feature_err(
+                errors::UnallowedOpInConstContext { span, msg },
+                sym::const_async_blocks,
+            )
+        } else {
+            ccx.dcx().create_err(errors::UnallowedOpInConstContext { span, msg })
+        }
+    }
+}
+
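+/// A heap allocation in a const context, e.g. (illustrative only):
+///
+/// ```ignore (illustrative)
+/// const CON: Vec<i32> = vec![1, 2, 3]; // error[E0010]: allocations are not allowed in constants
+/// ```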
+#[derive(Debug)]
+pub struct HeapAllocation;
+impl<'tcx> NonConstOp<'tcx> for HeapAllocation {
+    fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> Diag<'tcx> {
+        ccx.dcx().create_err(errors::UnallowedHeapAllocations {
+            span,
+            kind: ccx.const_kind(),
+            teach: ccx.tcx.sess.teach(E0010).then_some(()),
+        })
+    }
+}
+
+#[derive(Debug)]
+pub struct InlineAsm;
+impl<'tcx> NonConstOp<'tcx> for InlineAsm {
+    fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> Diag<'tcx> {
+        ccx.dcx().create_err(errors::UnallowedInlineAsm { span, kind: ccx.const_kind() })
+    }
+}
+
+#[derive(Debug)]
+pub struct LiveDrop<'tcx> {
+    pub dropped_at: Option<Span>,
+    pub dropped_ty: Ty<'tcx>,
+}
+impl<'tcx> NonConstOp<'tcx> for LiveDrop<'tcx> {
+    fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> Diag<'tcx> {
+        ccx.dcx().create_err(errors::LiveDrop {
+            span,
+            dropped_ty: self.dropped_ty,
+            kind: ccx.const_kind(),
+            dropped_at: self.dropped_at,
+        })
+    }
+}
+
+#[derive(Debug)]
+/// A borrow of a type that contains an `UnsafeCell` somewhere. The borrow never escapes to
+/// the final value of the constant.
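+///
+/// For illustration only, something like this takes a transient cell borrow; the reference to
+/// the `Cell` never ends up in the constant's final value:
+///
+/// ```ignore (illustrative)
+/// const _: () = {
+///     let cell = core::cell::Cell::new(0);
+///     let _borrow = &cell; // gated behind `const_refs_to_cell`
+/// };
+/// ```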
+pub struct TransientCellBorrow;
+impl<'tcx> NonConstOp<'tcx> for TransientCellBorrow {
+    fn status_in_item(&self, _: &ConstCx<'_, 'tcx>) -> Status {
+        Status::Unstable(sym::const_refs_to_cell)
+    }
+    fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> Diag<'tcx> {
+        ccx.tcx
+            .sess
+            .create_feature_err(errors::InteriorMutabilityBorrow { span }, sym::const_refs_to_cell)
+    }
+}
+
+#[derive(Debug)]
+/// A borrow of a type that contains an `UnsafeCell` somewhere. The borrow might escape to
+/// the final value of the constant, and thus we cannot allow this (for now). We may allow
+/// it in the future for static items.
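+///
+/// For illustration only, the classic rejected case is a borrow that does escape into the
+/// final value:
+///
+/// ```ignore (illustrative)
+/// const C: &core::cell::Cell<i32> = &core::cell::Cell::new(0); // error[E0492]
+/// ```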
+pub struct CellBorrow;
+impl<'tcx> NonConstOp<'tcx> for CellBorrow {
+    fn importance(&self) -> DiagImportance {
+        // Most likely the code will try to do mutation with these borrows, which
+        // triggers its own errors. Only show this one if that does not happen.
+        DiagImportance::Secondary
+    }
+    fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> Diag<'tcx> {
+        // FIXME: Maybe a more elegant solution to this if else case
+        if let hir::ConstContext::Static(_) = ccx.const_kind() {
+            ccx.dcx().create_err(errors::InteriorMutableDataRefer {
+                span,
+                opt_help: Some(()),
+                kind: ccx.const_kind(),
+                teach: ccx.tcx.sess.teach(E0492).then_some(()),
+            })
+        } else {
+            ccx.dcx().create_err(errors::InteriorMutableDataRefer {
+                span,
+                opt_help: None,
+                kind: ccx.const_kind(),
+                teach: ccx.tcx.sess.teach(E0492).then_some(()),
+            })
+        }
+    }
+}
+
+#[derive(Debug)]
+/// This op is for `&mut` borrows in the trailing expression of a constant
+/// which uses the "enclosing scopes rule" to leak its locals into anonymous
+/// static or const items.
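+///
+/// For illustration only:
+///
+/// ```ignore (illustrative)
+/// const C: &mut i32 = &mut 0; // error[E0764]
+/// ```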
+pub struct MutBorrow(pub hir::BorrowKind);
+
+impl<'tcx> NonConstOp<'tcx> for MutBorrow {
+    fn status_in_item(&self, _ccx: &ConstCx<'_, 'tcx>) -> Status {
+        Status::Forbidden
+    }
+
+    fn importance(&self) -> DiagImportance {
+        // Most likely the code will try to do mutation with these borrows, which
+        // triggers its own errors. Only show this one if that does not happen.
+        DiagImportance::Secondary
+    }
+
+    fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> Diag<'tcx> {
+        match self.0 {
+            hir::BorrowKind::Raw => ccx.tcx.dcx().create_err(errors::UnallowedMutableRaw {
+                span,
+                kind: ccx.const_kind(),
+                teach: ccx.tcx.sess.teach(E0764).then_some(()),
+            }),
+            hir::BorrowKind::Ref => ccx.dcx().create_err(errors::UnallowedMutableRefs {
+                span,
+                kind: ccx.const_kind(),
+                teach: ccx.tcx.sess.teach(E0764).then_some(()),
+            }),
+        }
+    }
+}
+
+#[derive(Debug)]
+pub struct TransientMutBorrow(pub hir::BorrowKind);
+
+impl<'tcx> NonConstOp<'tcx> for TransientMutBorrow {
+    fn status_in_item(&self, _: &ConstCx<'_, 'tcx>) -> Status {
+        Status::Unstable(sym::const_mut_refs)
+    }
+
+    fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> Diag<'tcx> {
+        let kind = ccx.const_kind();
+        match self.0 {
+            hir::BorrowKind::Raw => ccx
+                .tcx
+                .sess
+                .create_feature_err(errors::TransientMutRawErr { span, kind }, sym::const_mut_refs),
+            hir::BorrowKind::Ref => ccx.tcx.sess.create_feature_err(
+                errors::TransientMutBorrowErr { span, kind },
+                sym::const_mut_refs,
+            ),
+        }
+    }
+}
+
+#[derive(Debug)]
+pub struct MutDeref;
+impl<'tcx> NonConstOp<'tcx> for MutDeref {
+    fn status_in_item(&self, _: &ConstCx<'_, 'tcx>) -> Status {
+        Status::Unstable(sym::const_mut_refs)
+    }
+
+    fn importance(&self) -> DiagImportance {
+        // Usually a side-effect of a `TransientMutBorrow` somewhere.
+        DiagImportance::Secondary
+    }
+
+    fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> Diag<'tcx> {
+        ccx.tcx.sess.create_feature_err(
+            errors::MutDerefErr { span, kind: ccx.const_kind() },
+            sym::const_mut_refs,
+        )
+    }
+}
+
+/// A call to a `panic()` lang item where the first argument is _not_ a `&str`.
+#[derive(Debug)]
+pub struct PanicNonStr;
+impl<'tcx> NonConstOp<'tcx> for PanicNonStr {
+    fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> Diag<'tcx> {
+        ccx.dcx().create_err(errors::PanicNonStrErr { span })
+    }
+}
+
+/// Comparing raw pointers for equality.
+/// Not currently intended to ever be allowed, even behind a feature gate: operation depends on
+/// allocation base addresses that are not known at compile-time.
+#[derive(Debug)]
+pub struct RawPtrComparison;
+impl<'tcx> NonConstOp<'tcx> for RawPtrComparison {
+    fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> Diag<'tcx> {
+        // FIXME(const_trait_impl): revert to span_bug?
+        ccx.dcx().create_err(errors::RawPtrComparisonErr { span })
+    }
+}
+
+#[derive(Debug)]
+pub struct RawMutPtrDeref;
+impl<'tcx> NonConstOp<'tcx> for RawMutPtrDeref {
+    fn status_in_item(&self, _: &ConstCx<'_, '_>) -> Status {
+        Status::Unstable(sym::const_mut_refs)
+    }
+
+    #[allow(rustc::untranslatable_diagnostic)] // FIXME: make this translatable
+    fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> Diag<'tcx> {
+        feature_err(
+            &ccx.tcx.sess,
+            sym::const_mut_refs,
+            span,
+            format!("dereferencing raw mutable pointers in {}s is unstable", ccx.const_kind(),),
+        )
+    }
+}
+
+/// Casting raw pointer or function pointer to an integer.
+/// Not currently intended to ever be allowed, even behind a feature gate: operation depends on
+/// allocation base addresses that are not known at compile-time.
+#[derive(Debug)]
+pub struct RawPtrToIntCast;
+impl<'tcx> NonConstOp<'tcx> for RawPtrToIntCast {
+    fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> Diag<'tcx> {
+        ccx.dcx().create_err(errors::RawPtrToIntErr { span })
+    }
+}
+
+/// An access to a (non-thread-local) `static`.
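+///
+/// For illustration only:
+///
+/// ```ignore (illustrative)
+/// static S: i32 = 5;
+/// const C: &i32 = &S; // gated behind `const_refs_to_static`
+/// ```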
+#[derive(Debug)]
+pub struct StaticAccess;
+impl<'tcx> NonConstOp<'tcx> for StaticAccess {
+    fn status_in_item(&self, ccx: &ConstCx<'_, 'tcx>) -> Status {
+        if let hir::ConstContext::Static(_) = ccx.const_kind() {
+            Status::Allowed
+        } else {
+            Status::Unstable(sym::const_refs_to_static)
+        }
+    }
+
+    #[allow(rustc::untranslatable_diagnostic)] // FIXME: make this translatable
+    fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> Diag<'tcx> {
+        let mut err = feature_err(
+            &ccx.tcx.sess,
+            sym::const_refs_to_static,
+            span,
+            format!("referencing statics in {}s is unstable", ccx.const_kind(),),
+        );
+        // FIXME: make this translatable
+        #[allow(rustc::untranslatable_diagnostic)]
+        err
+            .note("`static` and `const` variables can refer to other `const` variables. A `const` variable, however, cannot refer to a `static` variable.")
+            .help("to fix this, the value can be extracted to a `const` and then used.");
+        err
+    }
+}
+
+/// An access to a thread-local `static`.
+#[derive(Debug)]
+pub struct ThreadLocalAccess;
+impl<'tcx> NonConstOp<'tcx> for ThreadLocalAccess {
+    fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> Diag<'tcx> {
+        ccx.dcx().create_err(errors::ThreadLocalAccessErr { span })
+    }
+}
+
+/// Types that cannot appear in the signature or locals of a `const fn`.
+pub mod ty {
+    use super::*;
+
+    #[derive(Debug)]
+    pub struct MutRef(pub mir::LocalKind);
+    impl<'tcx> NonConstOp<'tcx> for MutRef {
+        fn status_in_item(&self, _ccx: &ConstCx<'_, 'tcx>) -> Status {
+            Status::Unstable(sym::const_mut_refs)
+        }
+
+        fn importance(&self) -> DiagImportance {
+            match self.0 {
+                mir::LocalKind::Temp => DiagImportance::Secondary,
+                mir::LocalKind::ReturnPointer | mir::LocalKind::Arg => DiagImportance::Primary,
+            }
+        }
+
+        #[allow(rustc::untranslatable_diagnostic)] // FIXME: make this translatable
+        fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> Diag<'tcx> {
+            feature_err(
+                &ccx.tcx.sess,
+                sym::const_mut_refs,
+                span,
+                format!("mutable references are not allowed in {}s", ccx.const_kind()),
+            )
+        }
+    }
+}
diff --git a/compiler/rustc_const_eval/src/transform/check_consts/post_drop_elaboration.rs b/compiler/rustc_const_eval/src/transform/check_consts/post_drop_elaboration.rs
new file mode 100644
index 00000000000..5cd13783c23
--- /dev/null
+++ b/compiler/rustc_const_eval/src/transform/check_consts/post_drop_elaboration.rs
@@ -0,0 +1,123 @@
+use rustc_middle::mir::visit::Visitor;
+use rustc_middle::mir::{self, BasicBlock, Location};
+use rustc_middle::ty::{Ty, TyCtxt};
+use rustc_span::{symbol::sym, Span};
+
+use super::check::Qualifs;
+use super::ops::{self, NonConstOp};
+use super::qualifs::{NeedsNonConstDrop, Qualif};
+use super::ConstCx;
+
+/// Returns `true` if we should use the more precise live drop checker that runs after drop
+/// elaboration.
+pub fn checking_enabled(ccx: &ConstCx<'_, '_>) -> bool {
+    // Const-stable functions must always use the stable live drop checker.
+    if ccx.is_const_stable_const_fn() {
+        return false;
+    }
+
+    ccx.tcx.features().const_precise_live_drops
+}
+
+/// Look for live drops in a const context.
+///
+/// This is separate from the rest of the const checking logic because it must run after drop
+/// elaboration.
+pub fn check_live_drops<'tcx>(tcx: TyCtxt<'tcx>, body: &mir::Body<'tcx>) {
+    let def_id = body.source.def_id().expect_local();
+    let const_kind = tcx.hir().body_const_context(def_id);
+    if const_kind.is_none() {
+        return;
+    }
+
+    if tcx.has_attr(def_id, sym::rustc_do_not_const_check) {
+        return;
+    }
+
+    let ccx = ConstCx { body, tcx, const_kind, param_env: tcx.param_env(def_id) };
+    if !checking_enabled(&ccx) {
+        return;
+    }
+
+    let mut visitor = CheckLiveDrops { ccx: &ccx, qualifs: Qualifs::default() };
+
+    visitor.visit_body(body);
+}
+
+struct CheckLiveDrops<'mir, 'tcx> {
+    ccx: &'mir ConstCx<'mir, 'tcx>,
+    qualifs: Qualifs<'mir, 'tcx>,
+}
+
+// So we can access `body` and `tcx`.
+impl<'mir, 'tcx> std::ops::Deref for CheckLiveDrops<'mir, 'tcx> {
+    type Target = ConstCx<'mir, 'tcx>;
+
+    fn deref(&self) -> &Self::Target {
+        self.ccx
+    }
+}
+
+impl<'tcx> CheckLiveDrops<'_, 'tcx> {
+    fn check_live_drop(&self, span: Span, dropped_ty: Ty<'tcx>) {
+        ops::LiveDrop { dropped_at: None, dropped_ty }.build_error(self.ccx, span).emit();
+    }
+}
+
+impl<'tcx> Visitor<'tcx> for CheckLiveDrops<'_, 'tcx> {
+    fn visit_basic_block_data(&mut self, bb: BasicBlock, block: &mir::BasicBlockData<'tcx>) {
+        trace!("visit_basic_block_data: bb={:?} is_cleanup={:?}", bb, block.is_cleanup);
+
+        // Ignore drop terminators in cleanup blocks.
+        if block.is_cleanup {
+            return;
+        }
+
+        self.super_basic_block_data(bb, block);
+    }
+
+    fn visit_terminator(&mut self, terminator: &mir::Terminator<'tcx>, location: Location) {
+        trace!("visit_terminator: terminator={:?} location={:?}", terminator, location);
+
+        match &terminator.kind {
+            mir::TerminatorKind::Drop { place: dropped_place, .. } => {
+                let dropped_ty = dropped_place.ty(self.body, self.tcx).ty;
+
+                if !NeedsNonConstDrop::in_any_value_of_ty(self.ccx, dropped_ty) {
+                    // Instead of throwing a bug, we just return here. This is because we have to
+                    // run custom `const Drop` impls.
+                    return;
+                }
+
+                if dropped_place.is_indirect() {
+                    self.check_live_drop(terminator.source_info.span, dropped_ty);
+                    return;
+                }
+
+                // Drop elaboration is not precise enough to accept code like
+                // `tests/ui/consts/control-flow/drop-pass.rs`; e.g., when an `Option<Vec<T>>` is
+                // initialized with `None` and never changed, it still emits drop glue.
+                // Hence we additionally check the qualifs here to allow more code to pass.
+                if self.qualifs.needs_non_const_drop(self.ccx, dropped_place.local, location) {
+                    // Use the span where the dropped local was declared for the error.
+                    let span = self.body.local_decls[dropped_place.local].source_info.span;
+                    self.check_live_drop(span, dropped_ty);
+                }
+            }
+
+            mir::TerminatorKind::UnwindTerminate(_)
+            | mir::TerminatorKind::Call { .. }
+            | mir::TerminatorKind::Assert { .. }
+            | mir::TerminatorKind::FalseEdge { .. }
+            | mir::TerminatorKind::FalseUnwind { .. }
+            | mir::TerminatorKind::CoroutineDrop
+            | mir::TerminatorKind::Goto { .. }
+            | mir::TerminatorKind::InlineAsm { .. }
+            | mir::TerminatorKind::UnwindResume
+            | mir::TerminatorKind::Return
+            | mir::TerminatorKind::SwitchInt { .. }
+            | mir::TerminatorKind::Unreachable
+            | mir::TerminatorKind::Yield { .. } => {}
+        }
+    }
+}
diff --git a/compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs b/compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs
new file mode 100644
index 00000000000..1847847d9d2
--- /dev/null
+++ b/compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs
@@ -0,0 +1,395 @@
+//! Structural const qualification.
+//!
+//! See the `Qualif` trait for more info.
+
+use rustc_errors::ErrorGuaranteed;
+use rustc_hir::LangItem;
+use rustc_infer::infer::TyCtxtInferExt;
+use rustc_middle::mir;
+use rustc_middle::mir::*;
+use rustc_middle::traits::BuiltinImplSource;
+use rustc_middle::ty::{self, AdtDef, GenericArgsRef, Ty};
+use rustc_trait_selection::traits::{
+    ImplSource, Obligation, ObligationCause, ObligationCtxt, SelectionContext,
+};
+
+use super::ConstCx;
+
+pub fn in_any_value_of_ty<'tcx>(
+    cx: &ConstCx<'_, 'tcx>,
+    ty: Ty<'tcx>,
+    tainted_by_errors: Option<ErrorGuaranteed>,
+) -> ConstQualifs {
+    ConstQualifs {
+        has_mut_interior: HasMutInterior::in_any_value_of_ty(cx, ty),
+        needs_drop: NeedsDrop::in_any_value_of_ty(cx, ty),
+        needs_non_const_drop: NeedsNonConstDrop::in_any_value_of_ty(cx, ty),
+        tainted_by_errors,
+    }
+}
+
+/// A "qualif"(-ication) is a way to look for something "bad" in the MIR that would disqualify some
+/// code for promotion or prevent it from evaluating at compile time.
+///
+/// Normally, we would determine what qualifications apply to each type and error when an illegal
+/// operation is performed on such a type. However, this was found to be too imprecise, especially
+/// in the presence of `enum`s. If only a single variant of an enum has a certain qualification, we
+/// needn't reject code unless it actually constructs and operates on the qualified variant.
+///
+/// To accomplish this, const-checking and promotion use a value-based analysis (as opposed to a
+/// type-based one). Qualifications propagate structurally across variables: If a local (or a
+/// projection of a local) is assigned a qualified value, that local itself becomes qualified.
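+///
+/// For illustration only: given
+///
+/// ```ignore (illustrative)
+/// enum E {
+///     Plain(u8),
+///     Cell(core::cell::UnsafeCell<u8>),
+/// }
+/// ```
+///
+/// a local assigned `E::Plain(0)` does not acquire the interior-mutability qualif, even though
+/// *some* value of type `E` (one built from the `Cell` variant) would have it.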
+pub trait Qualif {
+    /// The name of the file used to debug the dataflow analysis that computes this qualif.
+    const ANALYSIS_NAME: &'static str;
+
+    /// Whether this `Qualif` is cleared when a local is moved from.
+    const IS_CLEARED_ON_MOVE: bool = false;
+
+    /// Whether this `Qualif` might be evaluated after the promotion and can encounter a promoted.
+    const ALLOW_PROMOTED: bool = false;
+
+    /// Extracts the field of `ConstQualifs` that corresponds to this `Qualif`.
+    fn in_qualifs(qualifs: &ConstQualifs) -> bool;
+
+    /// Returns `true` if *any* value of the given type could possibly have this `Qualif`.
+    ///
+    /// This function determines `Qualif`s when we cannot do a value-based analysis. Since qualif
+    /// propagation is context-insensitive, this includes function arguments and values returned
+    /// from a call to another function.
+    ///
+    /// It also determines the `Qualif`s for primitive types.
+    fn in_any_value_of_ty<'tcx>(cx: &ConstCx<'_, 'tcx>, ty: Ty<'tcx>) -> bool;
+
+    /// Returns `true` if this `Qualif` is inherent to the given struct or enum.
+    ///
+    /// By default, `Qualif`s propagate into ADTs in a structural way: An ADT only becomes
+    /// qualified if part of it is assigned a value with that `Qualif`. However, some ADTs *always*
+    /// have a certain `Qualif`, regardless of whether their fields have it. For example, a type
+    /// with a custom `Drop` impl is inherently `NeedsDrop`.
+    ///
+    /// Returning `true` for `in_adt_inherently` but `false` for `in_any_value_of_ty` is unsound.
+    fn in_adt_inherently<'tcx>(
+        cx: &ConstCx<'_, 'tcx>,
+        adt: AdtDef<'tcx>,
+        args: GenericArgsRef<'tcx>,
+    ) -> bool;
+
+    /// Returns `true` if this `Qualif` behaves structurally for pointers and references:
+    /// the pointer/reference qualifies if and only if the pointee qualifies.
+    ///
+    /// (This is currently `false` for all our instances, but that may change in the future. Also,
+    /// by keeping it abstract, the handling of `Deref` in `in_place` becomes clearer.)
+    fn deref_structural<'tcx>(cx: &ConstCx<'_, 'tcx>) -> bool;
+}
+
+/// Constant containing interior mutability (`UnsafeCell<T>`).
+/// This must be ruled out to make sure that evaluating the constant at compile-time
+/// and at *any point* during the run-time would produce the same result. In particular,
+/// promotion of temporaries must not change program behavior; if the promoted could be
+/// written to, that would be a problem.
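+///
+/// For illustration only, this is one reason a borrow such as `&Cell::new(0)` is never promoted
+/// to a `'static` temporary: the promoted value could otherwise be mutated through the reference
+/// at run-time.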
+pub struct HasMutInterior;
+
+impl Qualif for HasMutInterior {
+    const ANALYSIS_NAME: &'static str = "flow_has_mut_interior";
+
+    fn in_qualifs(qualifs: &ConstQualifs) -> bool {
+        qualifs.has_mut_interior
+    }
+
+    fn in_any_value_of_ty<'tcx>(cx: &ConstCx<'_, 'tcx>, ty: Ty<'tcx>) -> bool {
+        !ty.is_freeze(cx.tcx, cx.param_env)
+    }
+
+    fn in_adt_inherently<'tcx>(
+        _cx: &ConstCx<'_, 'tcx>,
+        adt: AdtDef<'tcx>,
+        _: GenericArgsRef<'tcx>,
+    ) -> bool {
+        // Exactly one type, `UnsafeCell`, has the `HasMutInterior` qualif inherently.
+        // It arises structurally for all other types.
+        adt.is_unsafe_cell()
+    }
+
+    fn deref_structural<'tcx>(_cx: &ConstCx<'_, 'tcx>) -> bool {
+        false
+    }
+}
+
+/// Constant containing an ADT that implements `Drop`.
+/// This must be ruled out because implicit promotion would remove side-effects
+/// that occur as part of dropping that value. N.B., the implicit promotion has
+/// to reject const Drop implementations because even if side-effects are ruled
+/// out through other means, the execution of the drop could diverge.
+pub struct NeedsDrop;
+
+impl Qualif for NeedsDrop {
+    const ANALYSIS_NAME: &'static str = "flow_needs_drop";
+    const IS_CLEARED_ON_MOVE: bool = true;
+
+    fn in_qualifs(qualifs: &ConstQualifs) -> bool {
+        qualifs.needs_drop
+    }
+
+    fn in_any_value_of_ty<'tcx>(cx: &ConstCx<'_, 'tcx>, ty: Ty<'tcx>) -> bool {
+        ty.needs_drop(cx.tcx, cx.param_env)
+    }
+
+    fn in_adt_inherently<'tcx>(
+        cx: &ConstCx<'_, 'tcx>,
+        adt: AdtDef<'tcx>,
+        _: GenericArgsRef<'tcx>,
+    ) -> bool {
+        adt.has_dtor(cx.tcx)
+    }
+
+    fn deref_structural<'tcx>(_cx: &ConstCx<'_, 'tcx>) -> bool {
+        false
+    }
+}
+
+/// Constant containing an ADT that implements non-const `Drop`.
+/// This must be ruled out because we cannot run `Drop` during compile-time.
+pub struct NeedsNonConstDrop;
+
+impl Qualif for NeedsNonConstDrop {
+    const ANALYSIS_NAME: &'static str = "flow_needs_nonconst_drop";
+    const IS_CLEARED_ON_MOVE: bool = true;
+    const ALLOW_PROMOTED: bool = true;
+
+    fn in_qualifs(qualifs: &ConstQualifs) -> bool {
+        qualifs.needs_non_const_drop
+    }
+
+    #[instrument(level = "trace", skip(cx), ret)]
+    fn in_any_value_of_ty<'tcx>(cx: &ConstCx<'_, 'tcx>, ty: Ty<'tcx>) -> bool {
+        // Avoid selecting for simple cases, such as builtin types.
+        if ty::util::is_trivially_const_drop(ty) {
+            return false;
+        }
+
+        // FIXME(effects): If `destruct` is not a `const_trait`,
+        // or effects are disabled in this crate, then give up.
+        let destruct_def_id = cx.tcx.require_lang_item(LangItem::Destruct, Some(cx.body.span));
+        if !cx.tcx.has_host_param(destruct_def_id) || !cx.tcx.features().effects {
+            return NeedsDrop::in_any_value_of_ty(cx, ty);
+        }
+
+        let obligation = Obligation::new(
+            cx.tcx,
+            ObligationCause::dummy_with_span(cx.body.span),
+            cx.param_env,
+            ty::TraitRef::new(
+                cx.tcx,
+                destruct_def_id,
+                [
+                    ty::GenericArg::from(ty),
+                    ty::GenericArg::from(cx.tcx.expected_host_effect_param_for_body(cx.def_id())),
+                ],
+            ),
+        );
+
+        let infcx = cx.tcx.infer_ctxt().build();
+        let mut selcx = SelectionContext::new(&infcx);
+        let Some(impl_src) = selcx.select(&obligation).ok().flatten() else {
+            // If we couldn't select a const destruct candidate, then it's bad
+            return true;
+        };
+
+        trace!(?impl_src);
+
+        if !matches!(
+            impl_src,
+            ImplSource::Builtin(BuiltinImplSource::Misc, _) | ImplSource::Param(_)
+        ) {
+            // If our const destruct candidate is not ConstDestruct or implied by the param env,
+            // then it's bad
+            return true;
+        }
+
+        if impl_src.borrow_nested_obligations().is_empty() {
+            return false;
+        }
+
+        // If we had any errors, then it's bad
+        let ocx = ObligationCtxt::new(&infcx);
+        ocx.register_obligations(impl_src.nested_obligations());
+        let errors = ocx.select_all_or_error();
+        !errors.is_empty()
+    }
+
+    fn in_adt_inherently<'tcx>(
+        cx: &ConstCx<'_, 'tcx>,
+        adt: AdtDef<'tcx>,
+        _: GenericArgsRef<'tcx>,
+    ) -> bool {
+        adt.has_non_const_dtor(cx.tcx)
+    }
+
+    fn deref_structural<'tcx>(_cx: &ConstCx<'_, 'tcx>) -> bool {
+        false
+    }
+}
+
+// FIXME: Use `mir::visit::Visitor` for the `in_*` functions if/when it supports early return.
+
+/// Returns `true` if this `Rvalue` contains qualif `Q`.
+pub fn in_rvalue<'tcx, Q, F>(
+    cx: &ConstCx<'_, 'tcx>,
+    in_local: &mut F,
+    rvalue: &Rvalue<'tcx>,
+) -> bool
+where
+    Q: Qualif,
+    F: FnMut(Local) -> bool,
+{
+    match rvalue {
+        Rvalue::ThreadLocalRef(_) | Rvalue::NullaryOp(..) => {
+            Q::in_any_value_of_ty(cx, rvalue.ty(cx.body, cx.tcx))
+        }
+
+        Rvalue::Discriminant(place) | Rvalue::Len(place) => {
+            in_place::<Q, _>(cx, in_local, place.as_ref())
+        }
+
+        Rvalue::CopyForDeref(place) => in_place::<Q, _>(cx, in_local, place.as_ref()),
+
+        Rvalue::Use(operand)
+        | Rvalue::Repeat(operand, _)
+        | Rvalue::UnaryOp(_, operand)
+        | Rvalue::Cast(_, operand, _)
+        | Rvalue::ShallowInitBox(operand, _) => in_operand::<Q, _>(cx, in_local, operand),
+
+        Rvalue::BinaryOp(_, box (lhs, rhs)) | Rvalue::CheckedBinaryOp(_, box (lhs, rhs)) => {
+            in_operand::<Q, _>(cx, in_local, lhs) || in_operand::<Q, _>(cx, in_local, rhs)
+        }
+
+        Rvalue::Ref(_, _, place) | Rvalue::AddressOf(_, place) => {
+            // Special-case reborrows to be more like a copy of the reference.
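+            // Illustrative note (added): for `r: &T`, the rvalue `&*r` is a reborrow; its
+            // qualif is taken from `r` itself (as if the reference were copied) rather than
+            // conservatively from the dereferenced place, so we recurse into `place_base` below.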
+            if let Some((place_base, ProjectionElem::Deref)) = place.as_ref().last_projection() {
+                let base_ty = place_base.ty(cx.body, cx.tcx).ty;
+                if let ty::Ref(..) = base_ty.kind() {
+                    return in_place::<Q, _>(cx, in_local, place_base);
+                }
+            }
+
+            in_place::<Q, _>(cx, in_local, place.as_ref())
+        }
+
+        Rvalue::Aggregate(kind, operands) => {
+            // Return early if we know that the struct or enum being constructed is always
+            // qualified.
+            if let AggregateKind::Adt(adt_did, _, args, ..) = **kind {
+                let def = cx.tcx.adt_def(adt_did);
+                if Q::in_adt_inherently(cx, def, args) {
+                    return true;
+                }
+                // Don't do any value-based reasoning for unions.
+                if def.is_union() && Q::in_any_value_of_ty(cx, rvalue.ty(cx.body, cx.tcx)) {
+                    return true;
+                }
+            }
+
+            // Otherwise, proceed structurally...
+            operands.iter().any(|o| in_operand::<Q, _>(cx, in_local, o))
+        }
+    }
+}
+
+/// Returns `true` if this `Place` contains qualif `Q`.
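+///
+/// Descriptive note (added): for a place such as `x.field[i]` this walks the projection from the
+/// outside in, returning `false` early once a projected type cannot contain the qualif, and
+/// finally falls back to `in_local` for the base local `x`.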
+pub fn in_place<'tcx, Q, F>(cx: &ConstCx<'_, 'tcx>, in_local: &mut F, place: PlaceRef<'tcx>) -> bool
+where
+    Q: Qualif,
+    F: FnMut(Local) -> bool,
+{
+    let mut place = place;
+    while let Some((place_base, elem)) = place.last_projection() {
+        match elem {
+            ProjectionElem::Index(index) if in_local(index) => return true,
+
+            ProjectionElem::Deref
+            | ProjectionElem::Subtype(_)
+            | ProjectionElem::Field(_, _)
+            | ProjectionElem::OpaqueCast(_)
+            | ProjectionElem::ConstantIndex { .. }
+            | ProjectionElem::Subslice { .. }
+            | ProjectionElem::Downcast(_, _)
+            | ProjectionElem::Index(_) => {}
+        }
+
+        let base_ty = place_base.ty(cx.body, cx.tcx);
+        let proj_ty = base_ty.projection_ty(cx.tcx, elem).ty;
+        if !Q::in_any_value_of_ty(cx, proj_ty) {
+            return false;
+        }
+
+        if matches!(elem, ProjectionElem::Deref) && !Q::deref_structural(cx) {
+            // We have to assume that this qualifies.
+            return true;
+        }
+
+        place = place_base;
+    }
+
+    assert!(place.projection.is_empty());
+    in_local(place.local)
+}
+
+/// Returns `true` if this `Operand` contains qualif `Q`.
+pub fn in_operand<'tcx, Q, F>(
+    cx: &ConstCx<'_, 'tcx>,
+    in_local: &mut F,
+    operand: &Operand<'tcx>,
+) -> bool
+where
+    Q: Qualif,
+    F: FnMut(Local) -> bool,
+{
+    let constant = match operand {
+        Operand::Copy(place) | Operand::Move(place) => {
+            return in_place::<Q, _>(cx, in_local, place.as_ref());
+        }
+
+        Operand::Constant(c) => c,
+    };
+
+    // Check the qualifs of the value of `const` items.
+    let uneval = match constant.const_ {
+        Const::Ty(ct)
+            if matches!(
+                ct.kind(),
+                ty::ConstKind::Param(_) | ty::ConstKind::Error(_) | ty::ConstKind::Value(_)
+            ) =>
+        {
+            None
+        }
+        Const::Ty(c) => {
+            bug!("expected ConstKind::Param or ConstKind::Value here, found {:?}", c)
+        }
+        Const::Unevaluated(uv, _) => Some(uv),
+        Const::Val(..) => None,
+    };
+
+    if let Some(mir::UnevaluatedConst { def, args: _, promoted }) = uneval {
+        // Use the qualifs of the type for the promoted. Promoteds in the MIR body should only
+        // be possible for `NeedsNonConstDrop` with precise drop checking, which is the only
+        // const check performed after promotion. Verify that with an assertion.
+        assert!(promoted.is_none() || Q::ALLOW_PROMOTED);
+
+        // Don't peek inside trait associated constants.
+        if promoted.is_none() && cx.tcx.trait_of_item(def).is_none() {
+            let qualifs = cx.tcx.at(constant.span).mir_const_qualif(def);
+
+            if !Q::in_qualifs(&qualifs) {
+                return false;
+            }
+
+            // Just in case the type is more specific than the definition (e.g., an impl
+            // associated const with type parameters), fall through and take it into account.
+        }
+    }
+
+    // Otherwise use the qualifs of the type.
+    Q::in_any_value_of_ty(cx, constant.const_.ty())
+}
diff --git a/compiler/rustc_const_eval/src/transform/check_consts/resolver.rs b/compiler/rustc_const_eval/src/transform/check_consts/resolver.rs
new file mode 100644
index 00000000000..2c835f6750f
--- /dev/null
+++ b/compiler/rustc_const_eval/src/transform/check_consts/resolver.rs
@@ -0,0 +1,366 @@
+//! Propagate `Qualif`s between locals and query the results.
+//!
+//! This contains the dataflow analysis used to track `Qualif`s on complex control-flow graphs.
+
+use rustc_index::bit_set::BitSet;
+use rustc_middle::mir::visit::Visitor;
+use rustc_middle::mir::{
+    self, BasicBlock, CallReturnPlaces, Local, Location, Statement, StatementKind, TerminatorEdges,
+};
+use rustc_mir_dataflow::fmt::DebugWithContext;
+use rustc_mir_dataflow::JoinSemiLattice;
+use rustc_mir_dataflow::{Analysis, AnalysisDomain};
+
+use std::fmt;
+use std::marker::PhantomData;
+
+use super::{qualifs, ConstCx, Qualif};
+
+/// A `Visitor` that propagates qualifs between locals. This defines the transfer function of
+/// `FlowSensitiveAnalysis`.
+///
+/// To account for indirect assignments, the analysis conservatively assumes that a local becomes
+/// qualified immediately after it is borrowed or its address escapes. The borrow must allow for
+/// mutation, which includes shared borrows of places with interior mutability. The type of the
+/// borrowed place must contain the qualif.
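+///
+/// Illustrative example (added): given `let r = &x;` where `x: Cell<Vec<u8>>`, a later write
+/// through `r` could leave `x` holding a value that needs a non-const drop, so the transfer
+/// function marks `x` as qualified (and as borrowed) at the borrow itself.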
+struct TransferFunction<'a, 'mir, 'tcx, Q> {
+    ccx: &'a ConstCx<'mir, 'tcx>,
+    state: &'a mut State,
+    _qualif: PhantomData<Q>,
+}
+
+impl<'a, 'mir, 'tcx, Q> TransferFunction<'a, 'mir, 'tcx, Q>
+where
+    Q: Qualif,
+{
+    fn new(ccx: &'a ConstCx<'mir, 'tcx>, state: &'a mut State) -> Self {
+        TransferFunction { ccx, state, _qualif: PhantomData }
+    }
+
+    fn initialize_state(&mut self) {
+        self.state.qualif.clear();
+        self.state.borrow.clear();
+
+        for arg in self.ccx.body.args_iter() {
+            let arg_ty = self.ccx.body.local_decls[arg].ty;
+            if Q::in_any_value_of_ty(self.ccx, arg_ty) {
+                self.state.qualif.insert(arg);
+            }
+        }
+    }
+
+    fn assign_qualif_direct(&mut self, place: &mir::Place<'tcx>, mut value: bool) {
+        debug_assert!(!place.is_indirect());
+
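+        // Descriptive note (added): writing to a field of a union may leave the whole union in a
+        // qualified state, so if any prefix of the assigned place is a union whose type can
+        // contain the qualif, the assignment is conservatively treated as qualifying.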
+        if !value {
+            for (base, _elem) in place.iter_projections() {
+                let base_ty = base.ty(self.ccx.body, self.ccx.tcx);
+                if base_ty.ty.is_union() && Q::in_any_value_of_ty(self.ccx, base_ty.ty) {
+                    value = true;
+                    break;
+                }
+            }
+        }
+
+        match (value, place.as_ref()) {
+            (true, mir::PlaceRef { local, .. }) => {
+                self.state.qualif.insert(local);
+            }
+
+            // For now, we do not clear the qualif if a local is overwritten in full by
+            // an unqualified rvalue (e.g. `y = 5`). This keeps us consistent with aggregates,
+            // where all fields are overwritten by individual assignments and would therefore
+            // not benefit from such clearing.
+            (false, mir::PlaceRef { local: _, projection: &[] }) => {
+                // self.state.qualif.remove(*local);
+            }
+
+            _ => {}
+        }
+    }
+
+    fn apply_call_return_effect(
+        &mut self,
+        _block: BasicBlock,
+        return_places: CallReturnPlaces<'_, 'tcx>,
+    ) {
+        return_places.for_each(|place| {
+            // We cannot reason about another function's internals, so use conservative type-based
+            // qualification for the result of a function call.
+            let return_ty = place.ty(self.ccx.body, self.ccx.tcx).ty;
+            let qualif = Q::in_any_value_of_ty(self.ccx, return_ty);
+
+            if !place.is_indirect() {
+                self.assign_qualif_direct(&place, qualif);
+            }
+        });
+    }
+
+    fn address_of_allows_mutation(&self) -> bool {
+        // The exact set of permissions granted by `AddressOf` is undecided. Conservatively assume
+        // that it might allow mutation until #56604 is resolved.
+        true
+    }
+
+    fn ref_allows_mutation(&self, kind: mir::BorrowKind, place: mir::Place<'tcx>) -> bool {
+        match kind {
+            mir::BorrowKind::Mut { .. } => true,
+            mir::BorrowKind::Shared | mir::BorrowKind::Fake => {
+                self.shared_borrow_allows_mutation(place)
+            }
+        }
+    }
+
+    /// `&` only allows mutation if the borrowed place is `!Freeze`.
+    ///
+    /// This assumes that it is UB to take the address of a struct field whose type is
+    /// `Freeze`, then use pointer arithmetic to derive a pointer to a *different* field of
+    /// that same struct whose type is `!Freeze`. If we decide that this is not UB, we will
+    /// have to check the type of the borrowed **local** instead of the borrowed **place**
+    /// below. See [rust-lang/unsafe-code-guidelines#134].
+    ///
+    /// [rust-lang/unsafe-code-guidelines#134]: https://github.com/rust-lang/unsafe-code-guidelines/issues/134
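+    ///
+    /// Illustrative example (added): `&Cell<i32>` allows mutation through the shared reference
+    /// because `Cell<i32>` is `!Freeze`, while `&i32` does not.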
+    fn shared_borrow_allows_mutation(&self, place: mir::Place<'tcx>) -> bool {
+        !place.ty(self.ccx.body, self.ccx.tcx).ty.is_freeze(self.ccx.tcx, self.ccx.param_env)
+    }
+}
+
+impl<'tcx, Q> Visitor<'tcx> for TransferFunction<'_, '_, 'tcx, Q>
+where
+    Q: Qualif,
+{
+    fn visit_operand(&mut self, operand: &mir::Operand<'tcx>, location: Location) {
+        self.super_operand(operand, location);
+
+        if !Q::IS_CLEARED_ON_MOVE {
+            return;
+        }
+
+        // If a local with no projections is moved from (e.g. `x` in `y = x`), record that
+        // it no longer needs to be dropped.
+        if let mir::Operand::Move(place) = operand {
+            if let Some(local) = place.as_local() {
+                // For backward compatibility with the `MaybeMutBorrowedLocals` analysis used in an
+                // earlier implementation, we retain the qualif if the local had been borrowed
+                // before. This might not be strictly necessary since the local is no longer
+                // initialized.
+                if !self.state.borrow.contains(local) {
+                    self.state.qualif.remove(local);
+                }
+            }
+        }
+    }
+
+    fn visit_assign(
+        &mut self,
+        place: &mir::Place<'tcx>,
+        rvalue: &mir::Rvalue<'tcx>,
+        location: Location,
+    ) {
+        let qualif =
+            qualifs::in_rvalue::<Q, _>(self.ccx, &mut |l| self.state.qualif.contains(l), rvalue);
+        if !place.is_indirect() {
+            self.assign_qualif_direct(place, qualif);
+        }
+
+        // We need to assign qualifs to the left-hand side before visiting `rvalue` since
+        // qualifs can be cleared on move.
+        self.super_assign(place, rvalue, location);
+    }
+
+    fn visit_rvalue(&mut self, rvalue: &mir::Rvalue<'tcx>, location: Location) {
+        self.super_rvalue(rvalue, location);
+
+        match rvalue {
+            mir::Rvalue::AddressOf(_mt, borrowed_place) => {
+                if !borrowed_place.is_indirect() && self.address_of_allows_mutation() {
+                    let place_ty = borrowed_place.ty(self.ccx.body, self.ccx.tcx).ty;
+                    if Q::in_any_value_of_ty(self.ccx, place_ty) {
+                        self.state.qualif.insert(borrowed_place.local);
+                        self.state.borrow.insert(borrowed_place.local);
+                    }
+                }
+            }
+
+            mir::Rvalue::Ref(_, kind, borrowed_place) => {
+                if !borrowed_place.is_indirect() && self.ref_allows_mutation(*kind, *borrowed_place)
+                {
+                    let place_ty = borrowed_place.ty(self.ccx.body, self.ccx.tcx).ty;
+                    if Q::in_any_value_of_ty(self.ccx, place_ty) {
+                        self.state.qualif.insert(borrowed_place.local);
+                        self.state.borrow.insert(borrowed_place.local);
+                    }
+                }
+            }
+
+            mir::Rvalue::Cast(..)
+            | mir::Rvalue::ShallowInitBox(..)
+            | mir::Rvalue::Use(..)
+            | mir::Rvalue::CopyForDeref(..)
+            | mir::Rvalue::ThreadLocalRef(..)
+            | mir::Rvalue::Repeat(..)
+            | mir::Rvalue::Len(..)
+            | mir::Rvalue::BinaryOp(..)
+            | mir::Rvalue::CheckedBinaryOp(..)
+            | mir::Rvalue::NullaryOp(..)
+            | mir::Rvalue::UnaryOp(..)
+            | mir::Rvalue::Discriminant(..)
+            | mir::Rvalue::Aggregate(..) => {}
+        }
+    }
+
+    fn visit_statement(&mut self, statement: &Statement<'tcx>, location: Location) {
+        match statement.kind {
+            StatementKind::StorageDead(local) => {
+                self.state.qualif.remove(local);
+                self.state.borrow.remove(local);
+            }
+            _ => self.super_statement(statement, location),
+        }
+    }
+
+    fn visit_terminator(&mut self, terminator: &mir::Terminator<'tcx>, location: Location) {
+        // The effect of assignment to the return place in `TerminatorKind::Call` is not applied
+        // here; that occurs in `apply_call_return_effect`.
+
+        // We ignore borrows on drop because custom drop impls are not allowed in consts.
+        // FIXME: Reconsider whether accounting for borrows in drops is necessary for const drop.
+        self.super_terminator(terminator, location);
+    }
+}
+
+/// The dataflow analysis used to propagate qualifs on arbitrary CFGs.
+pub(super) struct FlowSensitiveAnalysis<'a, 'mir, 'tcx, Q> {
+    ccx: &'a ConstCx<'mir, 'tcx>,
+    _qualif: PhantomData<Q>,
+}
+
+impl<'a, 'mir, 'tcx, Q> FlowSensitiveAnalysis<'a, 'mir, 'tcx, Q>
+where
+    Q: Qualif,
+{
+    pub(super) fn new(_: Q, ccx: &'a ConstCx<'mir, 'tcx>) -> Self {
+        FlowSensitiveAnalysis { ccx, _qualif: PhantomData }
+    }
+
+    fn transfer_function(&self, state: &'a mut State) -> TransferFunction<'a, 'mir, 'tcx, Q> {
+        TransferFunction::<Q>::new(self.ccx, state)
+    }
+}
+
+#[derive(Debug, PartialEq, Eq)]
+pub(super) struct State {
+    /// Describes whether a local contains qualif.
+    pub qualif: BitSet<Local>,
+    /// Describes whether a local's address has escaped, meaning it might become qualified as a
+    /// result of an indirect mutation.
+    pub borrow: BitSet<Local>,
+}
+
+impl Clone for State {
+    fn clone(&self) -> Self {
+        State { qualif: self.qualif.clone(), borrow: self.borrow.clone() }
+    }
+
+    // The dataflow engine uses `clone_from` for domain values when possible.
+    // Providing an implementation avoids some intermediate memory allocations.
+    fn clone_from(&mut self, other: &Self) {
+        self.qualif.clone_from(&other.qualif);
+        self.borrow.clone_from(&other.borrow);
+    }
+}
+
+impl State {
+    #[inline]
+    pub(super) fn contains(&self, local: Local) -> bool {
+        self.qualif.contains(local)
+    }
+}
+
+impl<C> DebugWithContext<C> for State {
+    fn fmt_with(&self, ctxt: &C, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.write_str("qualif: ")?;
+        self.qualif.fmt_with(ctxt, f)?;
+        f.write_str(" borrow: ")?;
+        self.borrow.fmt_with(ctxt, f)?;
+        Ok(())
+    }
+
+    fn fmt_diff_with(&self, old: &Self, ctxt: &C, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        if self == old {
+            return Ok(());
+        }
+
+        if self.qualif != old.qualif {
+            f.write_str("qualif: ")?;
+            self.qualif.fmt_diff_with(&old.qualif, ctxt, f)?;
+            f.write_str("\n")?;
+        }
+
+        if self.borrow != old.borrow {
+            f.write_str("borrow: ")?;
+            self.borrow.fmt_diff_with(&old.borrow, ctxt, f)?;
+            f.write_str("\n")?;
+        }
+
+        Ok(())
+    }
+}
+
+impl JoinSemiLattice for State {
+    fn join(&mut self, other: &Self) -> bool {
+        // Join both bit sets unconditionally; a short-circuiting `||` would skip the
+        // `borrow` join whenever `qualif` changed and lose information.
+        let qualif_changed = self.qualif.join(&other.qualif);
+        let borrow_changed = self.borrow.join(&other.borrow);
+        qualif_changed || borrow_changed
+    }
+}
+
+impl<'tcx, Q> AnalysisDomain<'tcx> for FlowSensitiveAnalysis<'_, '_, 'tcx, Q>
+where
+    Q: Qualif,
+{
+    type Domain = State;
+
+    const NAME: &'static str = Q::ANALYSIS_NAME;
+
+    fn bottom_value(&self, body: &mir::Body<'tcx>) -> Self::Domain {
+        State {
+            qualif: BitSet::new_empty(body.local_decls.len()),
+            borrow: BitSet::new_empty(body.local_decls.len()),
+        }
+    }
+
+    fn initialize_start_block(&self, _body: &mir::Body<'tcx>, state: &mut Self::Domain) {
+        self.transfer_function(state).initialize_state();
+    }
+}
+
+impl<'tcx, Q> Analysis<'tcx> for FlowSensitiveAnalysis<'_, '_, 'tcx, Q>
+where
+    Q: Qualif,
+{
+    fn apply_statement_effect(
+        &mut self,
+        state: &mut Self::Domain,
+        statement: &mir::Statement<'tcx>,
+        location: Location,
+    ) {
+        self.transfer_function(state).visit_statement(statement, location);
+    }
+
+    fn apply_terminator_effect<'mir>(
+        &mut self,
+        state: &mut Self::Domain,
+        terminator: &'mir mir::Terminator<'tcx>,
+        location: Location,
+    ) -> TerminatorEdges<'mir, 'tcx> {
+        self.transfer_function(state).visit_terminator(terminator, location);
+        terminator.edges()
+    }
+
+    fn apply_call_return_effect(
+        &mut self,
+        state: &mut Self::Domain,
+        block: BasicBlock,
+        return_places: CallReturnPlaces<'_, 'tcx>,
+    ) {
+        self.transfer_function(state).apply_call_return_effect(block, return_places)
+    }
+}
diff --git a/compiler/rustc_const_eval/src/transform/mod.rs b/compiler/rustc_const_eval/src/transform/mod.rs
new file mode 100644
index 00000000000..e3582c7d317
--- /dev/null
+++ b/compiler/rustc_const_eval/src/transform/mod.rs
@@ -0,0 +1,2 @@
+pub mod check_consts;
+pub mod validate;
diff --git a/compiler/rustc_const_eval/src/transform/validate.rs b/compiler/rustc_const_eval/src/transform/validate.rs
new file mode 100644
index 00000000000..f26c8f8592d
--- /dev/null
+++ b/compiler/rustc_const_eval/src/transform/validate.rs
@@ -0,0 +1,1350 @@
+//! Validates the MIR to ensure that invariants are upheld.
+
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_index::bit_set::BitSet;
+use rustc_index::IndexVec;
+use rustc_infer::traits::Reveal;
+use rustc_middle::mir::interpret::Scalar;
+use rustc_middle::mir::visit::{NonUseContext, PlaceContext, Visitor};
+use rustc_middle::mir::*;
+use rustc_middle::ty::{self, InstanceDef, ParamEnv, Ty, TyCtxt, TypeVisitableExt, Variance};
+use rustc_target::abi::{Size, FIRST_VARIANT};
+use rustc_target::spec::abi::Abi;
+
+use crate::util::is_within_packed;
+
+use crate::util::relate_types;
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+enum EdgeKind {
+    Unwind,
+    Normal,
+}
+
+pub struct Validator {
+    /// Describes at which point in the pipeline this validation is happening.
+    pub when: String,
+    /// The phase for which we are upholding the dialect. If the given phase forbids a specific
+    /// element, this validator will emit errors whenever that element is encountered.
+    /// Note that phases that change the dialect cause all *following* phases to check the
+    /// invariants of the new dialect. A phase that changes dialects never checks the new invariants
+    /// itself.
+    pub mir_phase: MirPhase,
+}
+
+impl<'tcx> MirPass<'tcx> for Validator {
+    fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+        // FIXME(JakobDegen): These bodies are never instantiated in codegen anyway, so it's not
+        // terribly important that they pass the validator. However, I think other passes might
+        // still see them, in which case they might be surprised. It would probably be better if we
+        // didn't put them through the MIR pipeline at all.
+        if matches!(body.source.instance, InstanceDef::Intrinsic(..) | InstanceDef::Virtual(..)) {
+            return;
+        }
+        let def_id = body.source.def_id();
+        let mir_phase = self.mir_phase;
+        let param_env = match mir_phase.reveal() {
+            Reveal::UserFacing => tcx.param_env(def_id),
+            Reveal::All => tcx.param_env_reveal_all_normalized(def_id),
+        };
+
+        let can_unwind = if mir_phase <= MirPhase::Runtime(RuntimePhase::Initial) {
+            // In this case `AbortUnwindingCalls` hasn't been executed yet.
+            true
+        } else if !tcx.def_kind(def_id).is_fn_like() {
+            true
+        } else {
+            let body_ty = tcx.type_of(def_id).skip_binder();
+            let body_abi = match body_ty.kind() {
+                ty::FnDef(..) => body_ty.fn_sig(tcx).abi(),
+                ty::Closure(..) => Abi::RustCall,
+                ty::CoroutineClosure(..) => Abi::RustCall,
+                ty::Coroutine(..) => Abi::Rust,
+                // No need to do MIR validation on error bodies
+                ty::Error(_) => return,
+                _ => {
+                    span_bug!(body.span, "unexpected body ty: {:?} phase {:?}", body_ty, mir_phase)
+                }
+            };
+
+            ty::layout::fn_can_unwind(tcx, Some(def_id), body_abi)
+        };
+
+        let mut cfg_checker = CfgChecker {
+            when: &self.when,
+            body,
+            tcx,
+            mir_phase,
+            unwind_edge_count: 0,
+            reachable_blocks: traversal::reachable_as_bitset(body),
+            value_cache: FxHashSet::default(),
+            can_unwind,
+        };
+        cfg_checker.visit_body(body);
+        cfg_checker.check_cleanup_control_flow();
+
+        // Also run the TypeChecker.
+        for (location, msg) in validate_types(tcx, self.mir_phase, param_env, body) {
+            cfg_checker.fail(location, msg);
+        }
+
+        if let MirPhase::Runtime(_) = body.phase {
+            if let ty::InstanceDef::Item(_) = body.source.instance {
+                if body.has_free_regions() {
+                    cfg_checker.fail(
+                        Location::START,
+                        format!("Free regions in optimized {} MIR", body.phase.name()),
+                    );
+                }
+            }
+        }
+
+        // Enforce that coroutine-closure layouts are identical.
+        if let Some(layout) = body.coroutine_layout()
+            && let Some(by_move_body) = body.coroutine_by_move_body()
+            && let Some(by_move_layout) = by_move_body.coroutine_layout()
+        {
+            if layout != by_move_layout {
+                // If this turns out not to be true, please let compiler-errors know.
+                // It is possible to support, but requires some changes to the layout
+                // computation code.
+                cfg_checker.fail(
+                    Location::START,
+                    format!(
+                        "Coroutine layout differs from by-move coroutine layout:\n\
+                        layout: {layout:#?}\n\
+                        by_move_layout: {by_move_layout:#?}",
+                    ),
+                );
+            }
+        }
+    }
+}
+
+struct CfgChecker<'a, 'tcx> {
+    when: &'a str,
+    body: &'a Body<'tcx>,
+    tcx: TyCtxt<'tcx>,
+    mir_phase: MirPhase,
+    unwind_edge_count: usize,
+    reachable_blocks: BitSet<BasicBlock>,
+    value_cache: FxHashSet<u128>,
+    // If `false`, then the MIR must not contain `UnwindAction::Continue` or
+    // `TerminatorKind::UnwindResume`.
+    can_unwind: bool,
+}
+
+impl<'a, 'tcx> CfgChecker<'a, 'tcx> {
+    #[track_caller]
+    fn fail(&self, location: Location, msg: impl AsRef<str>) {
+        // We might see broken MIR when other errors have already occurred.
+        assert!(
+            self.tcx.dcx().has_errors().is_some(),
+            "broken MIR in {:?} ({}) at {:?}:\n{}",
+            self.body.source.instance,
+            self.when,
+            location,
+            msg.as_ref(),
+        );
+    }
+
+    fn check_edge(&mut self, location: Location, bb: BasicBlock, edge_kind: EdgeKind) {
+        if bb == START_BLOCK {
+            self.fail(location, "start block must not have predecessors")
+        }
+        if let Some(bb) = self.body.basic_blocks.get(bb) {
+            let src = self.body.basic_blocks.get(location.block).unwrap();
+            match (src.is_cleanup, bb.is_cleanup, edge_kind) {
+                // Non-cleanup blocks can jump to non-cleanup blocks along non-unwind edges
+                (false, false, EdgeKind::Normal)
+                // Cleanup blocks can jump to cleanup blocks along non-unwind edges
+                | (true, true, EdgeKind::Normal) => {}
+                // Non-cleanup blocks can jump to cleanup blocks along unwind edges
+                (false, true, EdgeKind::Unwind) => {
+                    self.unwind_edge_count += 1;
+                }
+                // All other jumps are invalid
+                _ => {
+                    self.fail(
+                        location,
+                        format!(
+                            "{:?} edge to {:?} violates unwind invariants (cleanup {:?} -> {:?})",
+                            edge_kind,
+                            bb,
+                            src.is_cleanup,
+                            bb.is_cleanup,
+                        )
+                    )
+                }
+            }
+        } else {
+            self.fail(location, format!("encountered jump to invalid basic block {bb:?}"))
+        }
+    }
+
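+    // Descriptive note (added): after contracting each cleanup block to the root of its chain of
+    // cleanup-only dominators, the cleanup control flow must form a forest: every contracted node
+    // may funnel into at most one other contracted node, and there must be no cycles.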
+    fn check_cleanup_control_flow(&self) {
+        if self.unwind_edge_count <= 1 {
+            return;
+        }
+        let doms = self.body.basic_blocks.dominators();
+        let mut post_contract_node = FxHashMap::default();
+        // Reusing the allocation across invocations of the closure
+        let mut dom_path = vec![];
+        let mut get_post_contract_node = |mut bb| {
+            let root = loop {
+                if let Some(root) = post_contract_node.get(&bb) {
+                    break *root;
+                }
+                let parent = doms.immediate_dominator(bb).unwrap();
+                dom_path.push(bb);
+                if !self.body.basic_blocks[parent].is_cleanup {
+                    break bb;
+                }
+                bb = parent;
+            };
+            for bb in dom_path.drain(..) {
+                post_contract_node.insert(bb, root);
+            }
+            root
+        };
+
+        let mut parent = IndexVec::from_elem(None, &self.body.basic_blocks);
+        for (bb, bb_data) in self.body.basic_blocks.iter_enumerated() {
+            if !bb_data.is_cleanup || !self.reachable_blocks.contains(bb) {
+                continue;
+            }
+            let bb = get_post_contract_node(bb);
+            for s in bb_data.terminator().successors() {
+                let s = get_post_contract_node(s);
+                if s == bb {
+                    continue;
+                }
+                let parent = &mut parent[bb];
+                match parent {
+                    None => {
+                        *parent = Some(s);
+                    }
+                    Some(e) if *e == s => (),
+                    Some(e) => self.fail(
+                        Location { block: bb, statement_index: 0 },
+                        format!(
+                            "Cleanup control flow violation: The blocks dominated by {:?} have edges to both {:?} and {:?}",
+                            bb,
+                            s,
+                            *e
+                        )
+                    ),
+                }
+            }
+        }
+
+        // Check for cycles
+        let mut stack = FxHashSet::default();
+        for i in 0..parent.len() {
+            let mut bb = BasicBlock::from_usize(i);
+            stack.clear();
+            stack.insert(bb);
+            loop {
+                let Some(parent) = parent[bb].take() else { break };
+                let no_cycle = stack.insert(parent);
+                if !no_cycle {
+                    self.fail(
+                        Location { block: bb, statement_index: 0 },
+                        format!(
+                            "Cleanup control flow violation: Cycle involving edge {bb:?} -> {parent:?}",
+                        ),
+                    );
+                    break;
+                }
+                bb = parent;
+            }
+        }
+    }
+
+    fn check_unwind_edge(&mut self, location: Location, unwind: UnwindAction) {
+        let is_cleanup = self.body.basic_blocks[location.block].is_cleanup;
+        match unwind {
+            UnwindAction::Cleanup(unwind) => {
+                if is_cleanup {
+                    self.fail(location, "`UnwindAction::Cleanup` in cleanup block");
+                }
+                self.check_edge(location, unwind, EdgeKind::Unwind);
+            }
+            UnwindAction::Continue => {
+                if is_cleanup {
+                    self.fail(location, "`UnwindAction::Continue` in cleanup block");
+                }
+
+                if !self.can_unwind {
+                    self.fail(location, "`UnwindAction::Continue` in no-unwind function");
+                }
+            }
+            UnwindAction::Terminate(UnwindTerminateReason::InCleanup) => {
+                if !is_cleanup {
+                    self.fail(
+                        location,
+                        "`UnwindAction::Terminate(InCleanup)` in a non-cleanup block",
+                    );
+                }
+            }
+            // These are allowed everywhere.
+            UnwindAction::Unreachable | UnwindAction::Terminate(UnwindTerminateReason::Abi) => (),
+        }
+    }
+
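+    // Descriptive note (added): the edge from a `Call` to its return target is "critical" when
+    // the call also has an unwind/terminate successor (so the source block has several
+    // successors) and the return target has more than one predecessor.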
+    fn is_critical_call_edge(&self, target: Option<BasicBlock>, unwind: UnwindAction) -> bool {
+        let Some(target) = target else { return false };
+        matches!(unwind, UnwindAction::Cleanup(_) | UnwindAction::Terminate(_))
+            && self.body.basic_blocks.predecessors()[target].len() > 1
+    }
+}
+
+impl<'a, 'tcx> Visitor<'tcx> for CfgChecker<'a, 'tcx> {
+    fn visit_local(&mut self, local: Local, _context: PlaceContext, location: Location) {
+        if self.body.local_decls.get(local).is_none() {
+            self.fail(
+                location,
+                format!("local {local:?} has no corresponding declaration in `body.local_decls`"),
+            );
+        }
+    }
+
+    fn visit_statement(&mut self, statement: &Statement<'tcx>, location: Location) {
+        match &statement.kind {
+            StatementKind::AscribeUserType(..) => {
+                if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
+                    self.fail(
+                        location,
+                        "`AscribeUserType` should have been removed after drop lowering phase",
+                    );
+                }
+            }
+            StatementKind::FakeRead(..) => {
+                if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
+                    self.fail(
+                        location,
+                        "`FakeRead` should have been removed after drop lowering phase",
+                    );
+                }
+            }
+            StatementKind::SetDiscriminant { .. } => {
+                if self.mir_phase < MirPhase::Runtime(RuntimePhase::Initial) {
+                    self.fail(location, "`SetDiscriminant`is not allowed until deaggregation");
+                }
+            }
+            StatementKind::Deinit(..) => {
+                if self.mir_phase < MirPhase::Runtime(RuntimePhase::Initial) {
+                    self.fail(location, "`Deinit`is not allowed until deaggregation");
+                }
+            }
+            StatementKind::Retag(kind, _) => {
+                // FIXME(JakobDegen) The validator should check that `self.mir_phase <
+                // DropsLowered`. However, this causes ICEs with generation of drop shims, which
+                // seem to fail to set their `MirPhase` correctly.
+                if matches!(kind, RetagKind::Raw | RetagKind::TwoPhase) {
+                    self.fail(location, format!("explicit `{kind:?}` is forbidden"));
+                }
+            }
+            StatementKind::Assign(..)
+            | StatementKind::StorageLive(_)
+            | StatementKind::StorageDead(_)
+            | StatementKind::Intrinsic(_)
+            | StatementKind::Coverage(_)
+            | StatementKind::ConstEvalCounter
+            | StatementKind::PlaceMention(..)
+            | StatementKind::Nop => {}
+        }
+
+        self.super_statement(statement, location);
+    }
+
+    fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, location: Location) {
+        match &terminator.kind {
+            TerminatorKind::Goto { target } => {
+                self.check_edge(location, *target, EdgeKind::Normal);
+            }
+            TerminatorKind::SwitchInt { targets, discr: _ } => {
+                for (_, target) in targets.iter() {
+                    self.check_edge(location, target, EdgeKind::Normal);
+                }
+                self.check_edge(location, targets.otherwise(), EdgeKind::Normal);
+
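+                // Descriptive note (added): `value_cache` is a reusable scratch set; if the
+                // number of distinct switch values is smaller than the number of non-otherwise
+                // targets, at least one value was duplicated.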
+                self.value_cache.clear();
+                self.value_cache.extend(targets.iter().map(|(value, _)| value));
+                let has_duplicates = targets.iter().len() != self.value_cache.len();
+                if has_duplicates {
+                    self.fail(
+                        location,
+                        format!(
+                            "duplicated values in `SwitchInt` terminator: {:?}",
+                            terminator.kind,
+                        ),
+                    );
+                }
+            }
+            TerminatorKind::Drop { target, unwind, .. } => {
+                self.check_edge(location, *target, EdgeKind::Normal);
+                self.check_unwind_edge(location, *unwind);
+            }
+            TerminatorKind::Call { args, destination, target, unwind, .. } => {
+                if let Some(target) = target {
+                    self.check_edge(location, *target, EdgeKind::Normal);
+                }
+                self.check_unwind_edge(location, *unwind);
+
+                // The code generation assumes that there are no critical call edges. The assumption
+                // is used to simplify inserting code that should be executed along the return edge
+                // from the call. FIXME(tmiasko): Since this is a strictly code generation concern,
+                // the code generation should be responsible for handling it.
+                if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Optimized)
+                    && self.is_critical_call_edge(*target, *unwind)
+                {
+                    self.fail(
+                        location,
+                        format!(
+                            "encountered critical edge in `Call` terminator {:?}",
+                            terminator.kind,
+                        ),
+                    );
+                }
+
+                // The call destination place and any `Operand::Move` place used as an argument
+                // might be passed by reference to the callee. Consequently, they cannot be packed.
+                if is_within_packed(self.tcx, &self.body.local_decls, *destination).is_some() {
+                    // This is bad! The callee will expect the memory to be aligned.
+                    self.fail(
+                        location,
+                        format!(
+                            "encountered packed place in `Call` terminator destination: {:?}",
+                            terminator.kind,
+                        ),
+                    );
+                }
+                for arg in args {
+                    if let Operand::Move(place) = &arg.node {
+                        if is_within_packed(self.tcx, &self.body.local_decls, *place).is_some() {
+                            // This is bad! The callee will expect the memory to be aligned.
+                            self.fail(
+                                location,
+                                format!(
+                                    "encountered `Move` of a packed place in `Call` terminator: {:?}",
+                                    terminator.kind,
+                                ),
+                            );
+                        }
+                    }
+                }
+            }
+            TerminatorKind::Assert { target, unwind, .. } => {
+                self.check_edge(location, *target, EdgeKind::Normal);
+                self.check_unwind_edge(location, *unwind);
+            }
+            TerminatorKind::Yield { resume, drop, .. } => {
+                if self.body.coroutine.is_none() {
+                    self.fail(location, "`Yield` cannot appear outside coroutine bodies");
+                }
+                if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
+                    self.fail(location, "`Yield` should have been replaced by coroutine lowering");
+                }
+                self.check_edge(location, *resume, EdgeKind::Normal);
+                if let Some(drop) = drop {
+                    self.check_edge(location, *drop, EdgeKind::Normal);
+                }
+            }
+            TerminatorKind::FalseEdge { real_target, imaginary_target } => {
+                if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
+                    self.fail(
+                        location,
+                        "`FalseEdge` should have been removed after drop elaboration",
+                    );
+                }
+                self.check_edge(location, *real_target, EdgeKind::Normal);
+                self.check_edge(location, *imaginary_target, EdgeKind::Normal);
+            }
+            TerminatorKind::FalseUnwind { real_target, unwind } => {
+                if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
+                    self.fail(
+                        location,
+                        "`FalseUnwind` should have been removed after drop elaboration",
+                    );
+                }
+                self.check_edge(location, *real_target, EdgeKind::Normal);
+                self.check_unwind_edge(location, *unwind);
+            }
+            TerminatorKind::InlineAsm { targets, unwind, .. } => {
+                for &target in targets {
+                    self.check_edge(location, target, EdgeKind::Normal);
+                }
+                self.check_unwind_edge(location, *unwind);
+            }
+            TerminatorKind::CoroutineDrop => {
+                if self.body.coroutine.is_none() {
+                    self.fail(location, "`CoroutineDrop` cannot appear outside coroutine bodies");
+                }
+                if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
+                    self.fail(
+                        location,
+                        "`CoroutineDrop` should have been replaced by coroutine lowering",
+                    );
+                }
+            }
+            TerminatorKind::UnwindResume => {
+                let bb = location.block;
+                if !self.body.basic_blocks[bb].is_cleanup {
+                    self.fail(location, "Cannot `UnwindResume` from non-cleanup basic block")
+                }
+                if !self.can_unwind {
+                    self.fail(location, "Cannot `UnwindResume` in a function that cannot unwind")
+                }
+            }
+            TerminatorKind::UnwindTerminate(_) => {
+                let bb = location.block;
+                if !self.body.basic_blocks[bb].is_cleanup {
+                    self.fail(location, "Cannot `UnwindTerminate` from non-cleanup basic block")
+                }
+            }
+            TerminatorKind::Return => {
+                let bb = location.block;
+                if self.body.basic_blocks[bb].is_cleanup {
+                    self.fail(location, "Cannot `Return` from cleanup basic block")
+                }
+            }
+            TerminatorKind::Unreachable => {}
+        }
+
+        self.super_terminator(terminator, location);
+    }
+
+    fn visit_source_scope(&mut self, scope: SourceScope) {
+        if self.body.source_scopes.get(scope).is_none() {
+            self.tcx.dcx().span_bug(
+                self.body.span,
+                format!(
+                    "broken MIR in {:?} ({}):\ninvalid source scope {:?}",
+                    self.body.source.instance, self.when, scope,
+                ),
+            );
+        }
+    }
+}
+
+/// A faster version of the validation pass that only checks those things which may break when
+/// instantiating any generic parameters.
+pub fn validate_types<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    mir_phase: MirPhase,
+    param_env: ty::ParamEnv<'tcx>,
+    body: &Body<'tcx>,
+) -> Vec<(Location, String)> {
+    let mut type_checker = TypeChecker { body, tcx, param_env, mir_phase, failures: Vec::new() };
+    type_checker.visit_body(body);
+    type_checker.failures
+}
+
+struct TypeChecker<'a, 'tcx> {
+    body: &'a Body<'tcx>,
+    tcx: TyCtxt<'tcx>,
+    param_env: ParamEnv<'tcx>,
+    mir_phase: MirPhase,
+    failures: Vec<(Location, String)>,
+}
+
+impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
+    fn fail(&mut self, location: Location, msg: impl Into<String>) {
+        self.failures.push((location, msg.into()));
+    }
+
+    /// Check if `src` can be assigned into `dest`.
+    /// This is not precise; it will accept some incorrect assignments.
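+    ///
+    /// Illustrative note (added): before the runtime phases this relates the types covariantly,
+    /// so e.g. a value of type `for<'a> fn(&'a u32)` may be assigned to a place of type
+    /// `fn(&'static u32)`; in runtime MIR the relation is invariant, since any remaining
+    /// subtyping must have been made explicit via `ProjectionElem::Subtype`.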
+    fn mir_assign_valid_types(&self, src: Ty<'tcx>, dest: Ty<'tcx>) -> bool {
+        // Fast path before we normalize.
+        if src == dest {
+            // Equal types, all is good.
+            return true;
+        }
+
+        // We sometimes have to use `defining_opaque_types` for subtyping
+        // to succeed here, and figuring out exactly how that should work
+        // is annoying. It is harmless enough to just not validate anything
+        // in that case. We still check this after analysis, as all opaque
+        // types have been revealed at that point.
+        if (src, dest).has_opaque_types() {
+            return true;
+        }
+
+        // After borrowck subtyping should be fully explicit via
+        // `Subtype` projections.
+        let variance = if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
+            Variance::Invariant
+        } else {
+            Variance::Covariant
+        };
+
+        crate::util::relate_types(self.tcx, self.param_env, variance, src, dest)
+    }
+}
+
+impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
+    fn visit_operand(&mut self, operand: &Operand<'tcx>, location: Location) {
+        // This check is somewhat expensive, so only run it when -Zvalidate-mir is passed.
+        if self.tcx.sess.opts.unstable_opts.validate_mir
+            && self.mir_phase < MirPhase::Runtime(RuntimePhase::Initial)
+        {
+            // `Operand::Copy` is only supposed to be used with `Copy` types.
+            if let Operand::Copy(place) = operand {
+                let ty = place.ty(&self.body.local_decls, self.tcx).ty;
+
+                if !ty.is_copy_modulo_regions(self.tcx, self.param_env) {
+                    self.fail(location, format!("`Operand::Copy` with non-`Copy` type {ty}"));
+                }
+            }
+        }
+
+        self.super_operand(operand, location);
+    }
+
+    fn visit_projection_elem(
+        &mut self,
+        place_ref: PlaceRef<'tcx>,
+        elem: PlaceElem<'tcx>,
+        context: PlaceContext,
+        location: Location,
+    ) {
+        match elem {
+            ProjectionElem::OpaqueCast(ty)
+                if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) =>
+            {
+                self.fail(
+                    location,
+                    format!("explicit opaque type cast to `{ty}` after `RevealAll`"),
+                )
+            }
+            ProjectionElem::Index(index) => {
+                let index_ty = self.body.local_decls[index].ty;
+                if index_ty != self.tcx.types.usize {
+                    self.fail(location, format!("bad index ({index_ty:?} != usize)"))
+                }
+            }
+            ProjectionElem::Deref
+                if self.mir_phase >= MirPhase::Runtime(RuntimePhase::PostCleanup) =>
+            {
+                let base_ty = place_ref.ty(&self.body.local_decls, self.tcx).ty;
+
+                if base_ty.is_box() {
+                    self.fail(
+                        location,
+                        format!("{base_ty:?} dereferenced after ElaborateBoxDerefs"),
+                    )
+                }
+            }
+            ProjectionElem::Field(f, ty) => {
+                let parent_ty = place_ref.ty(&self.body.local_decls, self.tcx);
+                let fail_out_of_bounds = |this: &mut Self, location| {
+                    this.fail(location, format!("Out of bounds field {f:?} for {parent_ty:?}"));
+                };
+                let check_equal = |this: &mut Self, location, f_ty| {
+                    if !this.mir_assign_valid_types(ty, f_ty) {
+                        this.fail(
+                            location,
+                            format!(
+                                "Field projection `{place_ref:?}.{f:?}` specified type `{ty:?}`, but actual type is `{f_ty:?}`"
+                            )
+                        )
+                    }
+                };
+
+                let kind = match parent_ty.ty.kind() {
+                    &ty::Alias(ty::Opaque, ty::AliasTy { def_id, args, .. }) => {
+                        self.tcx.type_of(def_id).instantiate(self.tcx, args).kind()
+                    }
+                    kind => kind,
+                };
+
+                match kind {
+                    ty::Tuple(fields) => {
+                        let Some(f_ty) = fields.get(f.as_usize()) else {
+                            fail_out_of_bounds(self, location);
+                            return;
+                        };
+                        check_equal(self, location, *f_ty);
+                    }
+                    ty::Adt(adt_def, args) => {
+                        let var = parent_ty.variant_index.unwrap_or(FIRST_VARIANT);
+                        let Some(field) = adt_def.variant(var).fields.get(f) else {
+                            fail_out_of_bounds(self, location);
+                            return;
+                        };
+                        check_equal(self, location, field.ty(self.tcx, args));
+                    }
+                    ty::Closure(_, args) => {
+                        let args = args.as_closure();
+                        let Some(&f_ty) = args.upvar_tys().get(f.as_usize()) else {
+                            fail_out_of_bounds(self, location);
+                            return;
+                        };
+                        check_equal(self, location, f_ty);
+                    }
+                    ty::CoroutineClosure(_, args) => {
+                        let args = args.as_coroutine_closure();
+                        let Some(&f_ty) = args.upvar_tys().get(f.as_usize()) else {
+                            fail_out_of_bounds(self, location);
+                            return;
+                        };
+                        check_equal(self, location, f_ty);
+                    }
+                    &ty::Coroutine(def_id, args) => {
+                        let f_ty = if let Some(var) = parent_ty.variant_index {
+                            let gen_body = if def_id == self.body.source.def_id() {
+                                self.body
+                            } else {
+                                self.tcx.optimized_mir(def_id)
+                            };
+
+                            let Some(layout) = gen_body.coroutine_layout() else {
+                                self.fail(
+                                    location,
+                                    format!("No coroutine layout for {parent_ty:?}"),
+                                );
+                                return;
+                            };
+
+                            let Some(&local) = layout.variant_fields[var].get(f) else {
+                                fail_out_of_bounds(self, location);
+                                return;
+                            };
+
+                            let Some(f_ty) = layout.field_tys.get(local) else {
+                                self.fail(
+                                    location,
+                                    format!("Out of bounds local {local:?} for {parent_ty:?}"),
+                                );
+                                return;
+                            };
+
+                            ty::EarlyBinder::bind(f_ty.ty).instantiate(self.tcx, args)
+                        } else {
+                            let Some(&f_ty) = args.as_coroutine().prefix_tys().get(f.index())
+                            else {
+                                fail_out_of_bounds(self, location);
+                                return;
+                            };
+
+                            f_ty
+                        };
+
+                        check_equal(self, location, f_ty);
+                    }
+                    _ => {
+                        self.fail(location, format!("{:?} does not have fields", parent_ty.ty));
+                    }
+                }
+            }
+            ProjectionElem::Subtype(ty) => {
+                if !relate_types(
+                    self.tcx,
+                    self.param_env,
+                    Variance::Covariant,
+                    ty,
+                    place_ref.ty(&self.body.local_decls, self.tcx).ty,
+                ) {
+                    self.fail(
+                        location,
+                        format!(
+                            "Failed subtyping {ty:#?} and {:#?}",
+                            place_ref.ty(&self.body.local_decls, self.tcx).ty
+                        ),
+                    )
+                }
+            }
+            _ => {}
+        }
+        self.super_projection_elem(place_ref, elem, context, location);
+    }
+
+    fn visit_var_debug_info(&mut self, debuginfo: &VarDebugInfo<'tcx>) {
+        if let Some(box VarDebugInfoFragment { ty, ref projection }) = debuginfo.composite {
+            if ty.is_union() || ty.is_enum() {
+                self.fail(
+                    START_BLOCK.start_location(),
+                    format!("invalid type {ty:?} in debuginfo for {:?}", debuginfo.name),
+                );
+            }
+            if projection.is_empty() {
+                self.fail(
+                    START_BLOCK.start_location(),
+                    format!("invalid empty projection in debuginfo for {:?}", debuginfo.name),
+                );
+            }
+            if projection.iter().any(|p| !matches!(p, PlaceElem::Field(..))) {
+                self.fail(
+                    START_BLOCK.start_location(),
+                    format!(
+                        "illegal projection {:?} in debuginfo for {:?}",
+                        projection, debuginfo.name
+                    ),
+                );
+            }
+        }
+        match debuginfo.value {
+            VarDebugInfoContents::Const(_) => {}
+            VarDebugInfoContents::Place(place) => {
+                if place.projection.iter().any(|p| !p.can_use_in_debuginfo()) {
+                    self.fail(
+                        START_BLOCK.start_location(),
+                        format!("illegal place {:?} in debuginfo for {:?}", place, debuginfo.name),
+                    );
+                }
+            }
+        }
+        self.super_var_debug_info(debuginfo);
+    }
+
+    fn visit_place(&mut self, place: &Place<'tcx>, cntxt: PlaceContext, location: Location) {
+        // Set off any `bug!`s in the type computation code
+        let _ = place.ty(&self.body.local_decls, self.tcx);
+
+        if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial)
+            && place.projection.len() > 1
+            && cntxt != PlaceContext::NonUse(NonUseContext::VarDebugInfo)
+            && place.projection[1..].contains(&ProjectionElem::Deref)
+        {
+            self.fail(location, format!("{place:?}, has deref at the wrong place"));
+        }
+
+        self.super_place(place, cntxt, location);
+    }
+
+    fn visit_rvalue(&mut self, rvalue: &Rvalue<'tcx>, location: Location) {
+        macro_rules! check_kinds {
+            ($t:expr, $text:literal, $typat:pat) => {
+                if !matches!(($t).kind(), $typat) {
+                    self.fail(location, format!($text, $t));
+                }
+            };
+        }
+        match rvalue {
+            Rvalue::Use(_) | Rvalue::CopyForDeref(_) => {}
+            Rvalue::Aggregate(kind, fields) => match **kind {
+                AggregateKind::Tuple => {}
+                AggregateKind::Array(dest) => {
+                    for src in fields {
+                        if !self.mir_assign_valid_types(src.ty(self.body, self.tcx), dest) {
+                            self.fail(location, "array field has the wrong type");
+                        }
+                    }
+                }
+                AggregateKind::Adt(def_id, idx, args, _, Some(field)) => {
+                    let adt_def = self.tcx.adt_def(def_id);
+                    assert!(adt_def.is_union());
+                    assert_eq!(idx, FIRST_VARIANT);
+                    let dest_ty = self.tcx.normalize_erasing_regions(
+                        self.param_env,
+                        adt_def.non_enum_variant().fields[field].ty(self.tcx, args),
+                    );
+                    if fields.len() == 1 {
+                        let src_ty = fields.raw[0].ty(self.body, self.tcx);
+                        if !self.mir_assign_valid_types(src_ty, dest_ty) {
+                            self.fail(location, "union field has the wrong type");
+                        }
+                    } else {
+                        self.fail(location, "unions should have one initialized field");
+                    }
+                }
+                AggregateKind::Adt(def_id, idx, args, _, None) => {
+                    let adt_def = self.tcx.adt_def(def_id);
+                    assert!(!adt_def.is_union());
+                    let variant = &adt_def.variants()[idx];
+                    if variant.fields.len() != fields.len() {
+                        self.fail(location, "adt has the wrong number of initialized fields");
+                    }
+                    for (src, dest) in std::iter::zip(fields, &variant.fields) {
+                        let dest_ty = self
+                            .tcx
+                            .normalize_erasing_regions(self.param_env, dest.ty(self.tcx, args));
+                        if !self.mir_assign_valid_types(src.ty(self.body, self.tcx), dest_ty) {
+                            self.fail(location, "adt field has the wrong type");
+                        }
+                    }
+                }
+                AggregateKind::Closure(_, args) => {
+                    let upvars = args.as_closure().upvar_tys();
+                    if upvars.len() != fields.len() {
+                        self.fail(location, "closure has the wrong number of initialized fields");
+                    }
+                    for (src, dest) in std::iter::zip(fields, upvars) {
+                        if !self.mir_assign_valid_types(src.ty(self.body, self.tcx), dest) {
+                            self.fail(location, "closure field has the wrong type");
+                        }
+                    }
+                }
+                AggregateKind::Coroutine(_, args) => {
+                    let upvars = args.as_coroutine().upvar_tys();
+                    if upvars.len() != fields.len() {
+                        self.fail(location, "coroutine has the wrong number of initialized fields");
+                    }
+                    for (src, dest) in std::iter::zip(fields, upvars) {
+                        if !self.mir_assign_valid_types(src.ty(self.body, self.tcx), dest) {
+                            self.fail(location, "coroutine field has the wrong type");
+                        }
+                    }
+                }
+                AggregateKind::CoroutineClosure(_, args) => {
+                    let upvars = args.as_coroutine_closure().upvar_tys();
+                    if upvars.len() != fields.len() {
+                        self.fail(
+                            location,
+                            "coroutine-closure has the wrong number of initialized fields",
+                        );
+                    }
+                    for (src, dest) in std::iter::zip(fields, upvars) {
+                        if !self.mir_assign_valid_types(src.ty(self.body, self.tcx), dest) {
+                            self.fail(location, "coroutine-closure field has the wrong type");
+                        }
+                    }
+                }
+            },
+            Rvalue::Ref(_, BorrowKind::Fake, _) => {
+                if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
+                    self.fail(
+                        location,
+                        "`Assign` statement with a `Fake` borrow should have been removed in runtime MIR",
+                    );
+                }
+            }
+            Rvalue::Ref(..) => {}
+            Rvalue::Len(p) => {
+                let pty = p.ty(&self.body.local_decls, self.tcx).ty;
+                check_kinds!(
+                    pty,
+                    "Cannot compute length of non-array/slice type {:?}",
+                    ty::Array(..) | ty::Slice(..)
+                );
+            }
+            Rvalue::BinaryOp(op, vals) => {
+                use BinOp::*;
+                let a = vals.0.ty(&self.body.local_decls, self.tcx);
+                let b = vals.1.ty(&self.body.local_decls, self.tcx);
+                if crate::util::binop_right_homogeneous(*op) {
+                    if let Eq | Lt | Le | Ne | Ge | Gt = op {
+                        // Fn pointer types can differ in their lifetimes, so compare up to subtyping.
+                        if !self.mir_assign_valid_types(a, b) {
+                            self.fail(
+                                location,
+                                format!("Cannot {op:?} compare incompatible types {a:?} and {b:?}"),
+                            );
+                        }
+                    } else if a != b {
+                        self.fail(
+                            location,
+                            format!(
+                                "Cannot perform binary op {op:?} on unequal types {a:?} and {b:?}"
+                            ),
+                        );
+                    }
+                }
+
+                match op {
+                    Offset => {
+                        check_kinds!(a, "Cannot offset non-pointer type {:?}", ty::RawPtr(..));
+                        if b != self.tcx.types.isize && b != self.tcx.types.usize {
+                            self.fail(
+                                location,
+                                format!("Cannot offset by non-isize/usize type {b:?}"),
+                            );
+                        }
+                    }
+                    Eq | Lt | Le | Ne | Ge | Gt => {
+                        for x in [a, b] {
+                            check_kinds!(
+                                x,
+                                "Cannot {op:?} compare type {:?}",
+                                ty::Bool
+                                    | ty::Char
+                                    | ty::Int(..)
+                                    | ty::Uint(..)
+                                    | ty::Float(..)
+                                    | ty::RawPtr(..)
+                                    | ty::FnPtr(..)
+                            )
+                        }
+                    }
+                    AddUnchecked | SubUnchecked | MulUnchecked | Shl | ShlUnchecked | Shr
+                    | ShrUnchecked => {
+                        for x in [a, b] {
+                            check_kinds!(
+                                x,
+                                "Cannot {op:?} non-integer type {:?}",
+                                ty::Uint(..) | ty::Int(..)
+                            )
+                        }
+                    }
+                    BitAnd | BitOr | BitXor => {
+                        for x in [a, b] {
+                            check_kinds!(
+                                x,
+                                "Cannot perform bitwise op {op:?} on type {:?}",
+                                ty::Uint(..) | ty::Int(..) | ty::Bool
+                            )
+                        }
+                    }
+                    Add | Sub | Mul | Div | Rem => {
+                        for x in [a, b] {
+                            check_kinds!(
+                                x,
+                                "Cannot perform arithmetic {op:?} on type {:?}",
+                                ty::Uint(..) | ty::Int(..) | ty::Float(..)
+                            )
+                        }
+                    }
+                }
+            }
+            Rvalue::CheckedBinaryOp(op, vals) => {
+                use BinOp::*;
+                let a = vals.0.ty(&self.body.local_decls, self.tcx);
+                let b = vals.1.ty(&self.body.local_decls, self.tcx);
+                match op {
+                    Add | Sub | Mul => {
+                        for x in [a, b] {
+                            check_kinds!(
+                                x,
+                                "Cannot perform checked arithmetic on type {:?}",
+                                ty::Uint(..) | ty::Int(..)
+                            )
+                        }
+                        if a != b {
+                            self.fail(
+                                location,
+                                format!(
+                                    "Cannot perform checked arithmetic on unequal types {a:?} and {b:?}"
+                                ),
+                            );
+                        }
+                    }
+                    _ => self.fail(location, format!("There is no checked version of {op:?}")),
+                }
+            }
+            Rvalue::UnaryOp(op, operand) => {
+                let a = operand.ty(&self.body.local_decls, self.tcx);
+                match op {
+                    UnOp::Neg => {
+                        check_kinds!(a, "Cannot negate type {:?}", ty::Int(..) | ty::Float(..))
+                    }
+                    UnOp::Not => {
+                        check_kinds!(
+                            a,
+                            "Cannot apply binary not to type {:?}",
+                            ty::Int(..) | ty::Uint(..) | ty::Bool
+                        );
+                    }
+                }
+            }
+            Rvalue::ShallowInitBox(operand, _) => {
+                let a = operand.ty(&self.body.local_decls, self.tcx);
+                check_kinds!(a, "Cannot shallow init type {:?}", ty::RawPtr(..));
+            }
+            Rvalue::Cast(kind, operand, target_type) => {
+                let op_ty = operand.ty(self.body, self.tcx);
+                match kind {
+                    CastKind::DynStar => {
+                        // FIXME(dyn-star): make sure nothing needs to be done here.
+                    }
+                    // FIXME: Add Checks for these
+                    CastKind::PointerFromExposedAddress
+                    | CastKind::PointerExposeAddress
+                    | CastKind::PointerCoercion(_) => {}
+                    CastKind::IntToInt | CastKind::IntToFloat => {
+                        let input_valid = op_ty.is_integral() || op_ty.is_char() || op_ty.is_bool();
+                        let target_valid = target_type.is_numeric() || target_type.is_char();
+                        if !input_valid || !target_valid {
+                            self.fail(
+                                location,
+                                format!("Wrong cast kind {kind:?} for the type {op_ty}",),
+                            );
+                        }
+                    }
+                    CastKind::FnPtrToPtr | CastKind::PtrToPtr => {
+                        if !(op_ty.is_any_ptr() && target_type.is_unsafe_ptr()) {
+                            self.fail(location, format!("Can't cast {op_ty} into 'Ptr'"));
+                        }
+                    }
+                    CastKind::FloatToFloat | CastKind::FloatToInt => {
+                        if !op_ty.is_floating_point() || !target_type.is_numeric() {
+                            self.fail(
+                                location,
+                                format!(
+                                    "Trying to cast non-`Float` as {kind:?} into {target_type:?}"
+                                ),
+                            );
+                        }
+                    }
+                    CastKind::Transmute => {
+                        if let MirPhase::Runtime(..) = self.mir_phase {
+                            // Unlike `mem::transmute`, a MIR `Transmute` is well-formed
+                            // for any two `Sized` types, just potentially UB to run.
+
+                            if !self
+                                .tcx
+                                .normalize_erasing_regions(self.param_env, op_ty)
+                                .is_sized(self.tcx, self.param_env)
+                            {
+                                self.fail(
+                                    location,
+                                    format!("Cannot transmute from non-`Sized` type {op_ty:?}"),
+                                );
+                            }
+                            if !self
+                                .tcx
+                                .normalize_erasing_regions(self.param_env, *target_type)
+                                .is_sized(self.tcx, self.param_env)
+                            {
+                                self.fail(
+                                    location,
+                                    format!("Cannot transmute to non-`Sized` type {target_type:?}"),
+                                );
+                            }
+                        } else {
+                            self.fail(
+                                location,
+                                format!(
+                                    "Transmute is not supported in non-runtime phase {:?}.",
+                                    self.mir_phase
+                                ),
+                            );
+                        }
+                    }
+                }
+            }
+            Rvalue::NullaryOp(NullOp::OffsetOf(indices), container) => {
+                let fail_out_of_bounds = |this: &mut Self, location, field, ty| {
+                    this.fail(location, format!("Out of bounds field {field:?} for {ty:?}"));
+                };
+
+                let mut current_ty = *container;
+
+                for (variant, field) in indices.iter() {
+                    match current_ty.kind() {
+                        ty::Tuple(fields) => {
+                            if variant != FIRST_VARIANT {
+                                self.fail(
+                                    location,
+                                    format!("tried to get variant {variant:?} of tuple"),
+                                );
+                                return;
+                            }
+                            let Some(&f_ty) = fields.get(field.as_usize()) else {
+                                fail_out_of_bounds(self, location, field, current_ty);
+                                return;
+                            };
+
+                            current_ty = self.tcx.normalize_erasing_regions(self.param_env, f_ty);
+                        }
+                        ty::Adt(adt_def, args) => {
+                            let Some(field) = adt_def.variant(variant).fields.get(field) else {
+                                fail_out_of_bounds(self, location, field, current_ty);
+                                return;
+                            };
+
+                            let f_ty = field.ty(self.tcx, args);
+                            current_ty = self.tcx.normalize_erasing_regions(self.param_env, f_ty);
+                        }
+                        _ => {
+                            self.fail(
+                                location,
+                                format!("Cannot get offset ({variant:?}, {field:?}) from type {current_ty:?}"),
+                            );
+                            return;
+                        }
+                    }
+                }
+            }
+            Rvalue::Repeat(_, _)
+            | Rvalue::ThreadLocalRef(_)
+            | Rvalue::AddressOf(_, _)
+            | Rvalue::NullaryOp(NullOp::SizeOf | NullOp::AlignOf | NullOp::UbCheck(_), _)
+            | Rvalue::Discriminant(_) => {}
+        }
+        self.super_rvalue(rvalue, location);
+    }
+
+    fn visit_statement(&mut self, statement: &Statement<'tcx>, location: Location) {
+        match &statement.kind {
+            StatementKind::Assign(box (dest, rvalue)) => {
+                // LHS and RHS of the assignment must have the same type.
+                let left_ty = dest.ty(&self.body.local_decls, self.tcx).ty;
+                let right_ty = rvalue.ty(&self.body.local_decls, self.tcx);
+
+                if !self.mir_assign_valid_types(right_ty, left_ty) {
+                    self.fail(
+                        location,
+                        format!(
+                            "encountered `{:?}` with incompatible types:\n\
+                            left-hand side has type: {}\n\
+                            right-hand side has type: {}",
+                            statement.kind, left_ty, right_ty,
+                        ),
+                    );
+                }
+                if let Rvalue::CopyForDeref(place) = rvalue {
+                    if place.ty(&self.body.local_decls, self.tcx).ty.builtin_deref(true).is_none() {
+                        self.fail(
+                            location,
+                            "`CopyForDeref` should only be used for dereferenceable types",
+                        )
+                    }
+                }
+            }
+            StatementKind::AscribeUserType(..) => {
+                if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
+                    self.fail(
+                        location,
+                        "`AscribeUserType` should have been removed after drop lowering phase",
+                    );
+                }
+            }
+            StatementKind::FakeRead(..) => {
+                if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
+                    self.fail(
+                        location,
+                        "`FakeRead` should have been removed after drop lowering phase",
+                    );
+                }
+            }
+            StatementKind::Intrinsic(box NonDivergingIntrinsic::Assume(op)) => {
+                let ty = op.ty(&self.body.local_decls, self.tcx);
+                if !ty.is_bool() {
+                    self.fail(
+                        location,
+                        format!("`assume` argument must be `bool`, but got: `{ty}`"),
+                    );
+                }
+            }
+            StatementKind::Intrinsic(box NonDivergingIntrinsic::CopyNonOverlapping(
+                CopyNonOverlapping { src, dst, count },
+            )) => {
+                let src_ty = src.ty(&self.body.local_decls, self.tcx);
+                let op_src_ty = if let Some(src_deref) = src_ty.builtin_deref(true) {
+                    src_deref.ty
+                } else {
+                    self.fail(
+                        location,
+                        format!("Expected src to be ptr in copy_nonoverlapping, got: {src_ty}"),
+                    );
+                    return;
+                };
+                let dst_ty = dst.ty(&self.body.local_decls, self.tcx);
+                let op_dst_ty = if let Some(dst_deref) = dst_ty.builtin_deref(true) {
+                    dst_deref.ty
+                } else {
+                    self.fail(
+                        location,
+                        format!("Expected dst to be ptr in copy_nonoverlapping, got: {dst_ty}"),
+                    );
+                    return;
+                };
+                // Since `CopyNonOverlapping` is parameterized by a single type, we only need to
+                // check that the source and destination pointee types are equal, rather than
+                // tracking a separate type parameter.
+                if !self.mir_assign_valid_types(op_src_ty, op_dst_ty) {
+                    self.fail(location, format!("bad arg ({op_src_ty:?} != {op_dst_ty:?})"));
+                }
+
+                let op_cnt_ty = count.ty(&self.body.local_decls, self.tcx);
+                if op_cnt_ty != self.tcx.types.usize {
+                    self.fail(location, format!("bad arg ({op_cnt_ty:?} != usize)"))
+                }
+            }
+            StatementKind::SetDiscriminant { place, .. } => {
+                if self.mir_phase < MirPhase::Runtime(RuntimePhase::Initial) {
+                    self.fail(location, "`SetDiscriminant` is not allowed until deaggregation");
+                }
+                let pty = place.ty(&self.body.local_decls, self.tcx).ty.kind();
+                if !matches!(pty, ty::Adt(..) | ty::Coroutine(..) | ty::Alias(ty::Opaque, ..)) {
+                    self.fail(
+                        location,
+                        format!(
+                            "`SetDiscriminant` is only allowed on ADTs and coroutines, not {pty:?}"
+                        ),
+                    );
+                }
+            }
+            StatementKind::Deinit(..) => {
+                if self.mir_phase < MirPhase::Runtime(RuntimePhase::Initial) {
+                    self.fail(location, "`Deinit` is not allowed until deaggregation");
+                }
+            }
+            StatementKind::Retag(kind, _) => {
+                // FIXME(JakobDegen) The validator should check that `self.mir_phase <
+                // DropsLowered`. However, this causes ICEs with generation of drop shims, which
+                // seem to fail to set their `MirPhase` correctly.
+                if matches!(kind, RetagKind::Raw | RetagKind::TwoPhase) {
+                    self.fail(location, format!("explicit `{kind:?}` is forbidden"));
+                }
+            }
+            StatementKind::StorageLive(_)
+            | StatementKind::StorageDead(_)
+            | StatementKind::Coverage(_)
+            | StatementKind::ConstEvalCounter
+            | StatementKind::PlaceMention(..)
+            | StatementKind::Nop => {}
+        }
+
+        self.super_statement(statement, location);
+    }
+
+    fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, location: Location) {
+        match &terminator.kind {
+            TerminatorKind::SwitchInt { targets, discr } => {
+                let switch_ty = discr.ty(&self.body.local_decls, self.tcx);
+
+                let target_width = self.tcx.sess.target.pointer_width;
+
+                let size = Size::from_bits(match switch_ty.kind() {
+                    ty::Uint(uint) => uint.normalize(target_width).bit_width().unwrap(),
+                    ty::Int(int) => int.normalize(target_width).bit_width().unwrap(),
+                    ty::Char => 32,
+                    ty::Bool => 1,
+                    other => bug!("unhandled type: {:?}", other),
+                });
+
+                for (value, _) in targets.iter() {
+                    if Scalar::<()>::try_from_uint(value, size).is_none() {
+                        self.fail(
+                            location,
+                            format!("the value {value:#x} is not a proper {switch_ty:?}"),
+                        )
+                    }
+                }
+            }
+            TerminatorKind::Call { func, .. } => {
+                let func_ty = func.ty(&self.body.local_decls, self.tcx);
+                match func_ty.kind() {
+                    ty::FnPtr(..) | ty::FnDef(..) => {}
+                    _ => self.fail(
+                        location,
+                        format!("encountered non-callable type {func_ty} in `Call` terminator"),
+                    ),
+                }
+            }
+            TerminatorKind::Assert { cond, .. } => {
+                let cond_ty = cond.ty(&self.body.local_decls, self.tcx);
+                if cond_ty != self.tcx.types.bool {
+                    self.fail(
+                        location,
+                        format!(
+                            "encountered non-boolean condition of type {cond_ty} in `Assert` terminator"
+                        ),
+                    );
+                }
+            }
+            TerminatorKind::Goto { .. }
+            | TerminatorKind::Drop { .. }
+            | TerminatorKind::Yield { .. }
+            | TerminatorKind::FalseEdge { .. }
+            | TerminatorKind::FalseUnwind { .. }
+            | TerminatorKind::InlineAsm { .. }
+            | TerminatorKind::CoroutineDrop
+            | TerminatorKind::UnwindResume
+            | TerminatorKind::UnwindTerminate(_)
+            | TerminatorKind::Return
+            | TerminatorKind::Unreachable => {}
+        }
+
+        self.super_terminator(terminator, location);
+    }
+}
diff --git a/compiler/rustc_const_eval/src/util/alignment.rs b/compiler/rustc_const_eval/src/util/alignment.rs
new file mode 100644
index 00000000000..8642dfccd78
--- /dev/null
+++ b/compiler/rustc_const_eval/src/util/alignment.rs
@@ -0,0 +1,72 @@
+use rustc_middle::mir::*;
+use rustc_middle::ty::{self, TyCtxt};
+use rustc_target::abi::Align;
+
+/// Returns `true` if this place is allowed to be less aligned
+/// than its containing struct (because it is within a packed
+/// struct).
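+///
+/// Illustrative sketch (hypothetical types, not a doctest in this crate):
+///
+/// ```ignore (illustrative)
+/// #[repr(packed)]
+/// struct Packed { byte: u8, word: u32 }
+/// // For a MIR place corresponding to `packed.word`, this returns `true`: the packed
+/// // alignment (1) is below `u32`'s ABI alignment (4), so the access may be under-aligned.
+/// ```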
+pub fn is_disaligned<'tcx, L>(
+    tcx: TyCtxt<'tcx>,
+    local_decls: &L,
+    param_env: ty::ParamEnv<'tcx>,
+    place: Place<'tcx>,
+) -> bool
+where
+    L: HasLocalDecls<'tcx>,
+{
+    debug!("is_disaligned({:?})", place);
+    let Some(pack) = is_within_packed(tcx, local_decls, place) else {
+        debug!("is_disaligned({:?}) - not within packed", place);
+        return false;
+    };
+
+    let ty = place.ty(local_decls, tcx).ty;
+    let unsized_tail = || tcx.struct_tail_with_normalize(ty, |ty| ty, || {});
+    match tcx.layout_of(param_env.and(ty)) {
+        Ok(layout)
+            if layout.align.abi <= pack
+                && (layout.is_sized()
+                    || matches!(unsized_tail().kind(), ty::Slice(..) | ty::Str)) =>
+        {
+            // If the packed alignment is greater than or equal to the field alignment, the type
+            // won't be further disaligned.
+            // However we need to ensure the field is sized; for unsized fields, `layout.align` is
+            // just an approximation -- except when the unsized tail is a slice, where the alignment
+            // is fully determined by the type.
+            debug!(
+                "is_disaligned({:?}) - align = {}, packed = {}; not disaligned",
+                place,
+                layout.align.abi.bytes(),
+                pack.bytes()
+            );
+            false
+        }
+        _ => {
+            // We cannot figure out the layout. Conservatively assume that this is disaligned.
+            debug!("is_disaligned({:?}) - true", place);
+            true
+        }
+    }
+}
+
+pub fn is_within_packed<'tcx, L>(
+    tcx: TyCtxt<'tcx>,
+    local_decls: &L,
+    place: Place<'tcx>,
+) -> Option<Align>
+where
+    L: HasLocalDecls<'tcx>,
+{
+    place
+        .iter_projections()
+        .rev()
+        // Stop at `Deref`; standard ABI alignment applies there.
+        .take_while(|(_base, elem)| !matches!(elem, ProjectionElem::Deref))
+        // Consider the packed alignments at play here...
+        .filter_map(|(base, _elem)| {
+            base.ty(local_decls, tcx).ty.ty_adt_def().and_then(|adt| adt.repr().pack)
+        })
+        // ... and compute their minimum.
+        // The overall smallest alignment is what matters.
+        .min()
+}
diff --git a/compiler/rustc_const_eval/src/util/caller_location.rs b/compiler/rustc_const_eval/src/util/caller_location.rs
new file mode 100644
index 00000000000..af9a4a4271d
--- /dev/null
+++ b/compiler/rustc_const_eval/src/util/caller_location.rs
@@ -0,0 +1,71 @@
+use rustc_hir::LangItem;
+use rustc_middle::mir;
+use rustc_middle::query::TyCtxtAt;
+use rustc_middle::ty::layout::LayoutOf;
+use rustc_middle::ty::{self, Mutability};
+use rustc_span::symbol::Symbol;
+
+use crate::const_eval::{mk_eval_cx_to_read_const_val, CanAccessMutGlobal, CompileTimeEvalContext};
+use crate::interpret::*;
+
+/// Allocate a `const core::panic::Location` with the provided filename and line/column numbers.
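+///
+/// The returned place has the layout of `core::panic::Location<'static>`, i.e. roughly
+/// `{ file: &str, line: u32, col: u32 }`, which is why exactly three fields are written below.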
+fn alloc_caller_location<'mir, 'tcx>(
+    ecx: &mut CompileTimeEvalContext<'mir, 'tcx>,
+    filename: Symbol,
+    line: u32,
+    col: u32,
+) -> MPlaceTy<'tcx> {
+    let loc_details = ecx.tcx.sess.opts.unstable_opts.location_detail;
+    // This can fail if rustc runs out of memory right here. Trying to emit an error would be
+    // pointless, since emitting one would itself require allocating more memory than these
+    // short strings do.
+    let file = if loc_details.file {
+        ecx.allocate_str(filename.as_str(), MemoryKind::CallerLocation, Mutability::Not).unwrap()
+    } else {
+        // FIXME: This creates a new allocation each time. It might be preferable to
+        // perform this allocation only once, and re-use the `MPlaceTy`.
+        // See https://github.com/rust-lang/rust/pull/89920#discussion_r730012398
+        ecx.allocate_str("<redacted>", MemoryKind::CallerLocation, Mutability::Not).unwrap()
+    };
+    let file = file.map_provenance(CtfeProvenance::as_immutable);
+    let line = if loc_details.line { Scalar::from_u32(line) } else { Scalar::from_u32(0) };
+    let col = if loc_details.column { Scalar::from_u32(col) } else { Scalar::from_u32(0) };
+
+    // Allocate memory for `CallerLocation` struct.
+    let loc_ty = ecx
+        .tcx
+        .type_of(ecx.tcx.require_lang_item(LangItem::PanicLocation, None))
+        .instantiate(*ecx.tcx, ecx.tcx.mk_args(&[ecx.tcx.lifetimes.re_erased.into()]));
+    let loc_layout = ecx.layout_of(loc_ty).unwrap();
+    let location = ecx.allocate(loc_layout, MemoryKind::CallerLocation).unwrap();
+
+    // Initialize fields.
+    ecx.write_immediate(file.to_ref(ecx), &ecx.project_field(&location, 0).unwrap())
+        .expect("writing to memory we just allocated cannot fail");
+    ecx.write_scalar(line, &ecx.project_field(&location, 1).unwrap())
+        .expect("writing to memory we just allocated cannot fail");
+    ecx.write_scalar(col, &ecx.project_field(&location, 2).unwrap())
+        .expect("writing to memory we just allocated cannot fail");
+
+    location
+}
+
+pub(crate) fn const_caller_location_provider(
+    tcx: TyCtxtAt<'_>,
+    file: Symbol,
+    line: u32,
+    col: u32,
+) -> mir::ConstValue<'_> {
+    trace!("const_caller_location: {}:{}:{}", file, line, col);
+    let mut ecx = mk_eval_cx_to_read_const_val(
+        tcx.tcx,
+        tcx.span,
+        ty::ParamEnv::reveal_all(),
+        CanAccessMutGlobal::No,
+    );
+
+    let loc_place = alloc_caller_location(&mut ecx, file, line, col);
+    if intern_const_alloc_recursive(&mut ecx, InternKind::Constant, &loc_place).is_err() {
+        bug!("intern_const_alloc_recursive should not error in this case")
+    }
+    mir::ConstValue::Scalar(Scalar::from_maybe_pointer(loc_place.ptr(), &tcx))
+}
diff --git a/compiler/rustc_const_eval/src/util/check_validity_requirement.rs b/compiler/rustc_const_eval/src/util/check_validity_requirement.rs
new file mode 100644
index 00000000000..8c4af5e5132
--- /dev/null
+++ b/compiler/rustc_const_eval/src/util/check_validity_requirement.rs
@@ -0,0 +1,159 @@
+use rustc_middle::ty::layout::{LayoutCx, LayoutError, LayoutOf, TyAndLayout, ValidityRequirement};
+use rustc_middle::ty::{ParamEnv, ParamEnvAnd, Ty, TyCtxt};
+use rustc_target::abi::{Abi, FieldsShape, Scalar, Variants};
+
+use crate::const_eval::{CanAccessMutGlobal, CheckAlignment, CompileTimeInterpreter};
+use crate::interpret::{InterpCx, MemoryKind, OpTy};
+
+/// Determines if this type permits "raw" initialization by just transmuting some memory into an
+/// instance of `T`.
+///
+/// `kind` indicates whether the memory is zero-initialized or left uninitialized. We assume
+/// uninitialized memory is mitigated by filling it with 0x01, which reduces the chance of causing
+/// LLVM UB.
+///
+/// By default we check whether that operation would cause *LLVM UB*, i.e., whether the LLVM IR we
+/// generate has UB or not. This is a mitigation strategy, which is why we are okay with accepting
+/// Rust UB as long as there is no risk of miscompilations. The `strict_init_checks` can be set to
+/// do a full check against Rust UB instead (in which case we also ignore the 0x01-filling and do
+/// the full uninit check).
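+///
+/// Illustrative sketch of the intended behavior (hypothetical `tcx`/types, not a doctest here):
+///
+/// ```ignore (illustrative)
+/// let zero = ValidityRequirement::Zero;
+/// // Zero-initialization is fine for a plain integer ...
+/// assert!(check_validity_requirement(tcx, zero, env.and(u32_ty)).unwrap());
+/// // ... but not for a reference, whose valid range excludes null.
+/// assert!(!check_validity_requirement(tcx, zero, env.and(ref_u8_ty)).unwrap());
+/// ```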
+pub fn check_validity_requirement<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    kind: ValidityRequirement,
+    param_env_and_ty: ParamEnvAnd<'tcx, Ty<'tcx>>,
+) -> Result<bool, &'tcx LayoutError<'tcx>> {
+    let layout = tcx.layout_of(param_env_and_ty)?;
+
+    // There is nothing strict or lax about inhabitedness.
+    if kind == ValidityRequirement::Inhabited {
+        return Ok(!layout.abi.is_uninhabited());
+    }
+
+    if kind == ValidityRequirement::Uninit || tcx.sess.opts.unstable_opts.strict_init_checks {
+        might_permit_raw_init_strict(layout, tcx, kind)
+    } else {
+        let layout_cx = LayoutCx { tcx, param_env: param_env_and_ty.param_env };
+        might_permit_raw_init_lax(layout, &layout_cx, kind)
+    }
+}
+
+/// Implements the 'strict' version of the validity checks; see `check_validity_requirement` for
+/// details.
+fn might_permit_raw_init_strict<'tcx>(
+    ty: TyAndLayout<'tcx>,
+    tcx: TyCtxt<'tcx>,
+    kind: ValidityRequirement,
+) -> Result<bool, &'tcx LayoutError<'tcx>> {
+    let machine = CompileTimeInterpreter::new(CanAccessMutGlobal::No, CheckAlignment::Error);
+
+    let mut cx = InterpCx::new(tcx, rustc_span::DUMMY_SP, ParamEnv::reveal_all(), machine);
+
+    let allocated = cx
+        .allocate(ty, MemoryKind::Machine(crate::const_eval::MemoryKind::Heap))
+        .expect("OOM: failed to allocate for uninit check");
+
+    if kind == ValidityRequirement::Zero {
+        cx.write_bytes_ptr(
+            allocated.ptr(),
+            std::iter::repeat(0_u8).take(ty.layout.size().bytes_usize()),
+        )
+        .expect("failed to write bytes for zero valid check");
+    }
+
+    let ot: OpTy<'_, _> = allocated.into();
+
+    // Assume that if it failed, it's a validation failure.
+    // This does *not* actually check that references are dereferenceable, but since all types that
+    // require dereferenceability also require non-null, we don't actually get any false negatives
+    // due to this.
+    Ok(cx.validate_operand(&ot).is_ok())
+}
+
+/// Implements the 'lax' (default) version of the validity checks; see
+/// `check_validity_requirement` for details.
+fn might_permit_raw_init_lax<'tcx>(
+    this: TyAndLayout<'tcx>,
+    cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
+    init_kind: ValidityRequirement,
+) -> Result<bool, &'tcx LayoutError<'tcx>> {
+    let scalar_allows_raw_init = move |s: Scalar| -> bool {
+        match init_kind {
+            ValidityRequirement::Inhabited => {
+                bug!("ValidityRequirement::Inhabited should have been handled above")
+            }
+            ValidityRequirement::Zero => {
+                // The range must contain 0.
+                s.valid_range(cx).contains(0)
+            }
+            ValidityRequirement::UninitMitigated0x01Fill => {
+                // The range must include a 0x01-filled buffer.
+                let mut val: u128 = 0x01;
+                for _ in 1..s.size(cx).bytes() {
+                    // For sizes >1, repeat the 0x01.
+                    val = (val << 8) | 0x01;
+                }
+                s.valid_range(cx).contains(val)
+            }
+            ValidityRequirement::Uninit => {
+                bug!("ValidityRequirement::Uninit should have been handled above")
+            }
+        }
+    };
+
+    // Check the ABI.
+    let valid = match this.abi {
+        Abi::Uninhabited => false, // definitely UB
+        Abi::Scalar(s) => scalar_allows_raw_init(s),
+        Abi::ScalarPair(s1, s2) => scalar_allows_raw_init(s1) && scalar_allows_raw_init(s2),
+        Abi::Vector { element: s, count } => count == 0 || scalar_allows_raw_init(s),
+        Abi::Aggregate { .. } => true, // Fields are checked below.
+    };
+    if !valid {
+        // This is definitely not okay.
+        return Ok(false);
+    }
+
+    // Special magic check for references and boxes (i.e., special pointer types).
+    if let Some(pointee) = this.ty.builtin_deref(false) {
+        let pointee = cx.layout_of(pointee.ty)?;
+        // We need to ensure that the LLVM attributes `aligned` and `dereferenceable(size)` are satisfied.
+        if pointee.align.abi.bytes() > 1 {
+            // 0x01-filling is not aligned.
+            return Ok(false);
+        }
+        if pointee.size.bytes() > 0 {
+            // A 'fake' integer pointer is not sufficiently dereferenceable.
+            return Ok(false);
+        }
+    }
+
+    // If we have not found an error yet, we need to recursively descend into fields.
+    match &this.fields {
+        FieldsShape::Primitive | FieldsShape::Union { .. } => {}
+        FieldsShape::Array { .. } => {
+            // Arrays never have scalar layout in LLVM, so if the array is not actually
+            // accessed, there is no LLVM UB -- therefore we can skip this.
+        }
+        FieldsShape::Arbitrary { offsets, .. } => {
+            for idx in 0..offsets.len() {
+                if !might_permit_raw_init_lax(this.field(cx, idx), cx, init_kind)? {
+                    // We found a field that is unhappy with this kind of initialization.
+                    return Ok(false);
+                }
+            }
+        }
+    }
+
+    match &this.variants {
+        Variants::Single { .. } => {
+            // All fields of this single variant have already been checked above, there is nothing
+            // else to do.
+        }
+        Variants::Multiple { .. } => {
+            // We cannot tell LLVM anything about the details of this multi-variant layout, so
+            // invalid values "hidden" inside the variant cannot cause LLVM trouble.
+        }
+    }
+
+    Ok(true)
+}
diff --git a/compiler/rustc_const_eval/src/util/compare_types.rs b/compiler/rustc_const_eval/src/util/compare_types.rs
new file mode 100644
index 00000000000..3ea54146fc7
--- /dev/null
+++ b/compiler/rustc_const_eval/src/util/compare_types.rs
@@ -0,0 +1,58 @@
+//! Routines to check for relations between fully inferred types.
+//!
+//! FIXME: Move this to a more general place. The utility of this extends to
+//! other areas of the compiler as well.
+
+use rustc_infer::infer::TyCtxtInferExt;
+use rustc_middle::traits::ObligationCause;
+use rustc_middle::ty::{ParamEnv, Ty, TyCtxt, Variance};
+use rustc_trait_selection::traits::ObligationCtxt;
+
+/// Returns whether the two types are equal up to subtyping.
+///
+/// This is used in case we don't know the expected subtyping direction
+/// and still want to check whether anything is broken.
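+///
+/// Illustrative sketch (hypothetical `Ty` values, not a doctest in this crate):
+///
+/// ```ignore (illustrative)
+/// // `for<'a> fn(&'a u32)` and `fn(&'static u32)` are distinct types, but they are related
+/// // by subtyping, so MIR assignments between them are considered well-formed.
+/// assert!(is_equal_up_to_subtyping(tcx, param_env, hr_fn_ptr_ty, static_fn_ptr_ty));
+/// ```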
+pub fn is_equal_up_to_subtyping<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    param_env: ParamEnv<'tcx>,
+    src: Ty<'tcx>,
+    dest: Ty<'tcx>,
+) -> bool {
+    // Fast path.
+    if src == dest {
+        return true;
+    }
+
+    // Check for subtyping in either direction.
+    relate_types(tcx, param_env, Variance::Covariant, src, dest)
+        || relate_types(tcx, param_env, Variance::Covariant, dest, src)
+}
+
+/// Returns whether `src` is a subtype of `dest`, i.e. `src <: dest`.
+///
+/// When validating assignments, the variance should be `Covariant`. When checking during
+/// `MirPhase` >= `MirPhase::Runtime(RuntimePhase::Initial)`, the variance should be `Invariant`,
+/// because we want to check for type equality.
+pub fn relate_types<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    param_env: ParamEnv<'tcx>,
+    variance: Variance,
+    src: Ty<'tcx>,
+    dest: Ty<'tcx>,
+) -> bool {
+    if src == dest {
+        return true;
+    }
+
+    let mut builder = tcx.infer_ctxt().ignoring_regions();
+    let infcx = builder.build();
+    let ocx = ObligationCtxt::new(&infcx);
+    let cause = ObligationCause::dummy();
+    let src = ocx.normalize(&cause, param_env, src);
+    let dest = ocx.normalize(&cause, param_env, dest);
+    match ocx.relate(&cause, param_env, variance, src, dest) {
+        Ok(()) => {}
+        Err(_) => return false,
+    };
+    ocx.select_all_or_error().is_empty()
+}
diff --git a/compiler/rustc_const_eval/src/util/mod.rs b/compiler/rustc_const_eval/src/util/mod.rs
new file mode 100644
index 00000000000..a8060463b69
--- /dev/null
+++ b/compiler/rustc_const_eval/src/util/mod.rs
@@ -0,0 +1,36 @@
+use rustc_middle::mir;
+
+mod alignment;
+pub(crate) mod caller_location;
+mod check_validity_requirement;
+mod compare_types;
+mod type_name;
+
+pub use self::alignment::{is_disaligned, is_within_packed};
+pub use self::check_validity_requirement::check_validity_requirement;
+pub use self::compare_types::{is_equal_up_to_subtyping, relate_types};
+pub use self::type_name::type_name;
+
+/// Classify whether an operator is "left-homogeneous", i.e., the LHS has the
+/// same type as the result.
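+///
+/// A hedged illustration in surface Rust (the classification itself is about MIR `BinOp`s):
+///
+/// ```ignore (illustrative)
+/// let x: u32 = 1u32 << 3i8; // Shl: the result has the LHS type, even with an `i8` RHS.
+/// let b: bool = 1 < 2;      // comparisons are not left-homogeneous: the result is `bool`.
+/// ```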
+#[inline]
+pub fn binop_left_homogeneous(op: mir::BinOp) -> bool {
+    use rustc_middle::mir::BinOp::*;
+    match op {
+        Add | AddUnchecked | Sub | SubUnchecked | Mul | MulUnchecked | Div | Rem | BitXor
+        | BitAnd | BitOr | Offset | Shl | ShlUnchecked | Shr | ShrUnchecked => true,
+        Eq | Ne | Lt | Le | Gt | Ge => false,
+    }
+}
+
+/// Classify whether an operator is "right-homogeneous", i.e., the RHS has the
+/// same type as the LHS.
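+///
+/// For example, `Offset` is not right-homogeneous: its LHS is a raw pointer while its RHS is an
+/// `isize`/`usize` count, as checked in the MIR validator.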
+#[inline]
+pub fn binop_right_homogeneous(op: mir::BinOp) -> bool {
+    use rustc_middle::mir::BinOp::*;
+    match op {
+        Add | AddUnchecked | Sub | SubUnchecked | Mul | MulUnchecked | Div | Rem | BitXor
+        | BitAnd | BitOr | Eq | Ne | Lt | Le | Gt | Ge => true,
+        Offset | Shl | ShlUnchecked | Shr | ShrUnchecked => false,
+    }
+}
diff --git a/compiler/rustc_const_eval/src/util/type_name.rs b/compiler/rustc_const_eval/src/util/type_name.rs
new file mode 100644
index 00000000000..2b80623ab45
--- /dev/null
+++ b/compiler/rustc_const_eval/src/util/type_name.rs
@@ -0,0 +1,184 @@
+use rustc_data_structures::intern::Interned;
+use rustc_hir::def_id::CrateNum;
+use rustc_hir::definitions::DisambiguatedDefPathData;
+use rustc_middle::ty::{
+    self,
+    print::{PrettyPrinter, Print, PrintError, Printer},
+    GenericArg, GenericArgKind, Ty, TyCtxt,
+};
+use std::fmt::Write;
+
+struct AbsolutePathPrinter<'tcx> {
+    tcx: TyCtxt<'tcx>,
+    path: String,
+}
+
+impl<'tcx> Printer<'tcx> for AbsolutePathPrinter<'tcx> {
+    fn tcx(&self) -> TyCtxt<'tcx> {
+        self.tcx
+    }
+
+    fn print_region(&mut self, _region: ty::Region<'_>) -> Result<(), PrintError> {
+        Ok(())
+    }
+
+    fn print_type(&mut self, ty: Ty<'tcx>) -> Result<(), PrintError> {
+        match *ty.kind() {
+            // Types without identity.
+            ty::Bool
+            | ty::Char
+            | ty::Int(_)
+            | ty::Uint(_)
+            | ty::Float(_)
+            | ty::Str
+            | ty::Array(_, _)
+            | ty::Slice(_)
+            | ty::RawPtr(_)
+            | ty::Ref(_, _, _)
+            | ty::FnPtr(_)
+            | ty::Never
+            | ty::Tuple(_)
+            | ty::Dynamic(_, _, _) => self.pretty_print_type(ty),
+
+            // Placeholders (all printed as `_` to uniformize them).
+            ty::Param(_) | ty::Bound(..) | ty::Placeholder(_) | ty::Infer(_) | ty::Error(_) => {
+                write!(self, "_")?;
+                Ok(())
+            }
+
+            // Types with identity (print the module path).
+            ty::Adt(ty::AdtDef(Interned(&ty::AdtDefData { did: def_id, .. }, _)), args)
+            | ty::FnDef(def_id, args)
+            | ty::Alias(ty::Projection | ty::Opaque, ty::AliasTy { def_id, args, .. })
+            | ty::Closure(def_id, args)
+            | ty::CoroutineClosure(def_id, args)
+            | ty::Coroutine(def_id, args) => self.print_def_path(def_id, args),
+            ty::Foreign(def_id) => self.print_def_path(def_id, &[]),
+
+            ty::Alias(ty::Weak, _) => bug!("type_name: unexpected weak projection"),
+            ty::Alias(ty::Inherent, _) => bug!("type_name: unexpected inherent projection"),
+            ty::CoroutineWitness(..) => bug!("type_name: unexpected `CoroutineWitness`"),
+        }
+    }
+
+    fn print_const(&mut self, ct: ty::Const<'tcx>) -> Result<(), PrintError> {
+        self.pretty_print_const(ct, false)
+    }
+
+    fn print_dyn_existential(
+        &mut self,
+        predicates: &'tcx ty::List<ty::PolyExistentialPredicate<'tcx>>,
+    ) -> Result<(), PrintError> {
+        self.pretty_print_dyn_existential(predicates)
+    }
+
+    fn path_crate(&mut self, cnum: CrateNum) -> Result<(), PrintError> {
+        self.path.push_str(self.tcx.crate_name(cnum).as_str());
+        Ok(())
+    }
+
+    fn path_qualified(
+        &mut self,
+        self_ty: Ty<'tcx>,
+        trait_ref: Option<ty::TraitRef<'tcx>>,
+    ) -> Result<(), PrintError> {
+        self.pretty_path_qualified(self_ty, trait_ref)
+    }
+
+    fn path_append_impl(
+        &mut self,
+        print_prefix: impl FnOnce(&mut Self) -> Result<(), PrintError>,
+        _disambiguated_data: &DisambiguatedDefPathData,
+        self_ty: Ty<'tcx>,
+        trait_ref: Option<ty::TraitRef<'tcx>>,
+    ) -> Result<(), PrintError> {
+        self.pretty_path_append_impl(
+            |cx| {
+                print_prefix(cx)?;
+
+                cx.path.push_str("::");
+
+                Ok(())
+            },
+            self_ty,
+            trait_ref,
+        )
+    }
+
+    fn path_append(
+        &mut self,
+        print_prefix: impl FnOnce(&mut Self) -> Result<(), PrintError>,
+        disambiguated_data: &DisambiguatedDefPathData,
+    ) -> Result<(), PrintError> {
+        print_prefix(self)?;
+
+        write!(self.path, "::{}", disambiguated_data.data).unwrap();
+
+        Ok(())
+    }
+
+    fn path_generic_args(
+        &mut self,
+        print_prefix: impl FnOnce(&mut Self) -> Result<(), PrintError>,
+        args: &[GenericArg<'tcx>],
+    ) -> Result<(), PrintError> {
+        print_prefix(self)?;
+        let args =
+            args.iter().cloned().filter(|arg| !matches!(arg.unpack(), GenericArgKind::Lifetime(_)));
+        if args.clone().next().is_some() {
+            self.generic_delimiters(|cx| cx.comma_sep(args))
+        } else {
+            Ok(())
+        }
+    }
+}
+
+impl<'tcx> PrettyPrinter<'tcx> for AbsolutePathPrinter<'tcx> {
+    fn should_print_region(&self, _region: ty::Region<'_>) -> bool {
+        false
+    }
+    fn comma_sep<T>(&mut self, mut elems: impl Iterator<Item = T>) -> Result<(), PrintError>
+    where
+        T: Print<'tcx, Self>,
+    {
+        if let Some(first) = elems.next() {
+            first.print(self)?;
+            for elem in elems {
+                self.path.push_str(", ");
+                elem.print(self)?;
+            }
+        }
+        Ok(())
+    }
+
+    fn generic_delimiters(
+        &mut self,
+        f: impl FnOnce(&mut Self) -> Result<(), PrintError>,
+    ) -> Result<(), PrintError> {
+        write!(self, "<")?;
+
+        f(self)?;
+
+        write!(self, ">")?;
+
+        Ok(())
+    }
+
+    fn should_print_verbose(&self) -> bool {
+        // `std::any::type_name` should never print verbose type names
+        false
+    }
+}
+
+impl Write for AbsolutePathPrinter<'_> {
+    fn write_str(&mut self, s: &str) -> std::fmt::Result {
+        self.path.push_str(s);
+        Ok(())
+    }
+}
+
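+/// Computes the path that `std::any::type_name` reports for `ty`.
+///
+/// Illustrative expectation (not a doctest here): `Option<String>` renders as
+/// `"core::option::Option<alloc::string::String>"`; lifetimes are elided and unknown type
+/// parameters print as `_`.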
+pub fn type_name<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> String {
+    let mut printer = AbsolutePathPrinter { tcx, path: String::new() };
+    printer.print_type(ty).unwrap();
+    printer.path
+}