Diffstat (limited to 'compiler/rustc_const_eval/src')
-rw-r--r--  compiler/rustc_const_eval/src/const_eval/error.rs | 210
-rw-r--r--  compiler/rustc_const_eval/src/const_eval/eval_queries.rs | 399
-rw-r--r--  compiler/rustc_const_eval/src/const_eval/fn_queries.rs | 112
-rw-r--r--  compiler/rustc_const_eval/src/const_eval/machine.rs | 474
-rw-r--r--  compiler/rustc_const_eval/src/const_eval/mod.rs | 207
-rw-r--r--  compiler/rustc_const_eval/src/interpret/cast.rs | 365
-rw-r--r--  compiler/rustc_const_eval/src/interpret/eval_context.rs | 1049
-rw-r--r--  compiler/rustc_const_eval/src/interpret/intern.rs | 437
-rw-r--r--  compiler/rustc_const_eval/src/interpret/intrinsics.rs | 585
-rw-r--r--  compiler/rustc_const_eval/src/interpret/intrinsics/caller_location.rs | 123
-rw-r--r--  compiler/rustc_const_eval/src/interpret/intrinsics/type_name.rs | 197
-rw-r--r--  compiler/rustc_const_eval/src/interpret/machine.rs | 479
-rw-r--r--  compiler/rustc_const_eval/src/interpret/memory.rs | 1186
-rw-r--r--  compiler/rustc_const_eval/src/interpret/mod.rs | 33
-rw-r--r--  compiler/rustc_const_eval/src/interpret/operand.rs | 762
-rw-r--r--  compiler/rustc_const_eval/src/interpret/operator.rs | 417
-rw-r--r--  compiler/rustc_const_eval/src/interpret/place.rs | 1091
-rw-r--r--  compiler/rustc_const_eval/src/interpret/step.rs | 316
-rw-r--r--  compiler/rustc_const_eval/src/interpret/terminator.rs | 517
-rw-r--r--  compiler/rustc_const_eval/src/interpret/traits.rs | 142
-rw-r--r--  compiler/rustc_const_eval/src/interpret/util.rs | 84
-rw-r--r--  compiler/rustc_const_eval/src/interpret/validity.rs | 965
-rw-r--r--  compiler/rustc_const_eval/src/interpret/visitor.rs | 278
-rw-r--r--  compiler/rustc_const_eval/src/lib.rs | 56
-rw-r--r--  compiler/rustc_const_eval/src/transform/check_consts/check.rs | 1110
-rw-r--r--  compiler/rustc_const_eval/src/transform/check_consts/mod.rs | 135
-rw-r--r--  compiler/rustc_const_eval/src/transform/check_consts/ops.rs | 628
-rw-r--r--  compiler/rustc_const_eval/src/transform/check_consts/post_drop_elaboration.rs | 123
-rw-r--r--  compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs | 272
-rw-r--r--  compiler/rustc_const_eval/src/transform/check_consts/resolver.rs | 216
-rw-r--r--  compiler/rustc_const_eval/src/transform/mod.rs | 5
-rw-r--r--  compiler/rustc_const_eval/src/transform/promote_consts.rs | 1092
-rw-r--r--  compiler/rustc_const_eval/src/transform/validate.rs | 523
-rw-r--r--  compiler/rustc_const_eval/src/util/aggregate.rs | 75
-rw-r--r--  compiler/rustc_const_eval/src/util/alignment.rs | 70
-rw-r--r--  compiler/rustc_const_eval/src/util/collect_writes.rs | 36
-rw-r--r--  compiler/rustc_const_eval/src/util/find_self_call.rs | 36
-rw-r--r--  compiler/rustc_const_eval/src/util/mod.rs | 8
38 files changed, 14813 insertions, 0 deletions
diff --git a/compiler/rustc_const_eval/src/const_eval/error.rs b/compiler/rustc_const_eval/src/const_eval/error.rs
new file mode 100644
index 00000000000..5da16816625
--- /dev/null
+++ b/compiler/rustc_const_eval/src/const_eval/error.rs
@@ -0,0 +1,210 @@
+use std::error::Error;
+use std::fmt;
+
+use rustc_errors::{DiagnosticBuilder, ErrorReported};
+use rustc_hir as hir;
+use rustc_middle::mir::AssertKind;
+use rustc_middle::ty::{layout::LayoutError, query::TyCtxtAt, ConstInt};
+use rustc_span::{Span, Symbol};
+
+use super::InterpCx;
+use crate::interpret::{
+    struct_error, ErrorHandled, FrameInfo, InterpError, InterpErrorInfo, Machine, MachineStopType,
+};
+
+/// The CTFE machine has some custom error kinds.
+#[derive(Clone, Debug)]
+pub enum ConstEvalErrKind {
+    NeedsRfc(String),
+    ConstAccessesStatic,
+    ModifiedGlobal,
+    AssertFailure(AssertKind<ConstInt>),
+    Panic { msg: Symbol, line: u32, col: u32, file: Symbol },
+    Abort(String),
+}
+
+impl MachineStopType for ConstEvalErrKind {
+    fn is_hard_err(&self) -> bool {
+        match self {
+            Self::Panic { .. } => true,
+            _ => false,
+        }
+    }
+}
+
+// The errors become `MachineStop` with plain strings when being raised.
+// `ConstEvalErr` (in `librustc_middle/mir/interpret/error.rs`) knows to
+// handle these.
+impl<'tcx> Into<InterpErrorInfo<'tcx>> for ConstEvalErrKind {
+    fn into(self) -> InterpErrorInfo<'tcx> {
+        err_machine_stop!(self).into()
+    }
+}
+
+impl fmt::Display for ConstEvalErrKind {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        use self::ConstEvalErrKind::*;
+        match *self {
+            NeedsRfc(ref msg) => {
+                write!(f, "\"{}\" needs an rfc before being allowed inside constants", msg)
+            }
+            ConstAccessesStatic => write!(f, "constant accesses static"),
+            ModifiedGlobal => {
+                write!(f, "modifying a static's initial value from another static's initializer")
+            }
+            AssertFailure(ref msg) => write!(f, "{:?}", msg),
+            Panic { msg, line, col, file } => {
+                write!(f, "the evaluated program panicked at '{}', {}:{}:{}", msg, file, line, col)
+            }
+            Abort(ref msg) => write!(f, "{}", msg),
+        }
+    }
+}
+
+impl Error for ConstEvalErrKind {}
+
+/// When const-evaluation errors, this type is constructed with the resulting information,
+/// and then used to emit the error as a lint or hard error.
+#[derive(Debug)]
+pub struct ConstEvalErr<'tcx> {
+    pub span: Span,
+    pub error: InterpError<'tcx>,
+    pub stacktrace: Vec<FrameInfo<'tcx>>,
+}
+
+impl<'tcx> ConstEvalErr<'tcx> {
+    /// Turn an interpreter error into something to report to the user.
+    /// As a side-effect, if RUSTC_CTFE_BACKTRACE is set, this prints the backtrace.
+    /// Should be called only if the error is actually going to be reported!
+    pub fn new<'mir, M: Machine<'mir, 'tcx>>(
+        ecx: &InterpCx<'mir, 'tcx, M>,
+        error: InterpErrorInfo<'tcx>,
+        span: Option<Span>,
+    ) -> ConstEvalErr<'tcx>
+    where
+        'tcx: 'mir,
+    {
+        error.print_backtrace();
+        let stacktrace = ecx.generate_stacktrace();
+        ConstEvalErr {
+            error: error.into_kind(),
+            stacktrace,
+            span: span.unwrap_or_else(|| ecx.cur_span()),
+        }
+    }
+
+    pub fn struct_error(
+        &self,
+        tcx: TyCtxtAt<'tcx>,
+        message: &str,
+        emit: impl FnOnce(DiagnosticBuilder<'_>),
+    ) -> ErrorHandled {
+        self.struct_generic(tcx, message, emit, None)
+    }
+
+    pub fn report_as_error(&self, tcx: TyCtxtAt<'tcx>, message: &str) -> ErrorHandled {
+        self.struct_error(tcx, message, |mut e| e.emit())
+    }
+
+    pub fn report_as_lint(
+        &self,
+        tcx: TyCtxtAt<'tcx>,
+        message: &str,
+        lint_root: hir::HirId,
+        span: Option<Span>,
+    ) -> ErrorHandled {
+        self.struct_generic(
+            tcx,
+            message,
+            |mut lint: DiagnosticBuilder<'_>| {
+                // Apply the span.
+                if let Some(span) = span {
+                    let primary_spans = lint.span.primary_spans().to_vec();
+                    // point at the actual error as the primary span
+                    lint.replace_span_with(span);
+                    // point to the `const` statement as a secondary span
+                    // they don't have any label
+                    for sp in primary_spans {
+                        if sp != span {
+                            lint.span_label(sp, "");
+                        }
+                    }
+                }
+                lint.emit();
+            },
+            Some(lint_root),
+        )
+    }
+
+    /// Create a diagnostic for this const eval error.
+    ///
+    /// Sets the message passed in via `message` and adds span labels with detailed error
+    /// information before handing control back to `emit` to do any final processing.
+    /// It's the caller's responsibility to call emit(), stash(), etc. within the `emit`
+    /// function to dispose of the diagnostic properly.
+    ///
+    /// If `lint_root.is_some()` report it as a lint, else report it as a hard error.
+    /// (Except that for some errors, we ignore all that -- see `must_error` below.)
+    fn struct_generic(
+        &self,
+        tcx: TyCtxtAt<'tcx>,
+        message: &str,
+        emit: impl FnOnce(DiagnosticBuilder<'_>),
+        lint_root: Option<hir::HirId>,
+    ) -> ErrorHandled {
+        let finish = |mut err: DiagnosticBuilder<'_>, span_msg: Option<String>| {
+            trace!("reporting const eval failure at {:?}", self.span);
+            if let Some(span_msg) = span_msg {
+                err.span_label(self.span, span_msg);
+            }
+            // Add spans for the stacktrace. Don't print a single-line backtrace though.
+            if self.stacktrace.len() > 1 {
+                for frame_info in &self.stacktrace {
+                    err.span_label(frame_info.span, frame_info.to_string());
+                }
+            }
+            // Let the caller finish the job.
+            emit(err)
+        };
+
+        // Special handling for certain errors
+        match &self.error {
+            // Don't emit a new diagnostic for these errors
+            err_inval!(Layout(LayoutError::Unknown(_))) | err_inval!(TooGeneric) => {
+                return ErrorHandled::TooGeneric;
+            }
+            err_inval!(AlreadyReported(error_reported)) => {
+                return ErrorHandled::Reported(*error_reported);
+            }
+            err_inval!(Layout(LayoutError::SizeOverflow(_))) => {
+                // We must *always* hard error on these, even if the caller wants just a lint.
+                // The `message` makes little sense here, this is a more serious error than the
+                // caller thinks anyway.
+                // See <https://github.com/rust-lang/rust/pull/63152>.
+                finish(struct_error(tcx, &self.error.to_string()), None);
+                return ErrorHandled::Reported(ErrorReported);
+            }
+            _ => {}
+        };
+
+        let err_msg = self.error.to_string();
+
+        // Regular case - emit a lint.
+        if let Some(lint_root) = lint_root {
+            // Report as lint.
+            let hir_id =
+                self.stacktrace.iter().rev().find_map(|frame| frame.lint_root).unwrap_or(lint_root);
+            tcx.struct_span_lint_hir(
+                rustc_session::lint::builtin::CONST_ERR,
+                hir_id,
+                tcx.span,
+                |lint| finish(lint.build(message), Some(err_msg)),
+            );
+            ErrorHandled::Linted
+        } else {
+            // Report as hard error.
+            finish(struct_error(tcx, message), Some(err_msg));
+            ErrorHandled::Reported(ErrorReported)
+        }
+    }
+}
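
For reference, the machine-stop pattern above (a CTFE-specific error enum that renders a user-facing message via `Display` and converts into the interpreter's generic error type) can be illustrated with a standalone sketch; the names below are illustrative stand-ins, not the actual rustc types:

use std::fmt;

#[derive(Clone, Debug)]
enum MachineErrKind {
    NeedsRfc(String),
    Panic { msg: String, file: String, line: u32, col: u32 },
}

// Stand-in for `InterpErrorInfo<'tcx>`.
#[derive(Debug)]
struct InterpError(String);

impl fmt::Display for MachineErrKind {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            MachineErrKind::NeedsRfc(msg) => {
                write!(f, "\"{}\" needs an rfc before being allowed inside constants", msg)
            }
            MachineErrKind::Panic { msg, file, line, col } => {
                write!(f, "the evaluated program panicked at '{}', {}:{}:{}", msg, file, line, col)
            }
        }
    }
}

impl From<MachineErrKind> for InterpError {
    fn from(kind: MachineErrKind) -> Self {
        // rustc does this with `err_machine_stop!(self).into()`; here we just stringify.
        InterpError(kind.to_string())
    }
}

fn main() {
    let panic: InterpError =
        MachineErrKind::Panic { msg: "boom".into(), file: "lib.rs".into(), line: 3, col: 5 }.into();
    let rfc: InterpError = MachineErrKind::NeedsRfc("foo".into()).into();
    println!("{}\n{}", panic.0, rfc.0);
}
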
diff --git a/compiler/rustc_const_eval/src/const_eval/eval_queries.rs b/compiler/rustc_const_eval/src/const_eval/eval_queries.rs
new file mode 100644
index 00000000000..171fc45ea46
--- /dev/null
+++ b/compiler/rustc_const_eval/src/const_eval/eval_queries.rs
@@ -0,0 +1,399 @@
+use super::{CompileTimeEvalContext, CompileTimeInterpreter, ConstEvalErr, MemoryExtra};
+use crate::interpret::eval_nullary_intrinsic;
+use crate::interpret::{
+    intern_const_alloc_recursive, Allocation, ConstAlloc, ConstValue, CtfeValidationMode, GlobalId,
+    Immediate, InternKind, InterpCx, InterpResult, MPlaceTy, MemoryKind, OpTy, RefTracking, Scalar,
+    ScalarMaybeUninit, StackPopCleanup,
+};
+
+use rustc_errors::ErrorReported;
+use rustc_hir::def::DefKind;
+use rustc_middle::mir;
+use rustc_middle::mir::interpret::ErrorHandled;
+use rustc_middle::mir::pretty::display_allocation;
+use rustc_middle::traits::Reveal;
+use rustc_middle::ty::layout::LayoutOf;
+use rustc_middle::ty::print::with_no_trimmed_paths;
+use rustc_middle::ty::{self, subst::Subst, TyCtxt};
+use rustc_span::source_map::Span;
+use rustc_target::abi::Abi;
+use std::borrow::Cow;
+use std::convert::TryInto;
+
+pub fn note_on_undefined_behavior_error() -> &'static str {
+    "The rules on what exactly is undefined behavior aren't clear, \
+     so this check might be overzealous. Please open an issue on the rustc \
+     repository if you believe it should not be considered undefined behavior."
+}
+
+// Returns a pointer to where the result lives
+fn eval_body_using_ecx<'mir, 'tcx>(
+    ecx: &mut CompileTimeEvalContext<'mir, 'tcx>,
+    cid: GlobalId<'tcx>,
+    body: &'mir mir::Body<'tcx>,
+) -> InterpResult<'tcx, MPlaceTy<'tcx>> {
+    debug!("eval_body_using_ecx: {:?}, {:?}", cid, ecx.param_env);
+    let tcx = *ecx.tcx;
+    assert!(
+        cid.promoted.is_some()
+            || matches!(
+                ecx.tcx.def_kind(cid.instance.def_id()),
+                DefKind::Const
+                    | DefKind::Static
+                    | DefKind::ConstParam
+                    | DefKind::AnonConst
+                    | DefKind::AssocConst
+            ),
+        "Unexpected DefKind: {:?}",
+        ecx.tcx.def_kind(cid.instance.def_id())
+    );
+    let layout = ecx.layout_of(body.return_ty().subst(tcx, cid.instance.substs))?;
+    assert!(!layout.is_unsized());
+    let ret = ecx.allocate(layout, MemoryKind::Stack)?;
+
+    let name =
+        with_no_trimmed_paths(|| ty::tls::with(|tcx| tcx.def_path_str(cid.instance.def_id())));
+    let prom = cid.promoted.map_or_else(String::new, |p| format!("::promoted[{:?}]", p));
+    trace!("eval_body_using_ecx: pushing stack frame for global: {}{}", name, prom);
+
+    ecx.push_stack_frame(
+        cid.instance,
+        body,
+        Some(&ret.into()),
+        StackPopCleanup::None { cleanup: false },
+    )?;
+
+    // The main interpreter loop.
+    ecx.run()?;
+
+    // Intern the result
+    let intern_kind = if cid.promoted.is_some() {
+        InternKind::Promoted
+    } else {
+        match tcx.static_mutability(cid.instance.def_id()) {
+            Some(m) => InternKind::Static(m),
+            None => InternKind::Constant,
+        }
+    };
+    intern_const_alloc_recursive(ecx, intern_kind, &ret)?;
+
+    debug!("eval_body_using_ecx done: {:?}", *ret);
+    Ok(ret)
+}
+
+/// The `InterpCx` is only meant to be used to do field and index projections into constants for
+/// `simd_shuffle` and const patterns in match arms.
+///
+/// The function containing the `match` that is currently being analyzed may have generic bounds
+/// that inform us about the generic bounds of the constant. E.g., using an associated constant
+/// of a function's generic parameter will require knowledge about the bounds on the generic
+/// parameter. These bounds are passed to `mk_eval_cx` via the `ParamEnv` argument.
+pub(super) fn mk_eval_cx<'mir, 'tcx>(
+    tcx: TyCtxt<'tcx>,
+    root_span: Span,
+    param_env: ty::ParamEnv<'tcx>,
+    can_access_statics: bool,
+) -> CompileTimeEvalContext<'mir, 'tcx> {
+    debug!("mk_eval_cx: {:?}", param_env);
+    InterpCx::new(
+        tcx,
+        root_span,
+        param_env,
+        CompileTimeInterpreter::new(tcx.const_eval_limit()),
+        MemoryExtra { can_access_statics },
+    )
+}
+
+/// This function converts an interpreter value into a constant that is meant for use in the
+/// type system.
+pub(super) fn op_to_const<'tcx>(
+    ecx: &CompileTimeEvalContext<'_, 'tcx>,
+    op: &OpTy<'tcx>,
+) -> ConstValue<'tcx> {
+    // We do not have value optimizations for everything.
+    // Only scalars and slices, since they are very common.
+    // Note that further down we turn scalars of uninitialized bits back to `ByRef`. These can result
+    // from scalar unions that are initialized with one of their zero sized variants. We could
+    // instead allow `ConstValue::Scalar` to store `ScalarMaybeUninit`, but that would affect all
+    // the usual cases of extracting e.g. a `usize`, without there being a real use case for the
+    // `Undef` situation.
+    let try_as_immediate = match op.layout.abi {
+        Abi::Scalar(..) => true,
+        Abi::ScalarPair(..) => match op.layout.ty.kind() {
+            ty::Ref(_, inner, _) => match *inner.kind() {
+                ty::Slice(elem) => elem == ecx.tcx.types.u8,
+                ty::Str => true,
+                _ => false,
+            },
+            _ => false,
+        },
+        _ => false,
+    };
+    let immediate = if try_as_immediate {
+        Err(ecx.read_immediate(op).expect("normalization works on validated constants"))
+    } else {
+        // It is guaranteed that any non-slice scalar pair is actually ByRef here.
+        // When we come back from raw const eval, we are always by-ref. The only way our op here is
+        // by-val is if we are in destructure_const, i.e., if this is (a field of) something that we
+        // "tried to make immediate" before. We wouldn't do that for non-slice scalar pairs or
+        // structs containing such.
+        op.try_as_mplace()
+    };
+
+    // We know `offset` is relative to the allocation, so we can use `into_parts`.
+    let to_const_value = |mplace: &MPlaceTy<'_>| match mplace.ptr.into_parts() {
+        (Some(alloc_id), offset) => {
+            let alloc = ecx.tcx.global_alloc(alloc_id).unwrap_memory();
+            ConstValue::ByRef { alloc, offset }
+        }
+        (None, offset) => {
+            assert!(mplace.layout.is_zst());
+            assert_eq!(
+                offset.bytes() % mplace.layout.align.abi.bytes(),
+                0,
+                "this MPlaceTy must come from a validated constant, thus we can assume the \
+                alignment is correct",
+            );
+            ConstValue::Scalar(Scalar::ZST)
+        }
+    };
+    match immediate {
+        Ok(ref mplace) => to_const_value(mplace),
+        // see comment on `let try_as_immediate` above
+        Err(imm) => match *imm {
+            Immediate::Scalar(x) => match x {
+                ScalarMaybeUninit::Scalar(s) => ConstValue::Scalar(s),
+                ScalarMaybeUninit::Uninit => to_const_value(&op.assert_mem_place()),
+            },
+            Immediate::ScalarPair(a, b) => {
+                // We know `offset` is relative to the allocation, so we can use `into_parts`.
+                let (data, start) = match ecx.scalar_to_ptr(a.check_init().unwrap()).into_parts() {
+                    (Some(alloc_id), offset) => {
+                        (ecx.tcx.global_alloc(alloc_id).unwrap_memory(), offset.bytes())
+                    }
+                    (None, _offset) => (
+                        ecx.tcx.intern_const_alloc(Allocation::from_bytes_byte_aligned_immutable(
+                            b"" as &[u8],
+                        )),
+                        0,
+                    ),
+                };
+                let len = b.to_machine_usize(ecx).unwrap();
+                let start = start.try_into().unwrap();
+                let len: usize = len.try_into().unwrap();
+                ConstValue::Slice { data, start, end: start + len }
+            }
+        },
+    }
+}
+
+fn turn_into_const_value<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    constant: ConstAlloc<'tcx>,
+    key: ty::ParamEnvAnd<'tcx, GlobalId<'tcx>>,
+) -> ConstValue<'tcx> {
+    let cid = key.value;
+    let def_id = cid.instance.def.def_id();
+    let is_static = tcx.is_static(def_id);
+    let ecx = mk_eval_cx(tcx, tcx.def_span(key.value.instance.def_id()), key.param_env, is_static);
+
+    let mplace = ecx.raw_const_to_mplace(constant).expect(
+        "can only fail if layout computation failed, \
+        which should have given a good error before ever invoking this function",
+    );
+    assert!(
+        !is_static || cid.promoted.is_some(),
+        "the `eval_to_const_value_raw` query should not be used for statics, use `eval_to_allocation` instead"
+    );
+    // Turn this into a proper constant.
+    op_to_const(&ecx, &mplace.into())
+}
+
+pub fn eval_to_const_value_raw_provider<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    key: ty::ParamEnvAnd<'tcx, GlobalId<'tcx>>,
+) -> ::rustc_middle::mir::interpret::EvalToConstValueResult<'tcx> {
+    // see comment in eval_to_allocation_raw_provider for what we're doing here
+    if key.param_env.reveal() == Reveal::All {
+        let mut key = key;
+        key.param_env = key.param_env.with_user_facing();
+        match tcx.eval_to_const_value_raw(key) {
+            // try again with reveal all as requested
+            Err(ErrorHandled::TooGeneric) => {}
+            // deduplicate calls
+            other => return other,
+        }
+    }
+
+    // We call `const_eval` for zero arg intrinsics, too, in order to cache their value.
+    // Catch such calls and evaluate them instead of trying to load a constant's MIR.
+    if let ty::InstanceDef::Intrinsic(def_id) = key.value.instance.def {
+        let ty = key.value.instance.ty(tcx, key.param_env);
+        let substs = match ty.kind() {
+            ty::FnDef(_, substs) => substs,
+            _ => bug!("intrinsic with type {:?}", ty),
+        };
+        return eval_nullary_intrinsic(tcx, key.param_env, def_id, substs).map_err(|error| {
+            let span = tcx.def_span(def_id);
+            let error = ConstEvalErr { error: error.into_kind(), stacktrace: vec![], span };
+            error.report_as_error(tcx.at(span), "could not evaluate nullary intrinsic")
+        });
+    }
+
+    tcx.eval_to_allocation_raw(key).map(|val| turn_into_const_value(tcx, val, key))
+}
+
+pub fn eval_to_allocation_raw_provider<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    key: ty::ParamEnvAnd<'tcx, GlobalId<'tcx>>,
+) -> ::rustc_middle::mir::interpret::EvalToAllocationRawResult<'tcx> {
+    // Because the constant is computed twice (once per value of `Reveal`), we are at risk of
+    // reporting the same error twice here. To resolve this, we check whether we can evaluate the
+    // constant in the more restrictive `Reveal::UserFacing`, which most likely already was
+    // computed. For a large percentage of constants that will already have succeeded. Only
+    // associated constants of generic functions will fail due to not enough monomorphization
+    // information being available.
+
+    // In case we fail in the `UserFacing` variant, we just do the real computation.
+    if key.param_env.reveal() == Reveal::All {
+        let mut key = key;
+        key.param_env = key.param_env.with_user_facing();
+        match tcx.eval_to_allocation_raw(key) {
+            // try again with reveal all as requested
+            Err(ErrorHandled::TooGeneric) => {}
+            // deduplicate calls
+            other => return other,
+        }
+    }
+    if cfg!(debug_assertions) {
+        // Make sure we format the instance even if we do not print it.
+        // This serves as a regression test against an ICE on printing.
+        // The next two lines concatenated contain some discussion:
+        // https://rust-lang.zulipchat.com/#narrow/stream/146212-t-compiler.2Fconst-eval/
+        // subject/anon_const_instance_printing/near/135980032
+        let instance = with_no_trimmed_paths(|| key.value.instance.to_string());
+        trace!("const eval: {:?} ({})", key, instance);
+    }
+
+    let cid = key.value;
+    let def = cid.instance.def.with_opt_param();
+
+    if let Some(def) = def.as_local() {
+        if tcx.has_typeck_results(def.did) {
+            if let Some(error_reported) = tcx.typeck_opt_const_arg(def).tainted_by_errors {
+                return Err(ErrorHandled::Reported(error_reported));
+            }
+        }
+        if !tcx.is_mir_available(def.did) {
+            tcx.sess.delay_span_bug(
+                tcx.def_span(def.did),
+                &format!("no MIR body is available for {:?}", def.did),
+            );
+            return Err(ErrorHandled::Reported(ErrorReported {}));
+        }
+        if let Some(error_reported) = tcx.mir_const_qualif_opt_const_arg(def).error_occured {
+            return Err(ErrorHandled::Reported(error_reported));
+        }
+    }
+
+    let is_static = tcx.is_static(def.did);
+
+    let mut ecx = InterpCx::new(
+        tcx,
+        tcx.def_span(def.did),
+        key.param_env,
+        CompileTimeInterpreter::new(tcx.const_eval_limit()),
+        // Statics (and promoteds inside statics) may access other statics, because unlike consts
+        // they do not have to behave "as if" they were evaluated at runtime.
+        MemoryExtra { can_access_statics: is_static },
+    );
+
+    let res = ecx.load_mir(cid.instance.def, cid.promoted);
+    match res.and_then(|body| eval_body_using_ecx(&mut ecx, cid, &body)) {
+        Err(error) => {
+            let err = ConstEvalErr::new(&ecx, error, None);
+            // Some CTFE errors raise just a lint, not a hard error; see
+            // <https://github.com/rust-lang/rust/issues/71800>.
+            let is_hard_err = if let Some(def) = def.as_local() {
+                // (Associated) consts only emit a lint, since they might be unused.
+                !matches!(tcx.def_kind(def.did.to_def_id()), DefKind::Const | DefKind::AssocConst)
+                    // check if the inner InterpError is hard
+                    || err.error.is_hard_err()
+            } else {
+                // use of broken constant from other crate: always an error
+                true
+            };
+
+            if is_hard_err {
+                let msg = if is_static {
+                    Cow::from("could not evaluate static initializer")
+                } else {
+                    // If the current item has generics, we'd like to enrich the message with the
+                    // instance and its substs: to show the actual compile-time values, in addition to
+                    // the expression, leading to the const eval error.
+                    let instance = &key.value.instance;
+                    if !instance.substs.is_empty() {
+                        let instance = with_no_trimmed_paths(|| instance.to_string());
+                        let msg = format!("evaluation of `{}` failed", instance);
+                        Cow::from(msg)
+                    } else {
+                        Cow::from("evaluation of constant value failed")
+                    }
+                };
+
+                Err(err.report_as_error(ecx.tcx.at(ecx.cur_span()), &msg))
+            } else {
+                let hir_id = tcx.hir().local_def_id_to_hir_id(def.as_local().unwrap().did);
+                Err(err.report_as_lint(
+                    tcx.at(tcx.def_span(def.did)),
+                    "any use of this value will cause an error",
+                    hir_id,
+                    Some(err.span),
+                ))
+            }
+        }
+        Ok(mplace) => {
+            // Since evaluation had no errors, validate the resulting constant.
+            // This is a separate `try` block to provide more targeted error reporting.
+            let validation = try {
+                let mut ref_tracking = RefTracking::new(mplace);
+                let mut inner = false;
+                while let Some((mplace, path)) = ref_tracking.todo.pop() {
+                    let mode = match tcx.static_mutability(cid.instance.def_id()) {
+                        Some(_) if cid.promoted.is_some() => {
+                            // Promoteds in statics are allowed to point to statics.
+                            CtfeValidationMode::Const { inner, allow_static_ptrs: true }
+                        }
+                        Some(_) => CtfeValidationMode::Regular, // a `static`
+                        None => CtfeValidationMode::Const { inner, allow_static_ptrs: false },
+                    };
+                    ecx.const_validate_operand(&mplace.into(), path, &mut ref_tracking, mode)?;
+                    inner = true;
+                }
+            };
+            let alloc_id = mplace.ptr.provenance.unwrap();
+            if let Err(error) = validation {
+                // Validation failed, report an error. This is always a hard error.
+                let err = ConstEvalErr::new(&ecx, error, None);
+                Err(err.struct_error(
+                    ecx.tcx,
+                    "it is undefined behavior to use this value",
+                    |mut diag| {
+                        diag.note(note_on_undefined_behavior_error());
+                        diag.note(&format!(
+                            "the raw bytes of the constant ({}",
+                            display_allocation(
+                                *ecx.tcx,
+                                ecx.tcx.global_alloc(alloc_id).unwrap_memory()
+                            )
+                        ));
+                        diag.emit();
+                    },
+                ))
+            } else {
+                // Convert to raw constant
+                Ok(ConstAlloc { alloc_id, ty: mplace.layout.ty })
+            }
+        }
+    }
+}
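
The `Reveal::UserFacing` / `Reveal::All` deduplication at the top of both providers above can be illustrated with a standalone sketch (illustrative names, not rustc APIs): try the user-facing variant first, and only fall back to the `Reveal::All` computation when that attempt was too generic:

#[derive(Clone, Copy, PartialEq)]
enum Reveal {
    UserFacing,
    All,
}

#[derive(Debug, PartialEq)]
enum EvalError {
    TooGeneric,
}

fn eval(reveal: Reveal, key: &str) -> Result<u64, EvalError> {
    if reveal == Reveal::All {
        // Re-ask the same query with the user-facing param env first.
        match eval(Reveal::UserFacing, key) {
            // Only fall through to the real Reveal::All computation when the
            // user-facing attempt was too generic.
            Err(EvalError::TooGeneric) => {}
            // Otherwise, deduplicate: reuse the (cached) result or error.
            other => return other,
        }
    }
    // Stand-in for the real computation (interpreting the MIR body).
    if reveal == Reveal::UserFacing && key.contains("generic") {
        Err(EvalError::TooGeneric)
    } else {
        Ok(42)
    }
}

fn main() {
    assert_eq!(eval(Reveal::All, "plain_const"), Ok(42));
    assert_eq!(eval(Reveal::All, "generic_assoc_const"), Ok(42));
}
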
diff --git a/compiler/rustc_const_eval/src/const_eval/fn_queries.rs b/compiler/rustc_const_eval/src/const_eval/fn_queries.rs
new file mode 100644
index 00000000000..40419a4d201
--- /dev/null
+++ b/compiler/rustc_const_eval/src/const_eval/fn_queries.rs
@@ -0,0 +1,112 @@
+use rustc_hir as hir;
+use rustc_hir::def_id::{DefId, LocalDefId};
+use rustc_middle::hir::map::blocks::FnLikeNode;
+use rustc_middle::ty::query::Providers;
+use rustc_middle::ty::TyCtxt;
+use rustc_span::symbol::Symbol;
+use rustc_target::spec::abi::Abi;
+
+/// Whether the `def_id` counts as const fn in your current crate, considering all active
+/// feature gates
+pub fn is_const_fn(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
+    tcx.is_const_fn_raw(def_id)
+        && match is_unstable_const_fn(tcx, def_id) {
+            Some(feature_name) => {
+                // has a `rustc_const_unstable` attribute, check whether the user enabled the
+                // corresponding feature gate.
+                tcx.features().declared_lib_features.iter().any(|&(sym, _)| sym == feature_name)
+            }
+            // Functions without const stability are either stable, user-written
+            // const fns, or the user is using feature gates and we thus don't
+            // care what they do.
+            None => true,
+        }
+}
+
+/// Whether the `def_id` is an unstable const fn and what feature gate is necessary to enable it
+pub fn is_unstable_const_fn(tcx: TyCtxt<'_>, def_id: DefId) -> Option<Symbol> {
+    if tcx.is_const_fn_raw(def_id) {
+        let const_stab = tcx.lookup_const_stability(def_id)?;
+        if const_stab.level.is_unstable() { Some(const_stab.feature) } else { None }
+    } else {
+        None
+    }
+}
+
+pub fn is_parent_const_impl_raw(tcx: TyCtxt<'_>, hir_id: hir::HirId) -> bool {
+    let parent_id = tcx.hir().get_parent_did(hir_id);
+    if !parent_id.is_top_level_module() { is_const_impl_raw(tcx, parent_id) } else { false }
+}
+
+/// Checks whether the function has a `const` modifier or, in case it is an intrinsic, whether
+/// said intrinsic has a `rustc_const_{un,}stable` attribute.
+fn is_const_fn_raw(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
+    let hir_id = tcx.hir().local_def_id_to_hir_id(def_id.expect_local());
+
+    let node = tcx.hir().get(hir_id);
+
+    if let hir::Node::ForeignItem(hir::ForeignItem { kind: hir::ForeignItemKind::Fn(..), .. }) =
+        node
+    {
+        // Intrinsics use `rustc_const_{un,}stable` attributes to indicate constness. All other
+        // foreign items cannot be evaluated at compile-time.
+        if let Abi::RustIntrinsic | Abi::PlatformIntrinsic = tcx.hir().get_foreign_abi(hir_id) {
+            tcx.lookup_const_stability(def_id).is_some()
+        } else {
+            false
+        }
+    } else if let Some(fn_like) = FnLikeNode::from_node(node) {
+        if fn_like.constness() == hir::Constness::Const {
+            return true;
+        }
+
+        // If the function itself is not annotated with `const`, it may still be a `const fn`
+        // if it resides in a const trait impl.
+        is_parent_const_impl_raw(tcx, hir_id)
+    } else if let hir::Node::Ctor(_) = node {
+        true
+    } else {
+        false
+    }
+}
+
+/// Checks whether the given item is an `impl` that has a `const` modifier.
+fn is_const_impl_raw(tcx: TyCtxt<'_>, def_id: LocalDefId) -> bool {
+    let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
+    let node = tcx.hir().get(hir_id);
+    matches!(
+        node,
+        hir::Node::Item(hir::Item {
+            kind: hir::ItemKind::Impl(hir::Impl { constness: hir::Constness::Const, .. }),
+            ..
+        })
+    )
+}
+
+fn is_promotable_const_fn(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
+    is_const_fn(tcx, def_id)
+        && match tcx.lookup_const_stability(def_id) {
+            Some(stab) => {
+                if cfg!(debug_assertions) && stab.promotable {
+                    let sig = tcx.fn_sig(def_id);
+                    assert_eq!(
+                        sig.unsafety(),
+                        hir::Unsafety::Normal,
+                        "don't mark const unsafe fns as promotable",
+                        // https://github.com/rust-lang/rust/pull/53851#issuecomment-418760682
+                    );
+                }
+                stab.promotable
+            }
+            None => false,
+        }
+}
+
+pub fn provide(providers: &mut Providers) {
+    *providers = Providers {
+        is_const_fn_raw,
+        is_const_impl_raw: |tcx, def_id| is_const_impl_raw(tcx, def_id.expect_local()),
+        is_promotable_const_fn,
+        ..*providers
+    };
+}
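
A standalone sketch of the feature-gate check in `is_const_fn` above (illustrative signature, not the rustc API): a function counts as const in the current crate if it is const at all and, when it is gated behind a `rustc_const_unstable` feature, that feature is declared by the crate:

fn is_const_fn(
    is_const_fn_raw: bool,
    unstable_feature: Option<&str>,
    declared_lib_features: &[&str],
) -> bool {
    is_const_fn_raw
        && match unstable_feature {
            // Gated const fn: the calling crate must declare the feature.
            Some(feature) => declared_lib_features.iter().any(|&f| f == feature),
            // No const-stability attribute: nothing further to check.
            None => true,
        }
}

fn main() {
    assert!(is_const_fn(true, None, &[]));
    assert!(is_const_fn(true, Some("const_foo"), &["const_foo"]));
    assert!(!is_const_fn(true, Some("const_foo"), &[]));
    assert!(!is_const_fn(false, None, &["const_foo"]));
}
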
diff --git a/compiler/rustc_const_eval/src/const_eval/machine.rs b/compiler/rustc_const_eval/src/const_eval/machine.rs
new file mode 100644
index 00000000000..8a90686b900
--- /dev/null
+++ b/compiler/rustc_const_eval/src/const_eval/machine.rs
@@ -0,0 +1,474 @@
+use rustc_middle::mir;
+use rustc_middle::ty::{self, Ty};
+use std::borrow::Borrow;
+use std::collections::hash_map::Entry;
+use std::hash::Hash;
+
+use rustc_data_structures::fx::FxHashMap;
+use std::fmt;
+
+use rustc_ast::Mutability;
+use rustc_hir::def_id::DefId;
+use rustc_middle::mir::AssertMessage;
+use rustc_session::Limit;
+use rustc_span::symbol::{sym, Symbol};
+use rustc_target::abi::{Align, Size};
+use rustc_target::spec::abi::Abi;
+
+use crate::interpret::{
+    self, compile_time_machine, AllocId, Allocation, Frame, ImmTy, InterpCx, InterpResult, OpTy,
+    PlaceTy, Scalar, StackPopUnwind,
+};
+
+use super::error::*;
+
+impl<'mir, 'tcx> InterpCx<'mir, 'tcx, CompileTimeInterpreter<'mir, 'tcx>> {
+    /// "Intercept" a function call to a panic-related function
+    /// because we have something special to do for it.
+    /// If this returns successfully (`Ok`), the function should just be evaluated normally.
+    fn hook_panic_fn(
+        &mut self,
+        instance: ty::Instance<'tcx>,
+        args: &[OpTy<'tcx>],
+    ) -> InterpResult<'tcx, Option<ty::Instance<'tcx>>> {
+        // The list of functions we handle here must be in sync with
+        // `is_lang_panic_fn` in `transform/check_consts/mod.rs`.
+        let def_id = instance.def_id();
+        if Some(def_id) == self.tcx.lang_items().panic_fn()
+            || Some(def_id) == self.tcx.lang_items().panic_str()
+            || Some(def_id) == self.tcx.lang_items().begin_panic_fn()
+        {
+            // &str
+            assert!(args.len() == 1);
+
+            let msg_place = self.deref_operand(&args[0])?;
+            let msg = Symbol::intern(self.read_str(&msg_place)?);
+            let span = self.find_closest_untracked_caller_location();
+            let (file, line, col) = self.location_triple_for_span(span);
+            return Err(ConstEvalErrKind::Panic { msg, file, line, col }.into());
+        } else if Some(def_id) == self.tcx.lang_items().panic_fmt()
+            || Some(def_id) == self.tcx.lang_items().begin_panic_fmt()
+        {
+            // For panic_fmt, call const_panic_fmt instead.
+            if let Some(const_panic_fmt) = self.tcx.lang_items().const_panic_fmt() {
+                return Ok(Some(
+                    ty::Instance::resolve(
+                        *self.tcx,
+                        ty::ParamEnv::reveal_all(),
+                        const_panic_fmt,
+                        self.tcx.intern_substs(&[]),
+                    )
+                    .unwrap()
+                    .unwrap(),
+                ));
+            }
+        }
+        Ok(None)
+    }
+}
+
+/// Extra machine state for CTFE, and the Machine instance
+pub struct CompileTimeInterpreter<'mir, 'tcx> {
+    /// For now, the number of terminators that can be evaluated before we throw a resource
+    /// exhaustion error.
+    ///
+    /// Setting this to `0` disables the limit and allows the interpreter to run forever.
+    pub steps_remaining: usize,
+
+    /// The virtual call stack.
+    pub(crate) stack: Vec<Frame<'mir, 'tcx, AllocId, ()>>,
+}
+
+#[derive(Copy, Clone, Debug)]
+pub struct MemoryExtra {
+    /// We need to make sure consts never point to anything mutable, even recursively. That is
+    /// relied on for pattern matching on consts with references.
+    /// To achieve this, two pieces have to work together:
+    /// * Interning makes everything outside of statics immutable.
+    /// * Pointers to allocations inside of statics can never leak outside, to a non-static global.
+    /// This boolean here controls the second part.
+    pub(super) can_access_statics: bool,
+}
+
+impl<'mir, 'tcx> CompileTimeInterpreter<'mir, 'tcx> {
+    pub(super) fn new(const_eval_limit: Limit) -> Self {
+        CompileTimeInterpreter { steps_remaining: const_eval_limit.0, stack: Vec::new() }
+    }
+}
+
+impl<K: Hash + Eq, V> interpret::AllocMap<K, V> for FxHashMap<K, V> {
+    #[inline(always)]
+    fn contains_key<Q: ?Sized + Hash + Eq>(&mut self, k: &Q) -> bool
+    where
+        K: Borrow<Q>,
+    {
+        FxHashMap::contains_key(self, k)
+    }
+
+    #[inline(always)]
+    fn insert(&mut self, k: K, v: V) -> Option<V> {
+        FxHashMap::insert(self, k, v)
+    }
+
+    #[inline(always)]
+    fn remove<Q: ?Sized + Hash + Eq>(&mut self, k: &Q) -> Option<V>
+    where
+        K: Borrow<Q>,
+    {
+        FxHashMap::remove(self, k)
+    }
+
+    #[inline(always)]
+    fn filter_map_collect<T>(&self, mut f: impl FnMut(&K, &V) -> Option<T>) -> Vec<T> {
+        self.iter().filter_map(move |(k, v)| f(k, &*v)).collect()
+    }
+
+    #[inline(always)]
+    fn get_or<E>(&self, k: K, vacant: impl FnOnce() -> Result<V, E>) -> Result<&V, E> {
+        match self.get(&k) {
+            Some(v) => Ok(v),
+            None => {
+                vacant()?;
+                bug!("The CTFE machine shouldn't ever need to extend the alloc_map when reading")
+            }
+        }
+    }
+
+    #[inline(always)]
+    fn get_mut_or<E>(&mut self, k: K, vacant: impl FnOnce() -> Result<V, E>) -> Result<&mut V, E> {
+        match self.entry(k) {
+            Entry::Occupied(e) => Ok(e.into_mut()),
+            Entry::Vacant(e) => {
+                let v = vacant()?;
+                Ok(e.insert(v))
+            }
+        }
+    }
+}
+
+crate type CompileTimeEvalContext<'mir, 'tcx> =
+    InterpCx<'mir, 'tcx, CompileTimeInterpreter<'mir, 'tcx>>;
+
+#[derive(Debug, PartialEq, Eq, Copy, Clone)]
+pub enum MemoryKind {
+    Heap,
+}
+
+impl fmt::Display for MemoryKind {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            MemoryKind::Heap => write!(f, "heap allocation"),
+        }
+    }
+}
+
+impl interpret::MayLeak for MemoryKind {
+    #[inline(always)]
+    fn may_leak(self) -> bool {
+        match self {
+            MemoryKind::Heap => false,
+        }
+    }
+}
+
+impl interpret::MayLeak for ! {
+    #[inline(always)]
+    fn may_leak(self) -> bool {
+        // `self` is uninhabited
+        self
+    }
+}
+
+impl<'mir, 'tcx: 'mir> CompileTimeEvalContext<'mir, 'tcx> {
+    fn guaranteed_eq(&mut self, a: Scalar, b: Scalar) -> bool {
+        match (a, b) {
+            // Comparisons between integers are always known.
+            (Scalar::Int { .. }, Scalar::Int { .. }) => a == b,
+            // Equality with integers can never be known for sure.
+            (Scalar::Int { .. }, Scalar::Ptr(..)) | (Scalar::Ptr(..), Scalar::Int { .. }) => false,
+            // FIXME: return `true` for when both sides are the same pointer, *except* that
+            // some things (like functions and vtables) do not have stable addresses
+            // so we need to be careful around them (see e.g. #73722).
+            (Scalar::Ptr(..), Scalar::Ptr(..)) => false,
+        }
+    }
+
+    fn guaranteed_ne(&mut self, a: Scalar, b: Scalar) -> bool {
+        match (a, b) {
+            // Comparisons between integers are always known.
+            (Scalar::Int(_), Scalar::Int(_)) => a != b,
+            // Comparisons of abstract pointers with null pointers are known if the pointer
+            // is in bounds, because if they are in bounds, the pointer can't be null.
+            // Inequality with integers other than null can never be known for sure.
+            (Scalar::Int(int), Scalar::Ptr(ptr, _)) | (Scalar::Ptr(ptr, _), Scalar::Int(int)) => {
+                int.is_null() && !self.memory.ptr_may_be_null(ptr.into())
+            }
+            // FIXME: return `true` for at least some comparisons where we can reliably
+            // determine the result of runtime inequality tests at compile-time.
+            // Examples include comparison of addresses in different static items.
+            (Scalar::Ptr(..), Scalar::Ptr(..)) => false,
+        }
+    }
+}
+
+impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir, 'tcx> {
+    compile_time_machine!(<'mir, 'tcx>);
+
+    type MemoryKind = MemoryKind;
+
+    type MemoryExtra = MemoryExtra;
+
+    const PANIC_ON_ALLOC_FAIL: bool = false; // will be raised as a proper error
+
+    fn load_mir(
+        ecx: &InterpCx<'mir, 'tcx, Self>,
+        instance: ty::InstanceDef<'tcx>,
+    ) -> InterpResult<'tcx, &'tcx mir::Body<'tcx>> {
+        match instance {
+            ty::InstanceDef::Item(def) => {
+                if ecx.tcx.is_ctfe_mir_available(def.did) {
+                    Ok(ecx.tcx.mir_for_ctfe_opt_const_arg(def))
+                } else {
+                    let path = ecx.tcx.def_path_str(def.did);
+                    Err(ConstEvalErrKind::NeedsRfc(format!("calling extern function `{}`", path))
+                        .into())
+                }
+            }
+            _ => Ok(ecx.tcx.instance_mir(instance)),
+        }
+    }
+
+    fn find_mir_or_eval_fn(
+        ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        instance: ty::Instance<'tcx>,
+        _abi: Abi,
+        args: &[OpTy<'tcx>],
+        _ret: Option<(&PlaceTy<'tcx>, mir::BasicBlock)>,
+        _unwind: StackPopUnwind, // unwinding is not supported in consts
+    ) -> InterpResult<'tcx, Option<&'mir mir::Body<'tcx>>> {
+        debug!("find_mir_or_eval_fn: {:?}", instance);
+
+        // Only check non-glue functions
+        if let ty::InstanceDef::Item(def) = instance.def {
+            // Execution might have wandered off into other crates, so we cannot do a stability-
+            // sensitive check here.  But we can at least rule out functions that are not const
+            // at all.
+            if !ecx.tcx.is_const_fn_raw(def.did) {
+                // allow calling functions marked with #[default_method_body_is_const].
+                if !ecx.tcx.has_attr(def.did, sym::default_method_body_is_const) {
+                    // Some functions we support even if they are non-const -- but avoid testing
+                    // that for const fn!
+                    if let Some(new_instance) = ecx.hook_panic_fn(instance, args)? {
+                        // We call another const fn instead.
+                        return Self::find_mir_or_eval_fn(
+                            ecx,
+                            new_instance,
+                            _abi,
+                            args,
+                            _ret,
+                            _unwind,
+                        );
+                    } else {
+                        // We certainly do *not* want to actually call the fn
+                        // though, so be sure we return here.
+                        throw_unsup_format!("calling non-const function `{}`", instance)
+                    }
+                }
+            }
+        }
+        // This is a const fn. Call it.
+        Ok(Some(ecx.load_mir(instance.def, None)?))
+    }
+
+    fn call_intrinsic(
+        ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        instance: ty::Instance<'tcx>,
+        args: &[OpTy<'tcx>],
+        ret: Option<(&PlaceTy<'tcx>, mir::BasicBlock)>,
+        _unwind: StackPopUnwind,
+    ) -> InterpResult<'tcx> {
+        // Shared intrinsics.
+        if ecx.emulate_intrinsic(instance, args, ret)? {
+            return Ok(());
+        }
+        let intrinsic_name = ecx.tcx.item_name(instance.def_id());
+
+        // CTFE-specific intrinsics.
+        let (dest, ret) = match ret {
+            None => {
+                return Err(ConstEvalErrKind::NeedsRfc(format!(
+                    "calling intrinsic `{}`",
+                    intrinsic_name
+                ))
+                .into());
+            }
+            Some(p) => p,
+        };
+        match intrinsic_name {
+            sym::ptr_guaranteed_eq | sym::ptr_guaranteed_ne => {
+                let a = ecx.read_immediate(&args[0])?.to_scalar()?;
+                let b = ecx.read_immediate(&args[1])?.to_scalar()?;
+                let cmp = if intrinsic_name == sym::ptr_guaranteed_eq {
+                    ecx.guaranteed_eq(a, b)
+                } else {
+                    ecx.guaranteed_ne(a, b)
+                };
+                ecx.write_scalar(Scalar::from_bool(cmp), dest)?;
+            }
+            sym::const_allocate => {
+                let size = ecx.read_scalar(&args[0])?.to_machine_usize(ecx)?;
+                let align = ecx.read_scalar(&args[1])?.to_machine_usize(ecx)?;
+
+                let align = match Align::from_bytes(align) {
+                    Ok(a) => a,
+                    Err(err) => throw_ub_format!("align has to be a power of 2, {}", err),
+                };
+
+                let ptr = ecx.memory.allocate(
+                    Size::from_bytes(size as u64),
+                    align,
+                    interpret::MemoryKind::Machine(MemoryKind::Heap),
+                )?;
+                ecx.write_pointer(ptr, dest)?;
+            }
+            _ => {
+                return Err(ConstEvalErrKind::NeedsRfc(format!(
+                    "calling intrinsic `{}`",
+                    intrinsic_name
+                ))
+                .into());
+            }
+        }
+
+        ecx.go_to_block(ret);
+        Ok(())
+    }
+
+    fn assert_panic(
+        ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        msg: &AssertMessage<'tcx>,
+        _unwind: Option<mir::BasicBlock>,
+    ) -> InterpResult<'tcx> {
+        use rustc_middle::mir::AssertKind::*;
+        // Convert `AssertKind<Operand>` to `AssertKind<Scalar>`.
+        let eval_to_int =
+            |op| ecx.read_immediate(&ecx.eval_operand(op, None)?).map(|x| x.to_const_int());
+        let err = match msg {
+            BoundsCheck { ref len, ref index } => {
+                let len = eval_to_int(len)?;
+                let index = eval_to_int(index)?;
+                BoundsCheck { len, index }
+            }
+            Overflow(op, l, r) => Overflow(*op, eval_to_int(l)?, eval_to_int(r)?),
+            OverflowNeg(op) => OverflowNeg(eval_to_int(op)?),
+            DivisionByZero(op) => DivisionByZero(eval_to_int(op)?),
+            RemainderByZero(op) => RemainderByZero(eval_to_int(op)?),
+            ResumedAfterReturn(generator_kind) => ResumedAfterReturn(*generator_kind),
+            ResumedAfterPanic(generator_kind) => ResumedAfterPanic(*generator_kind),
+        };
+        Err(ConstEvalErrKind::AssertFailure(err).into())
+    }
+
+    fn abort(_ecx: &mut InterpCx<'mir, 'tcx, Self>, msg: String) -> InterpResult<'tcx, !> {
+        Err(ConstEvalErrKind::Abort(msg).into())
+    }
+
+    fn binary_ptr_op(
+        _ecx: &InterpCx<'mir, 'tcx, Self>,
+        _bin_op: mir::BinOp,
+        _left: &ImmTy<'tcx>,
+        _right: &ImmTy<'tcx>,
+    ) -> InterpResult<'tcx, (Scalar, bool, Ty<'tcx>)> {
+        Err(ConstEvalErrKind::NeedsRfc("pointer arithmetic or comparison".to_string()).into())
+    }
+
+    fn box_alloc(
+        _ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        _dest: &PlaceTy<'tcx>,
+    ) -> InterpResult<'tcx> {
+        Err(ConstEvalErrKind::NeedsRfc("heap allocations via `box` keyword".to_string()).into())
+    }
+
+    fn before_terminator(ecx: &mut InterpCx<'mir, 'tcx, Self>) -> InterpResult<'tcx> {
+        // The step limit has already been hit in a previous call to `before_terminator`.
+        if ecx.machine.steps_remaining == 0 {
+            return Ok(());
+        }
+
+        ecx.machine.steps_remaining -= 1;
+        if ecx.machine.steps_remaining == 0 {
+            throw_exhaust!(StepLimitReached)
+        }
+
+        Ok(())
+    }
+
+    #[inline(always)]
+    fn init_frame_extra(
+        ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        frame: Frame<'mir, 'tcx>,
+    ) -> InterpResult<'tcx, Frame<'mir, 'tcx>> {
+        // Enforce stack size limit. Add 1 because this is run before the new frame is pushed.
+        if !ecx.recursion_limit.value_within_limit(ecx.stack().len() + 1) {
+            throw_exhaust!(StackFrameLimitReached)
+        } else {
+            Ok(frame)
+        }
+    }
+
+    #[inline(always)]
+    fn stack(
+        ecx: &'a InterpCx<'mir, 'tcx, Self>,
+    ) -> &'a [Frame<'mir, 'tcx, Self::PointerTag, Self::FrameExtra>] {
+        &ecx.machine.stack
+    }
+
+    #[inline(always)]
+    fn stack_mut(
+        ecx: &'a mut InterpCx<'mir, 'tcx, Self>,
+    ) -> &'a mut Vec<Frame<'mir, 'tcx, Self::PointerTag, Self::FrameExtra>> {
+        &mut ecx.machine.stack
+    }
+
+    fn before_access_global(
+        memory_extra: &MemoryExtra,
+        alloc_id: AllocId,
+        allocation: &Allocation,
+        static_def_id: Option<DefId>,
+        is_write: bool,
+    ) -> InterpResult<'tcx> {
+        if is_write {
+            // Write access. These are never allowed, but we give a targeted error message.
+            if allocation.mutability == Mutability::Not {
+                Err(err_ub!(WriteToReadOnly(alloc_id)).into())
+            } else {
+                Err(ConstEvalErrKind::ModifiedGlobal.into())
+            }
+        } else {
+            // Read access. These are usually allowed, with some exceptions.
+            if memory_extra.can_access_statics {
+                // Machine configuration allows us read from anything (e.g., `static` initializer).
+                Ok(())
+            } else if static_def_id.is_some() {
+                // Machine configuration does not allow us to read statics
+                // (e.g., `const` initializer).
+                // See const_eval::machine::MemoryExtra::can_access_statics for why
+                // this check is so important: if we could read statics, we could read pointers
+                // to mutable allocations *inside* statics. These allocations are not themselves
+                // statics, so pointers to them can get around the check in `validity.rs`.
+                Err(ConstEvalErrKind::ConstAccessesStatic.into())
+            } else {
+                // Immutable global, this read is fine.
+                // But make sure we never accept a read from something mutable, that would be
+                // unsound. The reason is that the content of this allocation may be different
+                // now and at run-time, so if we permit reading now we might return the wrong value.
+                assert_eq!(allocation.mutability, Mutability::Not);
+                Ok(())
+            }
+        }
+    }
+}
+
+// Please do not add any code below the above `Machine` trait impl. I (oli-obk) plan more cleanups
+// so we can end up having a file with just that impl, but for now, let's keep the impl discoverable
+// at the bottom of this file.
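
The step accounting in `before_terminator` above can be illustrated with a standalone sketch (illustrative names, not rustc types): decrement a budget before each terminator, raise the exhaustion error exactly once when it hits zero, and treat an already-zero budget as "limit disabled or already reported":

struct Machine {
    steps_remaining: usize,
}

#[derive(Debug, PartialEq)]
enum Exhaust {
    StepLimitReached,
}

fn before_terminator(m: &mut Machine) -> Result<(), Exhaust> {
    // A zero budget means the limit was either disabled from the start or
    // already reported in an earlier call; don't throw again.
    if m.steps_remaining == 0 {
        return Ok(());
    }
    m.steps_remaining -= 1;
    if m.steps_remaining == 0 {
        return Err(Exhaust::StepLimitReached);
    }
    Ok(())
}

fn main() {
    let mut m = Machine { steps_remaining: 3 };
    assert_eq!(before_terminator(&mut m), Ok(())); // 2 steps left
    assert_eq!(before_terminator(&mut m), Ok(())); // 1 step left
    assert_eq!(before_terminator(&mut m), Err(Exhaust::StepLimitReached));
    assert_eq!(before_terminator(&mut m), Ok(())); // limit already reported
}
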
diff --git a/compiler/rustc_const_eval/src/const_eval/mod.rs b/compiler/rustc_const_eval/src/const_eval/mod.rs
new file mode 100644
index 00000000000..a334165df4c
--- /dev/null
+++ b/compiler/rustc_const_eval/src/const_eval/mod.rs
@@ -0,0 +1,207 @@
+// Not in interpret to make sure we do not use private implementation details
+
+use std::convert::TryFrom;
+
+use rustc_hir::Mutability;
+use rustc_middle::ty::{self, TyCtxt};
+use rustc_middle::{
+    mir::{self, interpret::ConstAlloc},
+    ty::ScalarInt,
+};
+use rustc_span::{source_map::DUMMY_SP, symbol::Symbol};
+
+use crate::interpret::{
+    intern_const_alloc_recursive, ConstValue, InternKind, InterpCx, MPlaceTy, MemPlaceMeta, Scalar,
+};
+
+mod error;
+mod eval_queries;
+mod fn_queries;
+mod machine;
+
+pub use error::*;
+pub use eval_queries::*;
+pub use fn_queries::*;
+pub use machine::*;
+
+pub(crate) fn const_caller_location(
+    tcx: TyCtxt<'tcx>,
+    (file, line, col): (Symbol, u32, u32),
+) -> ConstValue<'tcx> {
+    trace!("const_caller_location: {}:{}:{}", file, line, col);
+    let mut ecx = mk_eval_cx(tcx, DUMMY_SP, ty::ParamEnv::reveal_all(), false);
+
+    let loc_place = ecx.alloc_caller_location(file, line, col);
+    if intern_const_alloc_recursive(&mut ecx, InternKind::Constant, &loc_place).is_err() {
+        bug!("intern_const_alloc_recursive should not error in this case")
+    }
+    ConstValue::Scalar(Scalar::from_pointer(loc_place.ptr.into_pointer_or_addr().unwrap(), &tcx))
+}
+
+/// Convert an evaluated constant to a type level constant
+pub(crate) fn const_to_valtree<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    param_env: ty::ParamEnv<'tcx>,
+    raw: ConstAlloc<'tcx>,
+) -> Option<ty::ValTree<'tcx>> {
+    let ecx = mk_eval_cx(
+        tcx, DUMMY_SP, param_env,
+        // It is absolutely crucial for soundness that
+        // we do not read from static items or other mutable memory.
+        false,
+    );
+    let place = ecx.raw_const_to_mplace(raw).unwrap();
+    const_to_valtree_inner(&ecx, &place)
+}
+
+fn const_to_valtree_inner<'tcx>(
+    ecx: &CompileTimeEvalContext<'tcx, 'tcx>,
+    place: &MPlaceTy<'tcx>,
+) -> Option<ty::ValTree<'tcx>> {
+    let branches = |n, variant| {
+        let place = match variant {
+            Some(variant) => ecx.mplace_downcast(&place, variant).unwrap(),
+            None => *place,
+        };
+        let variant =
+            variant.map(|variant| Some(ty::ValTree::Leaf(ScalarInt::from(variant.as_u32()))));
+        let fields = (0..n).map(|i| {
+            let field = ecx.mplace_field(&place, i).unwrap();
+            const_to_valtree_inner(ecx, &field)
+        });
+        // For enums, we prepend their variant index before the variant's fields so we can figure
+        // out the variant again when just seeing a valtree.
+        let branches = variant.into_iter().chain(fields);
+        Some(ty::ValTree::Branch(
+            ecx.tcx.arena.alloc_from_iter(branches.collect::<Option<Vec<_>>>()?),
+        ))
+    };
+    match place.layout.ty.kind() {
+        ty::FnDef(..) => Some(ty::ValTree::zst()),
+        ty::Bool | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Char => {
+            let val = ecx.read_immediate(&place.into()).unwrap();
+            let val = val.to_scalar().unwrap();
+            Some(ty::ValTree::Leaf(val.assert_int()))
+        }
+
+        // Raw pointers are not allowed in type level constants, as we cannot properly test them for
+        // equality at compile-time (see `ptr_guaranteed_eq`/`_ne`).
+        // Technically we could allow function pointers (represented as `ty::Instance`), but this is not guaranteed to
+        // agree with runtime equality tests.
+        ty::FnPtr(_) | ty::RawPtr(_) => None,
+        ty::Ref(..) => unimplemented!("need to use deref_const"),
+
+        // Trait objects are not allowed in type level constants, as we have no concept for
+        // resolving their backing type, even if we can do that at const eval time. We may
+        // hypothetically be able to allow `dyn StructuralEq` trait objects in the future,
+        // but it is unclear if this is useful.
+        ty::Dynamic(..) => None,
+
+        ty::Slice(_) | ty::Str => {
+            unimplemented!("need to find the backing data of the slice/str and recurse on that")
+        }
+        ty::Tuple(substs) => branches(substs.len(), None),
+        ty::Array(_, len) => branches(usize::try_from(len.eval_usize(ecx.tcx.tcx, ecx.param_env)).unwrap(), None),
+
+        ty::Adt(def, _) => {
+            if def.variants.is_empty() {
+                bug!("uninhabited types should have errored and never gotten converted to valtree")
+            }
+
+            let variant = ecx.read_discriminant(&place.into()).unwrap().1;
+
+            branches(def.variants[variant].fields.len(), def.is_enum().then_some(variant))
+        }
+
+        ty::Never
+        | ty::Error(_)
+        | ty::Foreign(..)
+        | ty::Infer(ty::FreshIntTy(_))
+        | ty::Infer(ty::FreshFloatTy(_))
+        | ty::Projection(..)
+        | ty::Param(_)
+        | ty::Bound(..)
+        | ty::Placeholder(..)
+        // FIXME(oli-obk): we could look behind opaque types
+        | ty::Opaque(..)
+        | ty::Infer(_)
+        // FIXME(oli-obk): we can probably encode closures just like structs
+        | ty::Closure(..)
+        | ty::Generator(..)
+        | ty::GeneratorWitness(..) => None,
+    }
+}
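+
+// A rough sketch (plain Rust, not the actual `ty::ValTree` API) of the shape produced
+// above for `Some(3u8)`: the variant index is prepended as a leaf, followed by the
+// variant's fields. The `Tree` type and `some_3` helper are only illustrative.
+//
+//     enum Tree { Leaf(u128), Branch(Vec<Tree>) }
+//
+//     fn some_3() -> Tree {
+//         // variant 1 (`Some`) first, then its single field `3u8`
+//         Tree::Branch(vec![Tree::Leaf(1), Tree::Leaf(3)])
+//     }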
+
+/// This function uses `unwrap` copiously, because an already validated constant
+/// must have valid fields and can thus never fail outside of compiler bugs. However, it is
+/// also invoked from the pretty printer, where it can receive enums with no variants;
+/// `read_discriminant` is not prepared for those, so that case is handled explicitly below.
+pub(crate) fn destructure_const<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    param_env: ty::ParamEnv<'tcx>,
+    val: &'tcx ty::Const<'tcx>,
+) -> mir::DestructuredConst<'tcx> {
+    trace!("destructure_const: {:?}", val);
+    let ecx = mk_eval_cx(tcx, DUMMY_SP, param_env, false);
+    let op = ecx.const_to_op(val, None).unwrap();
+
+    // We go to `usize` as we cannot allocate anything bigger anyway.
+    let (field_count, variant, down) = match val.ty.kind() {
+        ty::Array(_, len) => (usize::try_from(len.eval_usize(tcx, param_env)).unwrap(), None, op),
+        ty::Adt(def, _) if def.variants.is_empty() => {
+            return mir::DestructuredConst { variant: None, fields: &[] };
+        }
+        ty::Adt(def, _) => {
+            let variant = ecx.read_discriminant(&op).unwrap().1;
+            let down = ecx.operand_downcast(&op, variant).unwrap();
+            (def.variants[variant].fields.len(), Some(variant), down)
+        }
+        ty::Tuple(substs) => (substs.len(), None, op),
+        _ => bug!("cannot destructure constant {:?}", val),
+    };
+
+    let fields_iter = (0..field_count).map(|i| {
+        let field_op = ecx.operand_field(&down, i).unwrap();
+        let val = op_to_const(&ecx, &field_op);
+        ty::Const::from_value(tcx, val, field_op.layout.ty)
+    });
+    let fields = tcx.arena.alloc_from_iter(fields_iter);
+
+    mir::DestructuredConst { variant, fields }
+}
+
+pub(crate) fn deref_const<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    param_env: ty::ParamEnv<'tcx>,
+    val: &'tcx ty::Const<'tcx>,
+) -> &'tcx ty::Const<'tcx> {
+    trace!("deref_const: {:?}", val);
+    let ecx = mk_eval_cx(tcx, DUMMY_SP, param_env, false);
+    let op = ecx.const_to_op(val, None).unwrap();
+    let mplace = ecx.deref_operand(&op).unwrap();
+    if let Some(alloc_id) = mplace.ptr.provenance {
+        assert_eq!(
+            tcx.get_global_alloc(alloc_id).unwrap().unwrap_memory().mutability,
+            Mutability::Not,
+            "deref_const cannot be used with mutable allocations as \
+            that could allow pattern matching to observe mutable statics",
+        );
+    }
+
+    let ty = match mplace.meta {
+        MemPlaceMeta::None => mplace.layout.ty,
+        MemPlaceMeta::Poison => bug!("poison metadata in `deref_const`: {:#?}", mplace),
+        // In case of unsized types, figure out the real type behind.
+        MemPlaceMeta::Meta(scalar) => match mplace.layout.ty.kind() {
+            ty::Str => bug!("there's no sized equivalent of a `str`"),
+            ty::Slice(elem_ty) => tcx.mk_array(elem_ty, scalar.to_machine_usize(&tcx).unwrap()),
+            _ => bug!(
+                "type {} should not have metadata, but had {:?}",
+                mplace.layout.ty,
+                mplace.meta
+            ),
+        },
+    };
+
+    tcx.mk_const(ty::Const { val: ty::ConstKind::Value(op_to_const(&ecx, &mplace.into())), ty })
+}
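+
+// Surface-level sketch (not compiler code) of the slice case handled above: for a
+// constant of type `&[u8]` whose length metadata is 3, the pointee type is recovered
+// as `[u8; 3]`. The constant name is only illustrative.
+//
+//     use std::convert::TryInto;
+//
+//     const BYTES: &[u8] = &[1, 2, 3];
+//
+//     fn main() {
+//         let sized: &[u8; 3] = BYTES.try_into().unwrap();
+//         assert_eq!(sized, &[1, 2, 3]);
+//     }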
diff --git a/compiler/rustc_const_eval/src/interpret/cast.rs b/compiler/rustc_const_eval/src/interpret/cast.rs
new file mode 100644
index 00000000000..4c4b0bd2d1f
--- /dev/null
+++ b/compiler/rustc_const_eval/src/interpret/cast.rs
@@ -0,0 +1,365 @@
+use std::convert::TryFrom;
+
+use rustc_apfloat::ieee::{Double, Single};
+use rustc_apfloat::{Float, FloatConvert};
+use rustc_middle::mir::interpret::{InterpResult, PointerArithmetic, Scalar};
+use rustc_middle::mir::CastKind;
+use rustc_middle::ty::adjustment::PointerCast;
+use rustc_middle::ty::layout::{IntegerExt, LayoutOf, TyAndLayout};
+use rustc_middle::ty::{self, FloatTy, Ty, TypeAndMut};
+use rustc_target::abi::{Integer, Variants};
+
+use super::{
+    util::ensure_monomorphic_enough, FnVal, ImmTy, Immediate, InterpCx, Machine, OpTy, PlaceTy,
+};
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+    pub fn cast(
+        &mut self,
+        src: &OpTy<'tcx, M::PointerTag>,
+        cast_kind: CastKind,
+        cast_ty: Ty<'tcx>,
+        dest: &PlaceTy<'tcx, M::PointerTag>,
+    ) -> InterpResult<'tcx> {
+        use rustc_middle::mir::CastKind::*;
+        // FIXME: In which cases should we trigger UB when the source is uninit?
+        match cast_kind {
+            Pointer(PointerCast::Unsize) => {
+                let cast_ty = self.layout_of(cast_ty)?;
+                self.unsize_into(src, cast_ty, dest)?;
+            }
+
+            Misc => {
+                let src = self.read_immediate(src)?;
+                let res = self.misc_cast(&src, cast_ty)?;
+                self.write_immediate(res, dest)?;
+            }
+
+            Pointer(PointerCast::MutToConstPointer | PointerCast::ArrayToPointer) => {
+                // These are NOPs, but can be wide pointers.
+                let v = self.read_immediate(src)?;
+                self.write_immediate(*v, dest)?;
+            }
+
+            Pointer(PointerCast::ReifyFnPointer) => {
+                // The src operand does not matter, just its type
+                match *src.layout.ty.kind() {
+                    ty::FnDef(def_id, substs) => {
+                        // All reifications must be monomorphic, bail out otherwise.
+                        ensure_monomorphic_enough(*self.tcx, src.layout.ty)?;
+
+                        let instance = ty::Instance::resolve_for_fn_ptr(
+                            *self.tcx,
+                            self.param_env,
+                            def_id,
+                            substs,
+                        )
+                        .ok_or_else(|| err_inval!(TooGeneric))?;
+
+                        let fn_ptr = self.memory.create_fn_alloc(FnVal::Instance(instance));
+                        self.write_pointer(fn_ptr, dest)?;
+                    }
+                    _ => span_bug!(self.cur_span(), "reify fn pointer on {:?}", src.layout.ty),
+                }
+            }
+
+            Pointer(PointerCast::UnsafeFnPointer) => {
+                let src = self.read_immediate(src)?;
+                match cast_ty.kind() {
+                    ty::FnPtr(_) => {
+                        // No change to value
+                        self.write_immediate(*src, dest)?;
+                    }
+                    _ => span_bug!(self.cur_span(), "fn to unsafe fn cast on {:?}", cast_ty),
+                }
+            }
+
+            Pointer(PointerCast::ClosureFnPointer(_)) => {
+                // The src operand does not matter, just its type
+                match *src.layout.ty.kind() {
+                    ty::Closure(def_id, substs) => {
+                        // All reifications must be monomorphic, bail out otherwise.
+                        ensure_monomorphic_enough(*self.tcx, src.layout.ty)?;
+
+                        let instance = ty::Instance::resolve_closure(
+                            *self.tcx,
+                            def_id,
+                            substs,
+                            ty::ClosureKind::FnOnce,
+                        );
+                        let fn_ptr = self.memory.create_fn_alloc(FnVal::Instance(instance));
+                        self.write_pointer(fn_ptr, dest)?;
+                    }
+                    _ => span_bug!(self.cur_span(), "closure fn pointer on {:?}", src.layout.ty),
+                }
+            }
+        }
+        Ok(())
+    }
+
+    fn misc_cast(
+        &self,
+        src: &ImmTy<'tcx, M::PointerTag>,
+        cast_ty: Ty<'tcx>,
+    ) -> InterpResult<'tcx, Immediate<M::PointerTag>> {
+        use rustc_middle::ty::TyKind::*;
+        trace!("Casting {:?}: {:?} to {:?}", *src, src.layout.ty, cast_ty);
+
+        match src.layout.ty.kind() {
+            // Floating point
+            Float(FloatTy::F32) => {
+                return Ok(self.cast_from_float(src.to_scalar()?.to_f32()?, cast_ty).into());
+            }
+            Float(FloatTy::F64) => {
+                return Ok(self.cast_from_float(src.to_scalar()?.to_f64()?, cast_ty).into());
+            }
+            // The rest is integer/pointer-"like", including fn ptr casts and casts from enums that
+            // are represented as integers.
+            _ => assert!(
+                src.layout.ty.is_bool()
+                    || src.layout.ty.is_char()
+                    || src.layout.ty.is_enum()
+                    || src.layout.ty.is_integral()
+                    || src.layout.ty.is_any_ptr(),
+                "Unexpected cast from type {:?}",
+                src.layout.ty
+            ),
+        }
+
+        // # First handle non-scalar source values.
+
+        // Handle cast from a ZST enum (0 or 1 variants).
+        match src.layout.variants {
+            Variants::Single { index } => {
+                if src.layout.abi.is_uninhabited() {
+                    // This is dead code, because an uninhabited enum is UB to
+                    // instantiate.
+                    throw_ub!(Unreachable);
+                }
+                if let Some(discr) = src.layout.ty.discriminant_for_variant(*self.tcx, index) {
+                    assert!(src.layout.is_zst());
+                    let discr_layout = self.layout_of(discr.ty)?;
+                    return Ok(self.cast_from_scalar(discr.val, discr_layout, cast_ty).into());
+                }
+            }
+            Variants::Multiple { .. } => {}
+        }
+
+        // Handle casting any ptr to raw ptr (might be a fat ptr).
+        if src.layout.ty.is_any_ptr() && cast_ty.is_unsafe_ptr() {
+            let dest_layout = self.layout_of(cast_ty)?;
+            if dest_layout.size == src.layout.size {
+                // Thin or fat pointer that just has the ptr kind of the target type changed.
+                return Ok(**src);
+            } else {
+                // Casting the metadata away from a fat ptr.
+                assert_eq!(src.layout.size, 2 * self.memory.pointer_size());
+                assert_eq!(dest_layout.size, self.memory.pointer_size());
+                assert!(src.layout.ty.is_unsafe_ptr());
+                return match **src {
+                    Immediate::ScalarPair(data, _) => Ok(data.into()),
+                    Immediate::Scalar(..) => span_bug!(
+                        self.cur_span(),
+                        "{:?} input to a fat-to-thin cast ({:?} -> {:?})",
+                        *src,
+                        src.layout.ty,
+                        cast_ty
+                    ),
+                };
+            }
+        }
+
+        // # The remaining source values are scalar.
+
+        // For all remaining casts, we either
+        // (a) cast a raw ptr to usize, or
+        // (b) cast from an integer-like (including bool, char, enums).
+        // In both cases we want the bits.
+        let bits = src.to_scalar()?.to_bits(src.layout.size)?;
+        Ok(self.cast_from_scalar(bits, src.layout, cast_ty).into())
+    }
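+
+    // Surface-level sketch (not interpreter code) of the fat-to-thin raw pointer cast
+    // handled above: only the data half of the `ScalarPair` survives.
+    //
+    //     fn main() {
+    //         let s: &[u8] = &[1, 2, 3];
+    //         let fat: *const [u8] = s;     // fat pointer: (data, len)
+    //         let thin = fat as *const u8;  // metadata discarded, data kept
+    //         assert_eq!(unsafe { *thin }, 1);
+    //     }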
+
+    pub(super) fn cast_from_scalar(
+        &self,
+        v: u128, // raw bits (there is no ScalarTy so we separate data+layout)
+        src_layout: TyAndLayout<'tcx>,
+        cast_ty: Ty<'tcx>,
+    ) -> Scalar<M::PointerTag> {
+        // Let's make sure v is sign-extended *if* it has a signed type.
+        let signed = src_layout.abi.is_signed(); // Also asserts that abi is `Scalar`.
+        let v = if signed { self.sign_extend(v, src_layout) } else { v };
+        trace!("cast_from_scalar: {}, {} -> {}", v, src_layout.ty, cast_ty);
+        use rustc_middle::ty::TyKind::*;
+        match *cast_ty.kind() {
+            Int(_) | Uint(_) | RawPtr(_) => {
+                let size = match *cast_ty.kind() {
+                    Int(t) => Integer::from_int_ty(self, t).size(),
+                    Uint(t) => Integer::from_uint_ty(self, t).size(),
+                    RawPtr(_) => self.pointer_size(),
+                    _ => bug!(),
+                };
+                let v = size.truncate(v);
+                Scalar::from_uint(v, size)
+            }
+
+            Float(FloatTy::F32) if signed => Scalar::from_f32(Single::from_i128(v as i128).value),
+            Float(FloatTy::F64) if signed => Scalar::from_f64(Double::from_i128(v as i128).value),
+            Float(FloatTy::F32) => Scalar::from_f32(Single::from_u128(v).value),
+            Float(FloatTy::F64) => Scalar::from_f64(Double::from_u128(v).value),
+
+            Char => {
+                // `u8` to `char` cast
+                Scalar::from_u32(u8::try_from(v).unwrap().into())
+            }
+
+            // Casts to bool are not permitted by rustc, no need to handle them here.
+            _ => span_bug!(self.cur_span(), "invalid int to {:?} cast", cast_ty),
+        }
+    }
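+
+    // Surface-level sketch (not interpreter code) of the sign-extend/truncate behaviour
+    // implemented in `cast_from_scalar`:
+    //
+    //     fn main() {
+    //         assert_eq!(-1i8 as u16, 0xFFFF);       // sign-extended, then truncated to 16 bits
+    //         assert_eq!(258u16 as u8, 2);           // truncated to 8 bits
+    //         assert_eq!(200u8 as char as u32, 200); // the one permitted int-to-char cast
+    //     }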
+
+    fn cast_from_float<F>(&self, f: F, dest_ty: Ty<'tcx>) -> Scalar<M::PointerTag>
+    where
+        F: Float + Into<Scalar<M::PointerTag>> + FloatConvert<Single> + FloatConvert<Double>,
+    {
+        use rustc_middle::ty::TyKind::*;
+        match *dest_ty.kind() {
+            // float -> uint
+            Uint(t) => {
+                let size = Integer::from_uint_ty(self, t).size();
+                // `to_u128` is a saturating cast, which is what we need
+                // (https://doc.rust-lang.org/nightly/nightly-rustc/rustc_apfloat/trait.Float.html#method.to_i128_r).
+                let v = f.to_u128(size.bits_usize()).value;
+                // This should already fit the bit width
+                Scalar::from_uint(v, size)
+            }
+            // float -> int
+            Int(t) => {
+                let size = Integer::from_int_ty(self, t).size();
+                // `to_i128` is a saturating cast, which is what we need
+                // (https://doc.rust-lang.org/nightly/nightly-rustc/rustc_apfloat/trait.Float.html#method.to_i128_r).
+                let v = f.to_i128(size.bits_usize()).value;
+                Scalar::from_int(v, size)
+            }
+            // float -> f32
+            Float(FloatTy::F32) => Scalar::from_f32(f.convert(&mut false).value),
+            // float -> f64
+            Float(FloatTy::F64) => Scalar::from_f64(f.convert(&mut false).value),
+            // That's it.
+            _ => span_bug!(self.cur_span(), "invalid float to {:?} cast", dest_ty),
+        }
+    }
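+
+    // Surface-level sketch (not interpreter code) of the saturating float-to-int casts
+    // implemented above:
+    //
+    //     fn main() {
+    //         assert_eq!(1000.0f32 as u8, 255); // saturates at the maximum
+    //         assert_eq!(-5.7f32 as u8, 0);     // saturates at the minimum
+    //         assert_eq!(f32::NAN as i32, 0);   // NaN becomes 0
+    //     }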
+
+    fn unsize_into_ptr(
+        &mut self,
+        src: &OpTy<'tcx, M::PointerTag>,
+        dest: &PlaceTy<'tcx, M::PointerTag>,
+        // The pointee types
+        source_ty: Ty<'tcx>,
+        cast_ty: Ty<'tcx>,
+    ) -> InterpResult<'tcx> {
+        // A<Struct> -> A<Trait> conversion
+        let (src_pointee_ty, dest_pointee_ty) =
+            self.tcx.struct_lockstep_tails_erasing_lifetimes(source_ty, cast_ty, self.param_env);
+
+        match (&src_pointee_ty.kind(), &dest_pointee_ty.kind()) {
+            (&ty::Array(_, length), &ty::Slice(_)) => {
+                let ptr = self.read_immediate(src)?.to_scalar()?;
+                // u64 cast is from usize to u64, which is always good
+                let val =
+                    Immediate::new_slice(ptr, length.eval_usize(*self.tcx, self.param_env), self);
+                self.write_immediate(val, dest)
+            }
+            (&ty::Dynamic(ref data_a, ..), &ty::Dynamic(ref data_b, ..)) => {
+                let val = self.read_immediate(src)?;
+                if data_a.principal_def_id() == data_b.principal_def_id() {
+                    return self.write_immediate(*val, dest);
+                }
+                // trait upcasting coercion
+                let vptr_entry_idx = self.tcx.vtable_trait_upcasting_coercion_new_vptr_slot((
+                    src_pointee_ty,
+                    dest_pointee_ty,
+                ));
+
+                if let Some(entry_idx) = vptr_entry_idx {
+                    let entry_idx = u64::try_from(entry_idx).unwrap();
+                    let (old_data, old_vptr) = val.to_scalar_pair()?;
+                    let old_vptr = self.scalar_to_ptr(old_vptr);
+                    let new_vptr = self
+                        .read_new_vtable_after_trait_upcasting_from_vtable(old_vptr, entry_idx)?;
+                    self.write_immediate(Immediate::new_dyn_trait(old_data, new_vptr, self), dest)
+                } else {
+                    self.write_immediate(*val, dest)
+                }
+            }
+            (_, &ty::Dynamic(ref data, _)) => {
+                // Initial cast from sized to dyn trait
+                let vtable = self.get_vtable(src_pointee_ty, data.principal())?;
+                let ptr = self.read_immediate(src)?.to_scalar()?;
+                let val = Immediate::new_dyn_trait(ptr, vtable, &*self.tcx);
+                self.write_immediate(val, dest)
+            }
+
+            _ => {
+                span_bug!(self.cur_span(), "invalid unsizing {:?} -> {:?}", src.layout.ty, cast_ty)
+            }
+        }
+    }
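+
+    // Surface-level sketch (not interpreter code) of the two unsizing cases above:
+    // array-to-slice attaches a length, sized-to-dyn attaches a vtable pointer.
+    //
+    //     use std::fmt::Debug;
+    //
+    //     fn main() {
+    //         let arr = [1u8, 2, 3];
+    //         let slice: &[u8] = &arr;       // fat pointer: (data, len)
+    //         let object: &dyn Debug = &arr; // fat pointer: (data, vtable)
+    //         assert_eq!(slice.len(), 3);
+    //         println!("{:?}", object);
+    //     }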
+
+    fn unsize_into(
+        &mut self,
+        src: &OpTy<'tcx, M::PointerTag>,
+        cast_ty: TyAndLayout<'tcx>,
+        dest: &PlaceTy<'tcx, M::PointerTag>,
+    ) -> InterpResult<'tcx> {
+        trace!("Unsizing {:?} of type {} into {:?}", *src, src.layout.ty, cast_ty.ty);
+        match (&src.layout.ty.kind(), &cast_ty.ty.kind()) {
+            (&ty::Ref(_, s, _), &ty::Ref(_, c, _) | &ty::RawPtr(TypeAndMut { ty: c, .. }))
+            | (&ty::RawPtr(TypeAndMut { ty: s, .. }), &ty::RawPtr(TypeAndMut { ty: c, .. })) => {
+                self.unsize_into_ptr(src, dest, s, c)
+            }
+            (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
+                assert_eq!(def_a, def_b);
+                if def_a.is_box() || def_b.is_box() {
+                    if !def_a.is_box() || !def_b.is_box() {
+                        span_bug!(
+                            self.cur_span(),
+                            "invalid unsizing between {:?} -> {:?}",
+                            src.layout.ty,
+                            cast_ty.ty
+                        );
+                    }
+                    return self.unsize_into_ptr(
+                        src,
+                        dest,
+                        src.layout.ty.boxed_ty(),
+                        cast_ty.ty.boxed_ty(),
+                    );
+                }
+
+                // Unsizing of a generic struct with pointer fields, e.g. `Arc<T>` -> `Arc<Trait>`.
+                // Here we need to widen every thin `&T` pointer field into a fat pointer
+                // (see the sketch after this function).
+                for i in 0..src.layout.fields.count() {
+                    let cast_ty_field = cast_ty.field(self, i);
+                    if cast_ty_field.is_zst() {
+                        continue;
+                    }
+                    let src_field = self.operand_field(src, i)?;
+                    let dst_field = self.place_field(dest, i)?;
+                    if src_field.layout.ty == cast_ty_field.ty {
+                        self.copy_op(&src_field, &dst_field)?;
+                    } else {
+                        self.unsize_into(&src_field, cast_ty_field, &dst_field)?;
+                    }
+                }
+                Ok(())
+            }
+            _ => span_bug!(
+                self.cur_span(),
+                "unsize_into: invalid conversion: {:?} -> {:?}",
+                src.layout,
+                dest.layout
+            ),
+        }
+    }
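+
+    // Surface-level sketch (not interpreter code) of the "generic struct with pointer
+    // fields" case above: only the inner pointer field of `Arc` grows from a thin to a
+    // fat pointer; all other fields are copied as-is.
+    //
+    //     use std::fmt::Display;
+    //     use std::sync::Arc;
+    //
+    //     fn main() {
+    //         let a: Arc<dyn Display> = Arc::new(5i32);
+    //         assert_eq!(a.to_string(), "5");
+    //     }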
+}
diff --git a/compiler/rustc_const_eval/src/interpret/eval_context.rs b/compiler/rustc_const_eval/src/interpret/eval_context.rs
new file mode 100644
index 00000000000..0521443533b
--- /dev/null
+++ b/compiler/rustc_const_eval/src/interpret/eval_context.rs
@@ -0,0 +1,1049 @@
+use std::cell::Cell;
+use std::fmt;
+use std::mem;
+
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_hir::{self as hir, def_id::DefId, definitions::DefPathData};
+use rustc_index::vec::IndexVec;
+use rustc_macros::HashStable;
+use rustc_middle::ich::StableHashingContext;
+use rustc_middle::mir;
+use rustc_middle::ty::layout::{self, LayoutError, LayoutOf, LayoutOfHelpers, TyAndLayout};
+use rustc_middle::ty::{
+    self, query::TyCtxtAt, subst::SubstsRef, ParamEnv, Ty, TyCtxt, TypeFoldable,
+};
+use rustc_mir_dataflow::storage::AlwaysLiveLocals;
+use rustc_session::Limit;
+use rustc_span::{Pos, Span};
+use rustc_target::abi::{Align, HasDataLayout, Size, TargetDataLayout};
+
+use super::{
+    AllocId, GlobalId, Immediate, InterpErrorInfo, InterpResult, MPlaceTy, Machine, MemPlace,
+    MemPlaceMeta, Memory, MemoryKind, Operand, Place, PlaceTy, Pointer, Provenance, Scalar,
+    ScalarMaybeUninit, StackPopJump,
+};
+use crate::transform::validate::equal_up_to_regions;
+
+pub struct InterpCx<'mir, 'tcx, M: Machine<'mir, 'tcx>> {
+    /// Stores the `Machine` instance.
+    ///
+    /// Note: the stack is provided by the machine.
+    pub machine: M,
+
+    /// The results of the type checker, from rustc.
+    /// The span in this is the "root" of the evaluation, i.e., the const
+    /// we are evaluating (if this is CTFE).
+    pub tcx: TyCtxtAt<'tcx>,
+
+    /// Bounds in scope for polymorphic evaluations.
+    pub(crate) param_env: ty::ParamEnv<'tcx>,
+
+    /// The virtual memory system.
+    pub memory: Memory<'mir, 'tcx, M>,
+
+    /// The recursion limit (cached from `tcx.recursion_limit(())`)
+    pub recursion_limit: Limit,
+}
+
+// The Phantomdata exists to prevent this type from being `Send`. If it were sent across a thread
+// boundary and dropped in the other thread, it would exit the span in the other thread.
+struct SpanGuard(tracing::Span, std::marker::PhantomData<*const u8>);
+
+impl SpanGuard {
+    /// By default a `SpanGuard` does nothing.
+    fn new() -> Self {
+        Self(tracing::Span::none(), std::marker::PhantomData)
+    }
+
+    /// If a span is entered, we exit the previous span (if any, normally none) and enter the
+    /// new span. This is mainly so we don't have to use `Option` for the `tracing_span` field of
+    /// `Frame` by creating a dummy span to begin with and then entering it once the frame has
+    /// been pushed.
+    fn enter(&mut self, span: tracing::Span) {
+        // This executes the destructor on the previous instance of `SpanGuard`, ensuring that
+        // span entries and exits always stay balanced. Unless you `mem::forget` the guard --
+        // then we can't protect the tracing stack, but that'll just lead to weird logging,
+        // not actual problems.
+        *self = Self(span, std::marker::PhantomData);
+        self.0.with_subscriber(|(id, dispatch)| {
+            dispatch.enter(id);
+        });
+    }
+}
+
+impl Drop for SpanGuard {
+    fn drop(&mut self) {
+        self.0.with_subscriber(|(id, dispatch)| {
+            dispatch.exit(id);
+        });
+    }
+}
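+
+// A minimal sketch of the usual guard-based `tracing` pattern this replaces: the plain
+// `enter()` guard must live on the Rust call stack, whereas `SpanGuard` is stored in the
+// `Frame` so the span lives exactly as long as the interpreter frame.
+//
+//     fn demo() {
+//         let span = tracing::info_span!("frame");
+//         let _guard = span.enter(); // span exited when `_guard` is dropped
+//         tracing::trace!("inside the frame span");
+//     }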
+
+/// A stack frame.
+pub struct Frame<'mir, 'tcx, Tag: Provenance = AllocId, Extra = ()> {
+    ////////////////////////////////////////////////////////////////////////////////
+    // Function and callsite information
+    ////////////////////////////////////////////////////////////////////////////////
+    /// The MIR for the function called on this frame.
+    pub body: &'mir mir::Body<'tcx>,
+
+    /// The def_id and substs of the current function.
+    pub instance: ty::Instance<'tcx>,
+
+    /// Extra data for the machine.
+    pub extra: Extra,
+
+    ////////////////////////////////////////////////////////////////////////////////
+    // Return place and locals
+    ////////////////////////////////////////////////////////////////////////////////
+    /// Work to perform when returning from this function.
+    pub return_to_block: StackPopCleanup,
+
+    /// The location where the result of the current stack frame should be written to,
+    /// and its layout in the caller.
+    pub return_place: Option<PlaceTy<'tcx, Tag>>,
+
+    /// The list of locals for this stack frame, stored in order as
+    /// `[return_ptr, arguments..., variables..., temporaries...]`.
+    /// The locals are stored as `Option<Value>`s.
+    /// `None` represents a local that is currently dead, while a live local
+    /// can either directly contain `Scalar` or refer to some part of an `Allocation`.
+    pub locals: IndexVec<mir::Local, LocalState<'tcx, Tag>>,
+
+    /// The span of the `tracing` crate is stored here.
+    /// When the guard is dropped, the span is exited. This gives us
+    /// a full stack trace on all tracing statements.
+    tracing_span: SpanGuard,
+
+    ////////////////////////////////////////////////////////////////////////////////
+    // Current position within the function
+    ////////////////////////////////////////////////////////////////////////////////
+    /// If this is `Err`, we are not currently executing any particular statement in
+    /// this frame (can happen e.g. during frame initialization, and during unwinding on
+    /// frames without cleanup code).
+    /// We basically abuse `Result` as `Either`.
+    pub(super) loc: Result<mir::Location, Span>,
+}
+
+/// What we store about a frame in an interpreter backtrace.
+#[derive(Debug)]
+pub struct FrameInfo<'tcx> {
+    pub instance: ty::Instance<'tcx>,
+    pub span: Span,
+    pub lint_root: Option<hir::HirId>,
+}
+
+/// Unwind information.
+#[derive(Clone, Copy, Eq, PartialEq, Debug, HashStable)]
+pub enum StackPopUnwind {
+    /// The cleanup block.
+    Cleanup(mir::BasicBlock),
+    /// No cleanup needs to be done.
+    Skip,
+    /// Unwinding is not allowed (UB).
+    NotAllowed,
+}
+
+#[derive(Clone, Copy, Eq, PartialEq, Debug, HashStable)] // Miri debug-prints these
+pub enum StackPopCleanup {
+    /// Jump to the next block in the caller, or cause UB if None (that's a function
+    /// that is not allowed to return). Also store the layout of the return place so
+    /// we can validate it at that layout.
+    /// `ret` stores the block we jump to on a normal return, while `unwind`
+    /// stores the block used for cleanup during unwinding.
+    Goto { ret: Option<mir::BasicBlock>, unwind: StackPopUnwind },
+    /// Just do nothing: used by `main` and for the `box_alloc` hook in Miri.
+    /// `cleanup` says whether locals are deallocated. Static computation
+    /// wants them leaked to intern what they need (and just throw away
+    /// the entire `ecx` when it is done).
+    None { cleanup: bool },
+}
+
+/// State of a local variable including a memoized layout
+#[derive(Clone, PartialEq, Eq, HashStable)]
+pub struct LocalState<'tcx, Tag: Provenance = AllocId> {
+    pub value: LocalValue<Tag>,
+    /// Don't modify if `Some`, this is only used to prevent computing the layout twice
+    #[stable_hasher(ignore)]
+    pub layout: Cell<Option<TyAndLayout<'tcx>>>,
+}
+
+/// Current value of a local variable
+#[derive(Copy, Clone, PartialEq, Eq, HashStable, Debug)] // Miri debug-prints these
+pub enum LocalValue<Tag: Provenance = AllocId> {
+    /// This local is not currently alive, and cannot be used at all.
+    Dead,
+    /// This local is alive but not yet initialized. It can be written to
+    /// but not read from or its address taken. Locals get initialized on
+    /// first write because for unsized locals, we do not know their size
+    /// before that.
+    Uninitialized,
+    /// A normal, live local.
+    /// Mostly for convenience, we re-use the `Operand` type here.
+    /// This is an optimization over just always having a pointer here;
+    /// we can thus avoid doing an allocation when the local just stores
+    /// immediate values *and* never has its address taken.
+    Live(Operand<Tag>),
+}
+
+impl<'tcx, Tag: Provenance + 'static> LocalState<'tcx, Tag> {
+    /// Read the local's value or error if the local is not yet live or not live anymore.
+    ///
+    /// Note: This may only be invoked from the `Machine::access_local` hook and not from
+    /// anywhere else. You may be invalidating machine invariants if you do!
+    pub fn access(&self) -> InterpResult<'tcx, Operand<Tag>> {
+        match self.value {
+            LocalValue::Dead => throw_ub!(DeadLocal),
+            LocalValue::Uninitialized => {
+                bug!("The type checker should prevent reading from a never-written local")
+            }
+            LocalValue::Live(val) => Ok(val),
+        }
+    }
+
+    /// Overwrite the local.  If the local can be overwritten in place, return a reference
+    /// to do so; otherwise return the `MemPlace` to consult instead.
+    ///
+    /// Note: This may only be invoked from the `Machine::access_local_mut` hook and not from
+    /// anywhere else. You may be invalidating machine invariants if you do!
+    pub fn access_mut(
+        &mut self,
+    ) -> InterpResult<'tcx, Result<&mut LocalValue<Tag>, MemPlace<Tag>>> {
+        match self.value {
+            LocalValue::Dead => throw_ub!(DeadLocal),
+            LocalValue::Live(Operand::Indirect(mplace)) => Ok(Err(mplace)),
+            ref mut
+            local @ (LocalValue::Live(Operand::Immediate(_)) | LocalValue::Uninitialized) => {
+                Ok(Ok(local))
+            }
+        }
+    }
+}
+
+impl<'mir, 'tcx, Tag: Provenance> Frame<'mir, 'tcx, Tag> {
+    pub fn with_extra<Extra>(self, extra: Extra) -> Frame<'mir, 'tcx, Tag, Extra> {
+        Frame {
+            body: self.body,
+            instance: self.instance,
+            return_to_block: self.return_to_block,
+            return_place: self.return_place,
+            locals: self.locals,
+            loc: self.loc,
+            extra,
+            tracing_span: self.tracing_span,
+        }
+    }
+}
+
+impl<'mir, 'tcx, Tag: Provenance, Extra> Frame<'mir, 'tcx, Tag, Extra> {
+    /// Get the current location within the Frame.
+    ///
+    /// If this is `Err`, we are not currently executing any particular statement in
+    /// this frame (can happen e.g. during frame initialization, and during unwinding on
+    /// frames without cleanup code).
+    /// We basically abuse `Result` as `Either`.
+    ///
+    /// Used by priroda.
+    pub fn current_loc(&self) -> Result<mir::Location, Span> {
+        self.loc
+    }
+
+    /// Return the `SourceInfo` of the current instruction.
+    pub fn current_source_info(&self) -> Option<&mir::SourceInfo> {
+        self.loc.ok().map(|loc| self.body.source_info(loc))
+    }
+
+    pub fn current_span(&self) -> Span {
+        match self.loc {
+            Ok(loc) => self.body.source_info(loc).span,
+            Err(span) => span,
+        }
+    }
+}
+
+impl<'tcx> fmt::Display for FrameInfo<'tcx> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        ty::tls::with(|tcx| {
+            if tcx.def_key(self.instance.def_id()).disambiguated_data.data
+                == DefPathData::ClosureExpr
+            {
+                write!(f, "inside closure")?;
+            } else {
+                write!(f, "inside `{}`", self.instance)?;
+            }
+            if !self.span.is_dummy() {
+                let sm = tcx.sess.source_map();
+                let lo = sm.lookup_char_pos(self.span.lo());
+                write!(
+                    f,
+                    " at {}:{}:{}",
+                    sm.filename_for_diagnostics(&lo.file.name),
+                    lo.line,
+                    lo.col.to_usize() + 1
+                )?;
+            }
+            Ok(())
+        })
+    }
+}
+
+impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> HasDataLayout for InterpCx<'mir, 'tcx, M> {
+    #[inline]
+    fn data_layout(&self) -> &TargetDataLayout {
+        &self.tcx.data_layout
+    }
+}
+
+impl<'mir, 'tcx, M> layout::HasTyCtxt<'tcx> for InterpCx<'mir, 'tcx, M>
+where
+    M: Machine<'mir, 'tcx>,
+{
+    #[inline]
+    fn tcx(&self) -> TyCtxt<'tcx> {
+        *self.tcx
+    }
+}
+
+impl<'mir, 'tcx, M> layout::HasParamEnv<'tcx> for InterpCx<'mir, 'tcx, M>
+where
+    M: Machine<'mir, 'tcx>,
+{
+    fn param_env(&self) -> ty::ParamEnv<'tcx> {
+        self.param_env
+    }
+}
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> LayoutOfHelpers<'tcx> for InterpCx<'mir, 'tcx, M> {
+    type LayoutOfResult = InterpResult<'tcx, TyAndLayout<'tcx>>;
+
+    #[inline]
+    fn layout_tcx_at_span(&self) -> Span {
+        self.tcx.span
+    }
+
+    #[inline]
+    fn handle_layout_err(
+        &self,
+        err: LayoutError<'tcx>,
+        _: Span,
+        _: Ty<'tcx>,
+    ) -> InterpErrorInfo<'tcx> {
+        err_inval!(Layout(err)).into()
+    }
+}
+
+/// Test if it is valid for a MIR assignment to assign `src`-typed place to `dest`-typed value.
+/// This test should be symmetric, as it is primarily about layout compatibility.
+pub(super) fn mir_assign_valid_types<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    param_env: ParamEnv<'tcx>,
+    src: TyAndLayout<'tcx>,
+    dest: TyAndLayout<'tcx>,
+) -> bool {
+    // Type-changing assignments can happen when subtyping is used. While
+    // all normal lifetimes are erased, higher-ranked types with their
+    // late-bound lifetimes are still around and can lead to type
+    // differences. So we compare ignoring lifetimes.
+    if equal_up_to_regions(tcx, param_env, src.ty, dest.ty) {
+        // Make sure the layout is equal, too -- just to be safe. Miri really
+        // needs layout equality. For performance reasons we skip this check when
+        // the types are equal. Equal types *can* have different layouts when
+        // enum downcast is involved (as enum variants carry the type of the
+        // enum), but those should never occur in assignments.
+        if cfg!(debug_assertions) || src.ty != dest.ty {
+            assert_eq!(src.layout, dest.layout);
+        }
+        true
+    } else {
+        false
+    }
+}
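+
+// Surface-level sketch (not compiler code) of the kind of type-changing assignment this
+// check admits: the two fn-pointer types below differ only in their (late-bound)
+// lifetimes and are layout-compatible.
+//
+//     fn takes(_: &u8) {}
+//
+//     fn main() {
+//         let hr: for<'a> fn(&'a u8) = takes;
+//         let st: fn(&'static u8) = hr; // subtyping: same layout, different type
+//         st(&0);
+//     }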
+
+/// Use the already known layout if given (but sanity check in debug mode),
+/// or compute the layout.
+#[cfg_attr(not(debug_assertions), inline(always))]
+pub(super) fn from_known_layout<'tcx>(
+    tcx: TyCtxtAt<'tcx>,
+    param_env: ParamEnv<'tcx>,
+    known_layout: Option<TyAndLayout<'tcx>>,
+    compute: impl FnOnce() -> InterpResult<'tcx, TyAndLayout<'tcx>>,
+) -> InterpResult<'tcx, TyAndLayout<'tcx>> {
+    match known_layout {
+        None => compute(),
+        Some(known_layout) => {
+            if cfg!(debug_assertions) {
+                let check_layout = compute()?;
+                if !mir_assign_valid_types(tcx.tcx, param_env, check_layout, known_layout) {
+                    span_bug!(
+                        tcx.span,
+                        "expected type differs from actual type.\nexpected: {:?}\nactual: {:?}",
+                        known_layout.ty,
+                        check_layout.ty,
+                    );
+                }
+            }
+            Ok(known_layout)
+        }
+    }
+}
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+    pub fn new(
+        tcx: TyCtxt<'tcx>,
+        root_span: Span,
+        param_env: ty::ParamEnv<'tcx>,
+        machine: M,
+        memory_extra: M::MemoryExtra,
+    ) -> Self {
+        InterpCx {
+            machine,
+            tcx: tcx.at(root_span),
+            param_env,
+            memory: Memory::new(tcx, memory_extra),
+            recursion_limit: tcx.recursion_limit(),
+        }
+    }
+
+    #[inline(always)]
+    pub fn cur_span(&self) -> Span {
+        self.stack()
+            .iter()
+            .rev()
+            .find(|frame| !frame.instance.def.requires_caller_location(*self.tcx))
+            .map_or(self.tcx.span, |f| f.current_span())
+    }
+
+    #[inline(always)]
+    pub fn scalar_to_ptr(&self, scalar: Scalar<M::PointerTag>) -> Pointer<Option<M::PointerTag>> {
+        self.memory.scalar_to_ptr(scalar)
+    }
+
+    /// Call this to turn untagged "global" pointers (obtained via `tcx`) into
+    /// the machine pointer to the allocation.  Must never be used
+    /// for any other pointers, nor for TLS statics.
+    ///
+    /// Using the resulting pointer represents a *direct* access to that memory
+    /// (e.g. by directly using a `static`),
+    /// as opposed to access through a pointer that was created by the program.
+    ///
+    /// This function can fail only if `ptr` points to an `extern static`.
+    #[inline(always)]
+    pub fn global_base_pointer(&self, ptr: Pointer) -> InterpResult<'tcx, Pointer<M::PointerTag>> {
+        self.memory.global_base_pointer(ptr)
+    }
+
+    #[inline(always)]
+    pub(crate) fn stack(&self) -> &[Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra>] {
+        M::stack(self)
+    }
+
+    #[inline(always)]
+    pub(crate) fn stack_mut(
+        &mut self,
+    ) -> &mut Vec<Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra>> {
+        M::stack_mut(self)
+    }
+
+    #[inline(always)]
+    pub fn frame_idx(&self) -> usize {
+        let stack = self.stack();
+        assert!(!stack.is_empty());
+        stack.len() - 1
+    }
+
+    #[inline(always)]
+    pub fn frame(&self) -> &Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra> {
+        self.stack().last().expect("no call frames exist")
+    }
+
+    #[inline(always)]
+    pub fn frame_mut(&mut self) -> &mut Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra> {
+        self.stack_mut().last_mut().expect("no call frames exist")
+    }
+
+    #[inline(always)]
+    pub(super) fn body(&self) -> &'mir mir::Body<'tcx> {
+        self.frame().body
+    }
+
+    #[inline(always)]
+    pub fn sign_extend(&self, value: u128, ty: TyAndLayout<'_>) -> u128 {
+        assert!(ty.abi.is_signed());
+        ty.size.sign_extend(value)
+    }
+
+    #[inline(always)]
+    pub fn truncate(&self, value: u128, ty: TyAndLayout<'_>) -> u128 {
+        ty.size.truncate(value)
+    }
+
+    #[inline]
+    pub fn type_is_freeze(&self, ty: Ty<'tcx>) -> bool {
+        ty.is_freeze(self.tcx, self.param_env)
+    }
+
+    pub fn load_mir(
+        &self,
+        instance: ty::InstanceDef<'tcx>,
+        promoted: Option<mir::Promoted>,
+    ) -> InterpResult<'tcx, &'tcx mir::Body<'tcx>> {
+        // do not continue if typeck errors occurred (can only occur in local crate)
+        let def = instance.with_opt_param();
+        if let Some(def) = def.as_local() {
+            if self.tcx.has_typeck_results(def.did) {
+                if let Some(error_reported) = self.tcx.typeck_opt_const_arg(def).tainted_by_errors {
+                    throw_inval!(AlreadyReported(error_reported))
+                }
+            }
+        }
+        trace!("load mir(instance={:?}, promoted={:?})", instance, promoted);
+        if let Some(promoted) = promoted {
+            return Ok(&self.tcx.promoted_mir_opt_const_arg(def)[promoted]);
+        }
+        M::load_mir(self, instance)
+    }
+
+    /// Call this on things you got out of the MIR (so it is as generic as the current
+    /// stack frame), to bring it into the proper environment for this interpreter.
+    pub(super) fn subst_from_current_frame_and_normalize_erasing_regions<T: TypeFoldable<'tcx>>(
+        &self,
+        value: T,
+    ) -> T {
+        self.subst_from_frame_and_normalize_erasing_regions(self.frame(), value)
+    }
+
+    /// Call this on things you got out of the MIR (so it is as generic as the provided
+    /// stack frame), to bring it into the proper environment for this interpreter.
+    pub(super) fn subst_from_frame_and_normalize_erasing_regions<T: TypeFoldable<'tcx>>(
+        &self,
+        frame: &Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra>,
+        value: T,
+    ) -> T {
+        frame.instance.subst_mir_and_normalize_erasing_regions(*self.tcx, self.param_env, value)
+    }
+
+    /// The `substs` are assumed to already be in our interpreter "universe" (param_env).
+    pub(super) fn resolve(
+        &self,
+        def: ty::WithOptConstParam<DefId>,
+        substs: SubstsRef<'tcx>,
+    ) -> InterpResult<'tcx, ty::Instance<'tcx>> {
+        trace!("resolve: {:?}, {:#?}", def, substs);
+        trace!("param_env: {:#?}", self.param_env);
+        trace!("substs: {:#?}", substs);
+        match ty::Instance::resolve_opt_const_arg(*self.tcx, self.param_env, def, substs) {
+            Ok(Some(instance)) => Ok(instance),
+            Ok(None) => throw_inval!(TooGeneric),
+
+            // FIXME(eddyb) this could be a bit more specific than `AlreadyReported`.
+            Err(error_reported) => throw_inval!(AlreadyReported(error_reported)),
+        }
+    }
+
+    #[inline(always)]
+    pub fn layout_of_local(
+        &self,
+        frame: &Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra>,
+        local: mir::Local,
+        layout: Option<TyAndLayout<'tcx>>,
+    ) -> InterpResult<'tcx, TyAndLayout<'tcx>> {
+        // `const_prop` runs into this with an invalid (empty) frame, so we
+        // have to support that case (mostly by skipping all caching).
+        match frame.locals.get(local).and_then(|state| state.layout.get()) {
+            None => {
+                let layout = from_known_layout(self.tcx, self.param_env, layout, || {
+                    let local_ty = frame.body.local_decls[local].ty;
+                    let local_ty =
+                        self.subst_from_frame_and_normalize_erasing_regions(frame, local_ty);
+                    self.layout_of(local_ty)
+                })?;
+                if let Some(state) = frame.locals.get(local) {
+                    // Layouts of locals are requested a lot, so we cache them.
+                    state.layout.set(Some(layout));
+                }
+                Ok(layout)
+            }
+            Some(layout) => Ok(layout),
+        }
+    }
+
+    /// Returns the actual dynamic size and alignment of the place at the given type.
+    /// Only the "meta" (metadata) part of the place matters.
+    /// This can fail to provide an answer for extern types.
+    pub(super) fn size_and_align_of(
+        &self,
+        metadata: &MemPlaceMeta<M::PointerTag>,
+        layout: &TyAndLayout<'tcx>,
+    ) -> InterpResult<'tcx, Option<(Size, Align)>> {
+        if !layout.is_unsized() {
+            return Ok(Some((layout.size, layout.align.abi)));
+        }
+        match layout.ty.kind() {
+            ty::Adt(..) | ty::Tuple(..) => {
+                // First get the size of all statically known fields.
+                // Don't use type_of::sizing_type_of because that expects t to be sized,
+                // and it also rounds up to alignment, which we want to avoid,
+                // as the unsized field's alignment could be smaller.
+                assert!(!layout.ty.is_simd());
+                assert!(layout.fields.count() > 0);
+                trace!("DST layout: {:?}", layout);
+
+                let sized_size = layout.fields.offset(layout.fields.count() - 1);
+                let sized_align = layout.align.abi;
+                trace!(
+                    "DST {} statically sized prefix size: {:?} align: {:?}",
+                    layout.ty,
+                    sized_size,
+                    sized_align
+                );
+
+                // Recurse to get the size of the dynamically sized field (must be
+                // the last field).  Can't have foreign types here; how would we
+                // adjust alignment and size for them?
+                let field = layout.field(self, layout.fields.count() - 1);
+                let (unsized_size, unsized_align) =
+                    match self.size_and_align_of(metadata, &field)? {
+                        Some(size_and_align) => size_and_align,
+                        None => {
+                            // A field with extern type.  If this field is at offset 0, we behave
+                            // like the underlying extern type.
+                            // FIXME: Once we have made decisions for how to handle size and alignment
+                            // of `extern type`, this should be adapted.  It is just a temporary hack
+                            // to get some code to work that probably ought to work.
+                            if sized_size == Size::ZERO {
+                                return Ok(None);
+                            } else {
+                                span_bug!(
+                                    self.cur_span(),
+                                    "Fields cannot be extern types, unless they are at offset 0"
+                                )
+                            }
+                        }
+                    };
+
+                // FIXME (#26403, #27023): We should be adding padding
+                // to `sized_size` (to accommodate the `unsized_align`
+                // required of the unsized field that follows) before
+                // summing it with `sized_size`. (Note that since #26403
+                // is unfixed, we do not yet add the necessary padding
+                // here. But this is where the add would go.)
+
+                // Return the sum of sizes and max of aligns.
+                let size = sized_size + unsized_size; // `Size` addition
+
+                // Choose max of two known alignments (combined value must
+                // be aligned according to more restrictive of the two).
+                let align = sized_align.max(unsized_align);
+
+                // Issue #27023: must add any necessary padding to `size`
+                // (to make it a multiple of `align`) before returning it.
+                let size = size.align_to(align);
+
+                // Check if this brought us over the size limit.
+                if size.bytes() >= self.tcx.data_layout.obj_size_bound() {
+                    throw_ub!(InvalidMeta("total size is bigger than largest supported object"));
+                }
+                Ok(Some((size, align)))
+            }
+            ty::Dynamic(..) => {
+                let vtable = self.scalar_to_ptr(metadata.unwrap_meta());
+                // Read size and align from vtable (already checks size).
+                Ok(Some(self.read_size_and_align_from_vtable(vtable)?))
+            }
+
+            ty::Slice(_) | ty::Str => {
+                let len = metadata.unwrap_meta().to_machine_usize(self)?;
+                let elem = layout.field(self, 0);
+
+                // Make sure the slice is not too big.
+                let size = elem.size.checked_mul(len, self).ok_or_else(|| {
+                    err_ub!(InvalidMeta("slice is bigger than largest supported object"))
+                })?;
+                Ok(Some((size, elem.align.abi)))
+            }
+
+            ty::Foreign(_) => Ok(None),
+
+            _ => span_bug!(self.cur_span(), "size_and_align_of::<{:?}> not supported", layout.ty),
+        }
+    }
+    #[inline]
+    pub fn size_and_align_of_mplace(
+        &self,
+        mplace: &MPlaceTy<'tcx, M::PointerTag>,
+    ) -> InterpResult<'tcx, Option<(Size, Align)>> {
+        self.size_and_align_of(&mplace.meta, &mplace.layout)
+    }
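+
+    // Surface-level sketch (not interpreter code) of the same computations using the
+    // standard library's equivalents:
+    //
+    //     use std::mem::{align_of_val, size_of_val};
+    //
+    //     fn main() {
+    //         let s: &[u16] = &[1, 2, 3];
+    //         assert_eq!(size_of_val(s), 6);  // element size * length, as in the slice case
+    //         assert_eq!(align_of_val(s), 2);
+    //         let d: &dyn std::fmt::Debug = &0u64;
+    //         assert_eq!(size_of_val(d), 8);  // looked up via the vtable, as in the dyn case
+    //     }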
+
+    pub fn push_stack_frame(
+        &mut self,
+        instance: ty::Instance<'tcx>,
+        body: &'mir mir::Body<'tcx>,
+        return_place: Option<&PlaceTy<'tcx, M::PointerTag>>,
+        return_to_block: StackPopCleanup,
+    ) -> InterpResult<'tcx> {
+        // first push a stack frame so we have access to the local substs
+        let pre_frame = Frame {
+            body,
+            loc: Err(body.span), // Span used for errors caused during preamble.
+            return_to_block,
+            return_place: return_place.copied(),
+            // Empty local array; we fill it in below, after we are inside the stack frame and
+            // all methods actually know about the frame.
+            locals: IndexVec::new(),
+            instance,
+            tracing_span: SpanGuard::new(),
+            extra: (),
+        };
+        let frame = M::init_frame_extra(self, pre_frame)?;
+        self.stack_mut().push(frame);
+
+        // Make sure all the constants required by this frame evaluate successfully (post-monomorphization check).
+        for const_ in &body.required_consts {
+            let span = const_.span;
+            let const_ =
+                self.subst_from_current_frame_and_normalize_erasing_regions(const_.literal);
+            self.mir_const_to_op(&const_, None).map_err(|err| {
+                // If there was an error, set the span of the current frame to this constant.
+                // We avoid doing this when evaluation succeeds.
+                self.frame_mut().loc = Err(span);
+                err
+            })?;
+        }
+
+        // Locals are initially uninitialized.
+        let dummy = LocalState { value: LocalValue::Uninitialized, layout: Cell::new(None) };
+        let mut locals = IndexVec::from_elem(dummy, &body.local_decls);
+
+        // Now mark those locals as dead that we do not want to initialize
+        // Mark locals that use `Storage*` annotations as dead on function entry.
+        let always_live = AlwaysLiveLocals::new(self.body());
+        for local in locals.indices() {
+            if !always_live.contains(local) {
+                locals[local].value = LocalValue::Dead;
+            }
+        }
+        // done
+        self.frame_mut().locals = locals;
+        M::after_stack_push(self)?;
+        self.frame_mut().loc = Ok(mir::Location::START);
+
+        let span = info_span!("frame", "{}", instance);
+        self.frame_mut().tracing_span.enter(span);
+
+        Ok(())
+    }
+
+    /// Jump to the given block.
+    #[inline]
+    pub fn go_to_block(&mut self, target: mir::BasicBlock) {
+        self.frame_mut().loc = Ok(mir::Location { block: target, statement_index: 0 });
+    }
+
+    /// *Return* to the given `target` basic block.
+    /// Do *not* use for unwinding! Use `unwind_to_block` instead.
+    ///
+    /// If `target` is `None`, that indicates the function cannot return, so we raise UB.
+    pub fn return_to_block(&mut self, target: Option<mir::BasicBlock>) -> InterpResult<'tcx> {
+        if let Some(target) = target {
+            self.go_to_block(target);
+            Ok(())
+        } else {
+            throw_ub!(Unreachable)
+        }
+    }
+
+    /// *Unwind* to the given `target` basic block.
+    /// Do *not* use for returning! Use `return_to_block` instead.
+    ///
+    /// If `target` is `StackPopUnwind::Skip`, that indicates the function does not need cleanup
+    /// during unwinding, and we will just keep propagating that upwards.
+    ///
+    /// If `target` is `StackPopUnwind::NotAllowed`, that indicates the function does not allow
+    /// unwinding, and doing so is UB.
+    pub fn unwind_to_block(&mut self, target: StackPopUnwind) -> InterpResult<'tcx> {
+        self.frame_mut().loc = match target {
+            StackPopUnwind::Cleanup(block) => Ok(mir::Location { block, statement_index: 0 }),
+            StackPopUnwind::Skip => Err(self.frame_mut().body.span),
+            StackPopUnwind::NotAllowed => {
+                throw_ub_format!("unwinding past a stack frame that does not allow unwinding")
+            }
+        };
+        Ok(())
+    }
+
+    /// Pops the current frame from the stack, deallocating the
+    /// memory for allocated locals.
+    ///
+    /// If `unwinding` is `false`, then we are performing a normal return
+    /// from a function. In this case, we jump back into the frame of the caller,
+    /// and continue execution as normal.
+    ///
+    /// If `unwinding` is `true`, then we are in the middle of a panic,
+    /// and need to unwind this frame. In this case, we jump to the
+    /// `cleanup` block for the function, which is responsible for running
+    /// `Drop` impls for any locals that have been initialized at this point.
+    /// The cleanup block ends with a special `Resume` terminator, which will
+    /// cause us to continue unwinding.
+    pub(super) fn pop_stack_frame(&mut self, unwinding: bool) -> InterpResult<'tcx> {
+        info!(
+            "popping stack frame ({})",
+            if unwinding { "during unwinding" } else { "returning from function" }
+        );
+
+        // Sanity check `unwinding`.
+        assert_eq!(
+            unwinding,
+            match self.frame().loc {
+                Ok(loc) => self.body().basic_blocks()[loc.block].is_cleanup,
+                Err(_) => true,
+            }
+        );
+
+        if unwinding && self.frame_idx() == 0 {
+            throw_ub_format!("unwinding past the topmost frame of the stack");
+        }
+
+        let frame =
+            self.stack_mut().pop().expect("tried to pop a stack frame, but there were none");
+
+        if !unwinding {
+            // Copy the return value to the caller's stack frame.
+            if let Some(ref return_place) = frame.return_place {
+                let op = self.access_local(&frame, mir::RETURN_PLACE, None)?;
+                self.copy_op_transmute(&op, return_place)?;
+                trace!("{:?}", self.dump_place(**return_place));
+            } else {
+                throw_ub!(Unreachable);
+            }
+        }
+
+        let return_to_block = frame.return_to_block;
+
+        // Now where do we jump next?
+
+        // Usually we want to clean up (deallocate locals), but in a few rare cases we don't.
+        // In that case, we return early. We also avoid validation in that case,
+        // because this is CTFE and the final value will be thoroughly validated anyway.
+        let cleanup = match return_to_block {
+            StackPopCleanup::Goto { .. } => true,
+            StackPopCleanup::None { cleanup, .. } => cleanup,
+        };
+
+        if !cleanup {
+            assert!(self.stack().is_empty(), "only the topmost frame should ever be leaked");
+            assert!(!unwinding, "tried to skip cleanup during unwinding");
+            // Leak the locals, skip validation, skip machine hook.
+            return Ok(());
+        }
+
+        // Cleanup: deallocate all locals that are backed by an allocation.
+        for local in &frame.locals {
+            self.deallocate_local(local.value)?;
+        }
+
+        if M::after_stack_pop(self, frame, unwinding)? == StackPopJump::NoJump {
+            // The hook already did everything.
+            // We want to skip the `info!` below, hence early return.
+            return Ok(());
+        }
+        // Normal return, figure out where to jump.
+        if unwinding {
+            // Follow the unwind edge.
+            let unwind = match return_to_block {
+                StackPopCleanup::Goto { unwind, .. } => unwind,
+                StackPopCleanup::None { .. } => {
+                    panic!("Encountered StackPopCleanup::None when unwinding!")
+                }
+            };
+            self.unwind_to_block(unwind)
+        } else {
+            // Follow the normal return edge.
+            match return_to_block {
+                StackPopCleanup::Goto { ret, .. } => self.return_to_block(ret),
+                StackPopCleanup::None { .. } => Ok(()),
+            }
+        }
+    }
+
+    /// Mark a storage as live, killing the previous content.
+    pub fn storage_live(&mut self, local: mir::Local) -> InterpResult<'tcx> {
+        assert!(local != mir::RETURN_PLACE, "Cannot make return place live");
+        trace!("{:?} is now live", local);
+
+        let local_val = LocalValue::Uninitialized;
+        // StorageLive expects the local to be dead, and marks it live.
+        let old = mem::replace(&mut self.frame_mut().locals[local].value, local_val);
+        if !matches!(old, LocalValue::Dead) {
+            throw_ub_format!("StorageLive on a local that was already live");
+        }
+        Ok(())
+    }
+
+    pub fn storage_dead(&mut self, local: mir::Local) -> InterpResult<'tcx> {
+        assert!(local != mir::RETURN_PLACE, "Cannot make return place dead");
+        trace!("{:?} is now dead", local);
+
+        // It is entirely okay for this local to be already dead (at least that's how we currently generate MIR)
+        let old = mem::replace(&mut self.frame_mut().locals[local].value, LocalValue::Dead);
+        self.deallocate_local(old)?;
+        Ok(())
+    }
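
// Illustration (a hedged sketch, not part of this patch): at the source level these
// markers correspond to a local's scope; MIR lowering emits `StorageLive` when the
// local comes into scope and `StorageDead` when it goes out of scope.
fn storage_markers_example() {
    {
        // roughly: StorageLive(_x)
        let x = vec![1, 2, 3];
        assert_eq!(x.len(), 3);
    } // roughly: StorageDead(_x) at the end of the enclosing block
}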
+
+    fn deallocate_local(&mut self, local: LocalValue<M::PointerTag>) -> InterpResult<'tcx> {
+        if let LocalValue::Live(Operand::Indirect(MemPlace { ptr, .. })) = local {
+            // All locals have a backing allocation, even if the allocation is empty
+            // due to the local having ZST type. Hence we can `unwrap`.
+            trace!(
+                "deallocating local {:?}: {:?}",
+                local,
+                self.memory.dump_alloc(ptr.provenance.unwrap().get_alloc_id())
+            );
+            self.memory.deallocate(ptr, None, MemoryKind::Stack)?;
+        };
+        Ok(())
+    }
+
+    pub fn eval_to_allocation(
+        &self,
+        gid: GlobalId<'tcx>,
+    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
+        // For statics we pick `ParamEnv::reveal_all`, because statics don't have generics
+        // and thus don't care about the parameter environment. While we could just use
+        // `self.param_env`, that would mean we invoke the query to evaluate the static
+        // with different parameter environments, thus causing the static to be evaluated
+        // multiple times.
+        let param_env = if self.tcx.is_static(gid.instance.def_id()) {
+            ty::ParamEnv::reveal_all()
+        } else {
+            self.param_env
+        };
+        let val = self.tcx.eval_to_allocation_raw(param_env.and(gid))?;
+        self.raw_const_to_mplace(val)
+    }
+
+    #[must_use]
+    pub fn dump_place(&'a self, place: Place<M::PointerTag>) -> PlacePrinter<'a, 'mir, 'tcx, M> {
+        PlacePrinter { ecx: self, place }
+    }
+
+    #[must_use]
+    pub fn generate_stacktrace(&self) -> Vec<FrameInfo<'tcx>> {
+        let mut frames = Vec::new();
+        for frame in self
+            .stack()
+            .iter()
+            .rev()
+            .skip_while(|frame| frame.instance.def.requires_caller_location(*self.tcx))
+        {
+            let lint_root = frame.current_source_info().and_then(|source_info| {
+                match &frame.body.source_scopes[source_info.scope].local_data {
+                    mir::ClearCrossCrate::Set(data) => Some(data.lint_root),
+                    mir::ClearCrossCrate::Clear => None,
+                }
+            });
+            let span = frame.current_span();
+
+            frames.push(FrameInfo { span, instance: frame.instance, lint_root });
+        }
+        trace!("generate stacktrace: {:#?}", frames);
+        frames
+    }
+}
+
+#[doc(hidden)]
+/// Helper struct for the `dump_place` function.
+pub struct PlacePrinter<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> {
+    ecx: &'a InterpCx<'mir, 'tcx, M>,
+    place: Place<M::PointerTag>,
+}
+
+impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> std::fmt::Debug
+    for PlacePrinter<'a, 'mir, 'tcx, M>
+{
+    fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match self.place {
+            Place::Local { frame, local } => {
+                let mut allocs = Vec::new();
+                write!(fmt, "{:?}", local)?;
+                if frame != self.ecx.frame_idx() {
+                    write!(fmt, " ({} frames up)", self.ecx.frame_idx() - frame)?;
+                }
+                write!(fmt, ":")?;
+
+                match self.ecx.stack()[frame].locals[local].value {
+                    LocalValue::Dead => write!(fmt, " is dead")?,
+                    LocalValue::Uninitialized => write!(fmt, " is uninitialized")?,
+                    LocalValue::Live(Operand::Indirect(mplace)) => {
+                        write!(
+                            fmt,
+                            " by align({}){} ref {:?}:",
+                            mplace.align.bytes(),
+                            match mplace.meta {
+                                MemPlaceMeta::Meta(meta) => format!(" meta({:?})", meta),
+                                MemPlaceMeta::Poison | MemPlaceMeta::None => String::new(),
+                            },
+                            mplace.ptr,
+                        )?;
+                        allocs.extend(mplace.ptr.provenance.map(Provenance::get_alloc_id));
+                    }
+                    LocalValue::Live(Operand::Immediate(Immediate::Scalar(val))) => {
+                        write!(fmt, " {:?}", val)?;
+                        if let ScalarMaybeUninit::Scalar(Scalar::Ptr(ptr, _size)) = val {
+                            allocs.push(ptr.provenance.get_alloc_id());
+                        }
+                    }
+                    LocalValue::Live(Operand::Immediate(Immediate::ScalarPair(val1, val2))) => {
+                        write!(fmt, " ({:?}, {:?})", val1, val2)?;
+                        if let ScalarMaybeUninit::Scalar(Scalar::Ptr(ptr, _size)) = val1 {
+                            allocs.push(ptr.provenance.get_alloc_id());
+                        }
+                        if let ScalarMaybeUninit::Scalar(Scalar::Ptr(ptr, _size)) = val2 {
+                            allocs.push(ptr.provenance.get_alloc_id());
+                        }
+                    }
+                }
+
+                write!(fmt, ": {:?}", self.ecx.memory.dump_allocs(allocs))
+            }
+            Place::Ptr(mplace) => match mplace.ptr.provenance.map(Provenance::get_alloc_id) {
+                Some(alloc_id) => write!(
+                    fmt,
+                    "by align({}) ref {:?}: {:?}",
+                    mplace.align.bytes(),
+                    mplace.ptr,
+                    self.ecx.memory.dump_alloc(alloc_id)
+                ),
+                ptr => write!(fmt, " integral by ref: {:?}", ptr),
+            },
+        }
+    }
+}
+
+impl<'ctx, 'mir, 'tcx, Tag: Provenance, Extra> HashStable<StableHashingContext<'ctx>>
+    for Frame<'mir, 'tcx, Tag, Extra>
+where
+    Extra: HashStable<StableHashingContext<'ctx>>,
+    Tag: HashStable<StableHashingContext<'ctx>>,
+{
+    fn hash_stable(&self, hcx: &mut StableHashingContext<'ctx>, hasher: &mut StableHasher) {
+        // Exhaustive match on fields to make sure we forget no field.
+        let Frame {
+            body,
+            instance,
+            return_to_block,
+            return_place,
+            locals,
+            loc,
+            extra,
+            tracing_span: _,
+        } = self;
+        body.hash_stable(hcx, hasher);
+        instance.hash_stable(hcx, hasher);
+        return_to_block.hash_stable(hcx, hasher);
+        return_place.as_ref().map(|r| &**r).hash_stable(hcx, hasher);
+        locals.hash_stable(hcx, hasher);
+        loc.hash_stable(hcx, hasher);
+        extra.hash_stable(hcx, hasher);
+    }
+}
diff --git a/compiler/rustc_const_eval/src/interpret/intern.rs b/compiler/rustc_const_eval/src/interpret/intern.rs
new file mode 100644
index 00000000000..84e79408397
--- /dev/null
+++ b/compiler/rustc_const_eval/src/interpret/intern.rs
@@ -0,0 +1,437 @@
+//! This module specifies the type-based interner for constants.
+//!
+//! After a const evaluation has computed a value, before we destroy the const evaluator's session
+//! memory, we need to extract all memory allocations to the global memory pool so they stay around.
+//!
+//! In principle, this is not very complicated: we recursively walk the final value, follow all the
+//! pointers, and move all reachable allocations to the global `tcx` memory. The only complication
+//! is picking the right mutability for the allocations in a `static` initializer: we want to make
+//! as many allocations as possible immutable so LLVM can put them into read-only memory. At the
+//! same time, we need to make memory that could be mutated by the program mutable to avoid
+//! incorrect compilations. To achieve this, we do a type-based traversal of the final value,
+//! tracking mutable and shared references and `UnsafeCell` to determine the current mutability.
+//! (In principle, we could skip this type-based part for `const` and promoteds, as they need to be
+//! always immutable. At least for `const` however we use this opportunity to reject any `const`
+//! that contains allocations whose mutability we cannot identify.)
+
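
// Surface-level illustration (a hedged sketch, not part of this patch) of the
// mutability decisions described in the module docs above.
use std::sync::atomic::{AtomicU32, Ordering};

// No `UnsafeCell` anywhere behind this reference, so the interned allocation
// can be marked immutable and placed in read-only memory.
static FROZEN: &[u32] = &[1, 2, 3];

// `AtomicU32` contains an `UnsafeCell`, so the allocation behind this shared
// reference must stay mutable even though the reference itself is `&`.
static COUNTER: &AtomicU32 = &AtomicU32::new(0);

fn intern_example() {
    assert_eq!(FROZEN[0], 1);
    COUNTER.fetch_add(1, Ordering::Relaxed);
}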
+use super::validity::RefTracking;
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_errors::ErrorReported;
+use rustc_hir as hir;
+use rustc_middle::mir::interpret::InterpResult;
+use rustc_middle::ty::{self, layout::TyAndLayout, Ty};
+
+use rustc_ast::Mutability;
+
+use super::{AllocId, Allocation, InterpCx, MPlaceTy, Machine, MemoryKind, PlaceTy, ValueVisitor};
+use crate::const_eval;
+
+pub trait CompileTimeMachine<'mir, 'tcx, T> = Machine<
+    'mir,
+    'tcx,
+    MemoryKind = T,
+    PointerTag = AllocId,
+    ExtraFnVal = !,
+    FrameExtra = (),
+    AllocExtra = (),
+    MemoryMap = FxHashMap<AllocId, (MemoryKind<T>, Allocation)>,
+>;
+
+struct InternVisitor<'rt, 'mir, 'tcx, M: CompileTimeMachine<'mir, 'tcx, const_eval::MemoryKind>> {
+    /// The ectx from which we intern.
+    ecx: &'rt mut InterpCx<'mir, 'tcx, M>,
+    /// Previously encountered safe references.
+    ref_tracking: &'rt mut RefTracking<(MPlaceTy<'tcx>, InternMode)>,
+    /// A list of all encountered allocations. After type-based interning, we traverse this list to
+    /// also intern allocations that are only referenced by a raw pointer or inside a union.
+    leftover_allocations: &'rt mut FxHashSet<AllocId>,
+    /// The root kind of the value that we're looking at. This field is never mutated for a
+    /// particular allocation. It is primarily used to make as many allocations as possible
+    /// read-only so LLVM can place them in const memory.
+    mode: InternMode,
+    /// This field stores whether we are *currently* inside an `UnsafeCell`. This can affect
+    /// the intern mode of references we encounter.
+    inside_unsafe_cell: bool,
+}
+
+#[derive(Copy, Clone, Debug, PartialEq, Hash, Eq)]
+enum InternMode {
+    /// A static and its current mutability.  Below shared references inside a `static mut`,
+    /// this is *immutable*, and below mutable references inside an `UnsafeCell`, this
+    /// is *mutable*.
+    Static(hir::Mutability),
+    /// A `const`.
+    Const,
+}
+
+/// Signalling data structure to ensure we don't recurse
+/// into the memory of other constants or statics
+struct IsStaticOrFn;
+
+/// Intern an allocation without looking at its children.
+/// `mode` is the mode of the environment where we found this pointer.
+/// The mutability of the place to be interned is taken from `mode`; even if that says
+/// `immutable`, things might become mutable if `ty` is not frozen.
+/// `ty` can be `None` if there is no potential interior mutability
+/// to account for (e.g. for vtables).
+fn intern_shallow<'rt, 'mir, 'tcx, M: CompileTimeMachine<'mir, 'tcx, const_eval::MemoryKind>>(
+    ecx: &'rt mut InterpCx<'mir, 'tcx, M>,
+    leftover_allocations: &'rt mut FxHashSet<AllocId>,
+    alloc_id: AllocId,
+    mode: InternMode,
+    ty: Option<Ty<'tcx>>,
+) -> Option<IsStaticOrFn> {
+    trace!("intern_shallow {:?} with {:?}", alloc_id, mode);
+    // remove allocation
+    let tcx = ecx.tcx;
+    let (kind, mut alloc) = match ecx.memory.alloc_map.remove(&alloc_id) {
+        Some(entry) => entry,
+        None => {
+            // Pointer not found in local memory map. It is either a pointer to the global
+            // map, or dangling.
+            // If the pointer is dangling (neither in local nor global memory), we leave it
+            // to validation to error -- it has the much better error messages, pointing out where
+            // in the value the dangling reference lies.
+            // The `delay_span_bug` ensures that we don't forget such a check in validation.
+            if tcx.get_global_alloc(alloc_id).is_none() {
+                tcx.sess.delay_span_bug(ecx.tcx.span, "tried to intern dangling pointer");
+            }
+            // treat dangling pointers like other statics
+            // just to stop trying to recurse into them
+            return Some(IsStaticOrFn);
+        }
+    };
+    // This match is just a canary for future changes to `MemoryKind`, which most likely need
+    // changes in this function.
+    match kind {
+        MemoryKind::Stack
+        | MemoryKind::Machine(const_eval::MemoryKind::Heap)
+        | MemoryKind::CallerLocation => {}
+    }
+    // Set allocation mutability as appropriate. This is used by LLVM to put things into
+    // read-only memory, and also by Miri when evaluating other globals that
+    // access this one.
+    if let InternMode::Static(mutability) = mode {
+        // For this, we need to take into account `UnsafeCell`. When `ty` is `None`, we assume
+        // no interior mutability.
+        let frozen = ty.map_or(true, |ty| ty.is_freeze(ecx.tcx, ecx.param_env));
+        // For statics, allocation mutability is the combination of place mutability and
+        // type mutability.
+        // The entire allocation needs to be mutable if it contains an `UnsafeCell` anywhere.
+        let immutable = mutability == Mutability::Not && frozen;
+        if immutable {
+            alloc.mutability = Mutability::Not;
+        } else {
+            // Just making sure we are not "upgrading" an immutable allocation to mutable.
+            assert_eq!(alloc.mutability, Mutability::Mut);
+        }
+    } else {
+        // No matter what, *constants are never mutable*. Mutating them is UB.
+        // See const_eval::machine::MemoryExtra::can_access_statics for why
+        // immutability is so important.
+
+        // Validation will ensure that there is no `UnsafeCell` on an immutable allocation.
+        alloc.mutability = Mutability::Not;
+    };
+    // link the alloc id to the actual allocation
+    let alloc = tcx.intern_const_alloc(alloc);
+    leftover_allocations.extend(alloc.relocations().iter().map(|&(_, alloc_id)| alloc_id));
+    tcx.set_alloc_id_memory(alloc_id, alloc);
+    None
+}
+
+impl<'rt, 'mir, 'tcx, M: CompileTimeMachine<'mir, 'tcx, const_eval::MemoryKind>>
+    InternVisitor<'rt, 'mir, 'tcx, M>
+{
+    fn intern_shallow(
+        &mut self,
+        alloc_id: AllocId,
+        mode: InternMode,
+        ty: Option<Ty<'tcx>>,
+    ) -> Option<IsStaticOrFn> {
+        intern_shallow(self.ecx, self.leftover_allocations, alloc_id, mode, ty)
+    }
+}
+
+impl<'rt, 'mir, 'tcx: 'mir, M: CompileTimeMachine<'mir, 'tcx, const_eval::MemoryKind>>
+    ValueVisitor<'mir, 'tcx, M> for InternVisitor<'rt, 'mir, 'tcx, M>
+{
+    type V = MPlaceTy<'tcx>;
+
+    #[inline(always)]
+    fn ecx(&self) -> &InterpCx<'mir, 'tcx, M> {
+        &self.ecx
+    }
+
+    fn visit_aggregate(
+        &mut self,
+        mplace: &MPlaceTy<'tcx>,
+        fields: impl Iterator<Item = InterpResult<'tcx, Self::V>>,
+    ) -> InterpResult<'tcx> {
+        // ZSTs cannot contain pointers, so we can skip them.
+        if mplace.layout.is_zst() {
+            return Ok(());
+        }
+
+        if let Some(def) = mplace.layout.ty.ty_adt_def() {
+            if Some(def.did) == self.ecx.tcx.lang_items().unsafe_cell_type() {
+                // We are crossing over an `UnsafeCell`, so we can mutate again. This means that
+                // references we encounter inside here are interned as pointing to mutable
+                // allocations.
+                // Remember the `old` value to handle nested `UnsafeCell`.
+                let old = std::mem::replace(&mut self.inside_unsafe_cell, true);
+                let walked = self.walk_aggregate(mplace, fields);
+                self.inside_unsafe_cell = old;
+                return walked;
+            }
+        }
+
+        self.walk_aggregate(mplace, fields)
+    }
+
+    fn visit_value(&mut self, mplace: &MPlaceTy<'tcx>) -> InterpResult<'tcx> {
+        // Handle Reference types, as these are the only relocations supported by const eval.
+        // Raw pointers (and boxes) are handled by the `leftover_allocations` logic.
+        let tcx = self.ecx.tcx;
+        let ty = mplace.layout.ty;
+        if let ty::Ref(_, referenced_ty, ref_mutability) = *ty.kind() {
+            let value = self.ecx.read_immediate(&(*mplace).into())?;
+            let mplace = self.ecx.ref_to_mplace(&value)?;
+            assert_eq!(mplace.layout.ty, referenced_ty);
+            // Handle trait object vtables.
+            if let ty::Dynamic(..) =
+                tcx.struct_tail_erasing_lifetimes(referenced_ty, self.ecx.param_env).kind()
+            {
+                let ptr = self.ecx.scalar_to_ptr(mplace.meta.unwrap_meta());
+                if let Some(alloc_id) = ptr.provenance {
+                    // Explicitly choose const mode here, since vtables are immutable, even
+                    // if the reference of the fat pointer is mutable.
+                    self.intern_shallow(alloc_id, InternMode::Const, None);
+                } else {
+                    // Validation will error (with a better message) on an invalid vtable pointer.
+                    // Let validation show the error message, but make sure it *does* error.
+                    tcx.sess
+                        .delay_span_bug(tcx.span, "vtable pointers cannot be integer pointers");
+                }
+            }
+            // Check if we have encountered this pointer+layout combination before.
+            // Only recurse for allocation-backed pointers.
+            if let Some(alloc_id) = mplace.ptr.provenance {
+                // Compute the mode with which we intern this. Our goal here is to make as many
+                // statics as we can immutable so they can be placed in read-only memory by LLVM.
+                let ref_mode = match self.mode {
+                    InternMode::Static(mutbl) => {
+                        // In statics, merge outer mutability with reference mutability and
+                        // take into account whether we are in an `UnsafeCell`.
+
+                        // The only way a mutable reference actually works as a mutable reference is
+                        // by being in a `static mut` directly or behind another mutable reference.
+                        // If there's an immutable reference or we are inside a `static`, then our
+                        // mutable reference is equivalent to an immutable one. As an example:
+                        // `&&mut Foo` is semantically equivalent to `&&Foo`
+                        match ref_mutability {
+                            _ if self.inside_unsafe_cell => {
+                                // Inside an `UnsafeCell` is like inside a `static mut`, the "outer"
+                                // mutability does not matter.
+                                InternMode::Static(ref_mutability)
+                            }
+                            Mutability::Not => {
+                                // A shared reference, things become immutable.
+                                // We do *not* consider `freeze` here: `intern_shallow` considers
+                                // `freeze` for the actual mutability of this allocation; the intern
+                                // mode for references contained in this allocation is tracked more
+                                // precisely when traversing the referenced data (by tracking
+                                // `UnsafeCell`). This makes sure that `&(&i32, &Cell<i32>)` still
+                                // has the left inner reference interned into a read-only
+                                // allocation.
+                                InternMode::Static(Mutability::Not)
+                            }
+                            Mutability::Mut => {
+                                // Mutable reference.
+                                InternMode::Static(mutbl)
+                            }
+                        }
+                    }
+                    InternMode::Const => {
+                        // Ignore `UnsafeCell`, everything is immutable.  Validity does some sanity
+                        // checking for mutable references that we encounter -- they must all be
+                        // ZST.
+                        InternMode::Const
+                    }
+                };
+                match self.intern_shallow(alloc_id, ref_mode, Some(referenced_ty)) {
+                    // No need to recurse, these are interned already and statics may have
+                    // cycles, so we don't want to recurse there
+                    Some(IsStaticOrFn) => {}
+                    // intern everything referenced by this value. The mutability is taken from the
+                    // reference. It is checked above that mutable references only happen in
+                    // `static mut`
+                    None => self.ref_tracking.track((mplace, ref_mode), || ()),
+                }
+            }
+            Ok(())
+        } else {
+            // Not a reference -- proceed recursively.
+            self.walk_value(mplace)
+        }
+    }
+}
+
+#[derive(Copy, Clone, Debug, PartialEq, Hash, Eq)]
+pub enum InternKind {
+    /// The `mutability` of the static, ignoring the type which may have interior mutability.
+    Static(hir::Mutability),
+    Constant,
+    Promoted,
+}
+
+/// Intern `ret` and everything it references.
+///
+/// This *cannot raise an interpreter error*.  Doing so is left to validation, which
+/// tracks where in the value we are and thus can show much better error messages.
+/// Any errors here would anyway be turned into `const_err` lints, whereas validation failures
+/// are hard errors.
+#[tracing::instrument(level = "debug", skip(ecx))]
+pub fn intern_const_alloc_recursive<M: CompileTimeMachine<'mir, 'tcx, const_eval::MemoryKind>>(
+    ecx: &mut InterpCx<'mir, 'tcx, M>,
+    intern_kind: InternKind,
+    ret: &MPlaceTy<'tcx>,
+) -> Result<(), ErrorReported>
+where
+    'tcx: 'mir,
+{
+    let tcx = ecx.tcx;
+    let base_intern_mode = match intern_kind {
+        InternKind::Static(mutbl) => InternMode::Static(mutbl),
+        // `Constant` includes array lengths.
+        InternKind::Constant | InternKind::Promoted => InternMode::Const,
+    };
+
+    // Type based interning.
+    // `ref_tracking` tracks typed references we have already interned and still need to crawl for
+    // more typed information inside them.
+    // `leftover_allocations` collects *all* allocations we see, because some might not
+    // be available in a typed way. They get interned at the end.
+    let mut ref_tracking = RefTracking::empty();
+    let leftover_allocations = &mut FxHashSet::default();
+
+    // start with the outermost allocation
+    intern_shallow(
+        ecx,
+        leftover_allocations,
+        // The outermost allocation must exist, because we allocated it with
+        // `Memory::allocate`.
+        ret.ptr.provenance.unwrap(),
+        base_intern_mode,
+        Some(ret.layout.ty),
+    );
+
+    ref_tracking.track((*ret, base_intern_mode), || ());
+
+    while let Some(((mplace, mode), _)) = ref_tracking.todo.pop() {
+        let res = InternVisitor {
+            ref_tracking: &mut ref_tracking,
+            ecx,
+            mode,
+            leftover_allocations,
+            inside_unsafe_cell: false,
+        }
+        .visit_value(&mplace);
+        // We deliberately *ignore* interpreter errors here.  When there is a problem, the remaining
+        // references are "leftover"-interned, and later validation will show a proper error
+        // and point at the right part of the value causing the problem.
+        match res {
+            Ok(()) => {}
+            Err(error) => {
+                ecx.tcx.sess.delay_span_bug(
+                    ecx.tcx.span,
+                    &format!(
+                        "error during interning should later cause validation failure: {}",
+                        error
+                    ),
+                );
+            }
+        }
+    }
+
+    // Intern the rest of the allocations as mutable. These might be inside unions, padding, raw
+    // pointers, ... so we can't intern them according to their type.
+
+    let mut todo: Vec<_> = leftover_allocations.iter().cloned().collect();
+    while let Some(alloc_id) = todo.pop() {
+        if let Some((_, mut alloc)) = ecx.memory.alloc_map.remove(&alloc_id) {
+            // We can't call the `intern_shallow` method here, as its logic is tailored to safe
+            // references and a `leftover_allocations` set (where we only have a todo-list here).
+            // So we hand-roll the interning logic here again.
+            match intern_kind {
+                // Statics may contain mutable allocations even behind relocations.
+                // Even for immutable statics it would be ok to have mutable allocations behind
+                // raw pointers, e.g. for `static FOO: *const AtomicUsize = &AtomicUsize::new(42)`.
+                InternKind::Static(_) => {}
+                // Raw pointers in promoteds may only point to immutable things so we mark
+                // everything as immutable.
+                // It is UB to mutate through a raw pointer obtained via an immutable reference:
+                // Since all references and pointers inside a promoted must by their very definition
+                // be created from an immutable reference (and promotion also excludes interior
+                // mutability), mutating through them would be UB.
+                // There's no way we can check whether the user is using raw pointers correctly,
+                // so all we can do is mark this as immutable here.
+                InternKind::Promoted => {
+                    // See const_eval::machine::MemoryExtra::can_access_statics for why
+                    // immutability is so important.
+                    alloc.mutability = Mutability::Not;
+                }
+                InternKind::Constant => {
+                    // If it's a constant, we should not have any "leftovers" as everything
+                    // is tracked by const-checking.
+                    // FIXME: downgrade this to a warning? It rejects some legitimate consts,
+                    // such as `const CONST_RAW: *const Vec<i32> = &Vec::new() as *const _;`.
+                    ecx.tcx
+                        .sess
+                        .span_err(ecx.tcx.span, "untyped pointers are not allowed in constant");
+                    // For better errors later, mark the allocation as immutable.
+                    alloc.mutability = Mutability::Not;
+                }
+            }
+            let alloc = tcx.intern_const_alloc(alloc);
+            tcx.set_alloc_id_memory(alloc_id, alloc);
+            for &(_, alloc_id) in alloc.relocations().iter() {
+                if leftover_allocations.insert(alloc_id) {
+                    todo.push(alloc_id);
+                }
+            }
+        } else if ecx.memory.dead_alloc_map.contains_key(&alloc_id) {
+            // Codegen does not like dangling pointers, and generally `tcx` assumes that
+            // all allocations referenced anywhere actually exist. So, make sure we error here.
+            ecx.tcx.sess.span_err(ecx.tcx.span, "encountered dangling pointer in final constant");
+            return Err(ErrorReported);
+        } else if ecx.tcx.get_global_alloc(alloc_id).is_none() {
+            // We have hit an `AllocId` that is neither in local nor global memory and isn't
+            // marked as dangling by local memory.  That should be impossible.
+            span_bug!(ecx.tcx.span, "encountered unknown alloc id {:?}", alloc_id);
+        }
+    }
+    Ok(())
+}
+
+impl<'mir, 'tcx: 'mir, M: super::intern::CompileTimeMachine<'mir, 'tcx, !>>
+    InterpCx<'mir, 'tcx, M>
+{
+    /// A helper function that allocates memory for the layout given and gives you access to mutate
+    /// it. Once your own mutation code is done, the backing `Allocation` is removed from the
+    /// current `Memory` and returned.
+    pub fn intern_with_temp_alloc(
+        &mut self,
+        layout: TyAndLayout<'tcx>,
+        f: impl FnOnce(
+            &mut InterpCx<'mir, 'tcx, M>,
+            &PlaceTy<'tcx, M::PointerTag>,
+        ) -> InterpResult<'tcx, ()>,
+    ) -> InterpResult<'tcx, &'tcx Allocation> {
+        let dest = self.allocate(layout, MemoryKind::Stack)?;
+        f(self, &dest.into())?;
+        let mut alloc = self.memory.alloc_map.remove(&dest.ptr.provenance.unwrap()).unwrap().1;
+        alloc.mutability = Mutability::Not;
+        Ok(self.tcx.intern_const_alloc(alloc))
+    }
+}
diff --git a/compiler/rustc_const_eval/src/interpret/intrinsics.rs b/compiler/rustc_const_eval/src/interpret/intrinsics.rs
new file mode 100644
index 00000000000..07e974b7266
--- /dev/null
+++ b/compiler/rustc_const_eval/src/interpret/intrinsics.rs
@@ -0,0 +1,585 @@
+//! Intrinsics and other functions that the miri engine executes without
+//! looking at their MIR. Intrinsics/functions supported here are shared by CTFE
+//! and miri.
+
+use std::convert::TryFrom;
+
+use rustc_hir::def_id::DefId;
+use rustc_middle::mir::{
+    self,
+    interpret::{ConstValue, GlobalId, InterpResult, Scalar},
+    BinOp,
+};
+use rustc_middle::ty;
+use rustc_middle::ty::layout::LayoutOf as _;
+use rustc_middle::ty::subst::SubstsRef;
+use rustc_middle::ty::{Ty, TyCtxt};
+use rustc_span::symbol::{sym, Symbol};
+use rustc_target::abi::{Abi, Align, Primitive, Size};
+
+use super::{
+    util::ensure_monomorphic_enough, CheckInAllocMsg, ImmTy, InterpCx, Machine, OpTy, PlaceTy,
+    Pointer,
+};
+
+mod caller_location;
+mod type_name;
+
+fn numeric_intrinsic<Tag>(name: Symbol, bits: u128, kind: Primitive) -> Scalar<Tag> {
+    let size = match kind {
+        Primitive::Int(integer, _) => integer.size(),
+        _ => bug!("invalid `{}` argument: {:?}", name, bits),
+    };
+    let extra = 128 - u128::from(size.bits());
+    let bits_out = match name {
+        sym::ctpop => u128::from(bits.count_ones()),
+        sym::ctlz => u128::from(bits.leading_zeros()) - extra,
+        sym::cttz => u128::from((bits << extra).trailing_zeros()) - extra,
+        sym::bswap => (bits << extra).swap_bytes(),
+        sym::bitreverse => (bits << extra).reverse_bits(),
+        _ => bug!("not a numeric intrinsic: {}", name),
+    };
+    Scalar::from_uint(bits_out, size)
+}
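
// Standalone mirror (a sketch) of the width adjustment above: the interpreter
// keeps every integer in a `u128`, so for an `n`-bit value the leading-zero
// count of the `u128` is `128 - n` too large and must be corrected.
fn ctlz_of_u128_backed(bits: u128, size_bits: u32) -> u128 {
    let extra = 128 - size_bits;
    u128::from(bits.leading_zeros() - extra)
}

fn ctlz_example() {
    // 0b0000_0001 viewed as an 8-bit value has 7 leading zeros, not 127.
    assert_eq!(ctlz_of_u128_backed(0b0000_0001, 8), 7);
    assert_eq!(ctlz_of_u128_backed(0b0000_0001, 8), u128::from(0b0000_0001u8.leading_zeros()));
}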
+
+/// The logic for all nullary intrinsics is implemented here. These intrinsics don't get evaluated
+/// inside an `InterpCx` and instead have their value computed directly from rustc internal info.
+crate fn eval_nullary_intrinsic<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    param_env: ty::ParamEnv<'tcx>,
+    def_id: DefId,
+    substs: SubstsRef<'tcx>,
+) -> InterpResult<'tcx, ConstValue<'tcx>> {
+    let tp_ty = substs.type_at(0);
+    let name = tcx.item_name(def_id);
+    Ok(match name {
+        sym::type_name => {
+            ensure_monomorphic_enough(tcx, tp_ty)?;
+            let alloc = type_name::alloc_type_name(tcx, tp_ty);
+            ConstValue::Slice { data: alloc, start: 0, end: alloc.len() }
+        }
+        sym::needs_drop => {
+            ensure_monomorphic_enough(tcx, tp_ty)?;
+            ConstValue::from_bool(tp_ty.needs_drop(tcx, param_env))
+        }
+        sym::min_align_of | sym::pref_align_of => {
+            // Correctly handles non-monomorphic calls, so there is no need for ensure_monomorphic_enough.
+            let layout = tcx.layout_of(param_env.and(tp_ty)).map_err(|e| err_inval!(Layout(e)))?;
+            let n = match name {
+                sym::pref_align_of => layout.align.pref.bytes(),
+                sym::min_align_of => layout.align.abi.bytes(),
+                _ => bug!(),
+            };
+            ConstValue::from_machine_usize(n, &tcx)
+        }
+        sym::type_id => {
+            ensure_monomorphic_enough(tcx, tp_ty)?;
+            ConstValue::from_u64(tcx.type_id_hash(tp_ty))
+        }
+        sym::variant_count => match tp_ty.kind() {
+            // Correctly handles non-monomorphic calls, so there is no need for ensure_monomorphic_enough.
+            ty::Adt(ref adt, _) => ConstValue::from_machine_usize(adt.variants.len() as u64, &tcx),
+            ty::Projection(_)
+            | ty::Opaque(_, _)
+            | ty::Param(_)
+            | ty::Bound(_, _)
+            | ty::Placeholder(_)
+            | ty::Infer(_) => throw_inval!(TooGeneric),
+            ty::Bool
+            | ty::Char
+            | ty::Int(_)
+            | ty::Uint(_)
+            | ty::Float(_)
+            | ty::Foreign(_)
+            | ty::Str
+            | ty::Array(_, _)
+            | ty::Slice(_)
+            | ty::RawPtr(_)
+            | ty::Ref(_, _, _)
+            | ty::FnDef(_, _)
+            | ty::FnPtr(_)
+            | ty::Dynamic(_, _)
+            | ty::Closure(_, _)
+            | ty::Generator(_, _, _)
+            | ty::GeneratorWitness(_)
+            | ty::Never
+            | ty::Tuple(_)
+            | ty::Error(_) => ConstValue::from_machine_usize(0u64, &tcx),
+        },
+        other => bug!("`{}` is not a zero arg intrinsic", other),
+    })
+}
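
// A hedged sketch of the standard-library surface that bottoms out in these
// nullary intrinsics (`type_name`, `needs_drop`, `min_align_of`, `type_id`).
use std::any::{type_name, TypeId};
use std::mem::{align_of, needs_drop};

fn nullary_example() {
    println!("{}", type_name::<Vec<u8>>()); // sym::type_name
    assert!(needs_drop::<String>());        // sym::needs_drop
    assert!(!needs_drop::<u32>());
    println!("{}", align_of::<u64>());      // sym::min_align_of
    assert_ne!(TypeId::of::<u8>(), TypeId::of::<i8>()); // sym::type_id
}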
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+    /// Returns `true` if emulation happened.
+    /// Here we implement the intrinsics that are common to all Miri instances; individual machines can add their own
+    /// intrinsic handling.
+    pub fn emulate_intrinsic(
+        &mut self,
+        instance: ty::Instance<'tcx>,
+        args: &[OpTy<'tcx, M::PointerTag>],
+        ret: Option<(&PlaceTy<'tcx, M::PointerTag>, mir::BasicBlock)>,
+    ) -> InterpResult<'tcx, bool> {
+        let substs = instance.substs;
+        let intrinsic_name = self.tcx.item_name(instance.def_id());
+
+        // First handle intrinsics without return place.
+        let (dest, ret) = match ret {
+            None => match intrinsic_name {
+                sym::transmute => throw_ub_format!("transmuting to uninhabited type"),
+                sym::abort => M::abort(self, "the program aborted execution".to_owned())?,
+                // Unsupported diverging intrinsic.
+                _ => return Ok(false),
+            },
+            Some(p) => p,
+        };
+
+        // Keep the patterns in this match ordered the same as the list in
+        // `src/librustc_middle/ty/constness.rs`
+        match intrinsic_name {
+            sym::caller_location => {
+                let span = self.find_closest_untracked_caller_location();
+                let location = self.alloc_caller_location_for_span(span);
+                self.write_immediate(location.to_ref(self), dest)?;
+            }
+
+            sym::min_align_of_val | sym::size_of_val => {
+                // Avoid `deref_operand` -- this is not a deref, the ptr does not have to be
+                // dereferenceable!
+                let place = self.ref_to_mplace(&self.read_immediate(&args[0])?)?;
+                let (size, align) = self
+                    .size_and_align_of_mplace(&place)?
+                    .ok_or_else(|| err_unsup_format!("`extern type` does not have known layout"))?;
+
+                let result = match intrinsic_name {
+                    sym::min_align_of_val => align.bytes(),
+                    sym::size_of_val => size.bytes(),
+                    _ => bug!(),
+                };
+
+                self.write_scalar(Scalar::from_machine_usize(result, self), dest)?;
+            }
+
+            sym::min_align_of
+            | sym::pref_align_of
+            | sym::needs_drop
+            | sym::type_id
+            | sym::type_name
+            | sym::variant_count => {
+                let gid = GlobalId { instance, promoted: None };
+                let ty = match intrinsic_name {
+                    sym::min_align_of | sym::pref_align_of | sym::variant_count => {
+                        self.tcx.types.usize
+                    }
+                    sym::needs_drop => self.tcx.types.bool,
+                    sym::type_id => self.tcx.types.u64,
+                    sym::type_name => self.tcx.mk_static_str(),
+                    _ => bug!("already checked for nullary intrinsics"),
+                };
+                let val =
+                    self.tcx.const_eval_global_id(self.param_env, gid, Some(self.tcx.span))?;
+                let val = self.const_val_to_op(val, ty, Some(dest.layout))?;
+                self.copy_op(&val, dest)?;
+            }
+
+            sym::ctpop
+            | sym::cttz
+            | sym::cttz_nonzero
+            | sym::ctlz
+            | sym::ctlz_nonzero
+            | sym::bswap
+            | sym::bitreverse => {
+                let ty = substs.type_at(0);
+                let layout_of = self.layout_of(ty)?;
+                let val = self.read_scalar(&args[0])?.check_init()?;
+                let bits = val.to_bits(layout_of.size)?;
+                let kind = match layout_of.abi {
+                    Abi::Scalar(ref scalar) => scalar.value,
+                    _ => span_bug!(
+                        self.cur_span(),
+                        "{} called on invalid type {:?}",
+                        intrinsic_name,
+                        ty
+                    ),
+                };
+                let (nonzero, intrinsic_name) = match intrinsic_name {
+                    sym::cttz_nonzero => (true, sym::cttz),
+                    sym::ctlz_nonzero => (true, sym::ctlz),
+                    other => (false, other),
+                };
+                if nonzero && bits == 0 {
+                    throw_ub_format!("`{}_nonzero` called on 0", intrinsic_name);
+                }
+                let out_val = numeric_intrinsic(intrinsic_name, bits, kind);
+                self.write_scalar(out_val, dest)?;
+            }
+            sym::add_with_overflow | sym::sub_with_overflow | sym::mul_with_overflow => {
+                let lhs = self.read_immediate(&args[0])?;
+                let rhs = self.read_immediate(&args[1])?;
+                let bin_op = match intrinsic_name {
+                    sym::add_with_overflow => BinOp::Add,
+                    sym::sub_with_overflow => BinOp::Sub,
+                    sym::mul_with_overflow => BinOp::Mul,
+                    _ => bug!("Already checked for int ops"),
+                };
+                self.binop_with_overflow(bin_op, &lhs, &rhs, dest)?;
+            }
+            sym::saturating_add | sym::saturating_sub => {
+                let l = self.read_immediate(&args[0])?;
+                let r = self.read_immediate(&args[1])?;
+                let is_add = intrinsic_name == sym::saturating_add;
+                let (val, overflowed, _ty) = self.overflowing_binary_op(
+                    if is_add { BinOp::Add } else { BinOp::Sub },
+                    &l,
+                    &r,
+                )?;
+                let val = if overflowed {
+                    let num_bits = l.layout.size.bits();
+                    if l.layout.abi.is_signed() {
+                        // For signed ints the saturated value depends on the sign of the first
+                        // term since the sign of the second term can be inferred from this and
+                        // the fact that the operation has overflowed (if either is 0 no
+                        // overflow can occur)
+                        let first_term: u128 = l.to_scalar()?.to_bits(l.layout.size)?;
+                        let first_term_positive = first_term & (1 << (num_bits - 1)) == 0;
+                        if first_term_positive {
+                            // Negative overflow not possible since the positive first term
+                            // can only increase an (in range) negative term for addition
+                            // or corresponding negated positive term for subtraction
+                            Scalar::from_uint(
+                                (1u128 << (num_bits - 1)) - 1, // max positive
+                                Size::from_bits(num_bits),
+                            )
+                        } else {
+                            // Positive overflow not possible for similar reason
+                            // max negative
+                            Scalar::from_uint(1u128 << (num_bits - 1), Size::from_bits(num_bits))
+                        }
+                    } else {
+                        // unsigned
+                        if is_add {
+                            // max unsigned
+                            Scalar::from_uint(
+                                u128::MAX >> (128 - num_bits),
+                                Size::from_bits(num_bits),
+                            )
+                        } else {
+                            // underflow to 0
+                            Scalar::from_uint(0u128, Size::from_bits(num_bits))
+                        }
+                    }
+                } else {
+                    val
+                };
+                self.write_scalar(val, dest)?;
+            }
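
// Standalone mirror (a sketch) of the signed saturation rule above, using i32:
// on overflow, the result saturates toward the sign of the first operand.
fn saturating_add_i32(l: i32, r: i32) -> i32 {
    let (val, overflowed) = l.overflowing_add(r);
    if !overflowed {
        val
    } else if l >= 0 {
        i32::MAX // a non-negative first term can only overflow upwards
    } else {
        i32::MIN // a negative first term can only overflow downwards
    }
}

fn saturating_example() {
    assert_eq!(saturating_add_i32(i32::MAX, 1), i32::MAX);
    assert_eq!(saturating_add_i32(i32::MIN, -1), i32::MIN);
    assert_eq!(saturating_add_i32(40, 2), 42);
    assert_eq!(i32::MAX.saturating_add(1), i32::MAX); // same behaviour as the std method
}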
+            sym::discriminant_value => {
+                let place = self.deref_operand(&args[0])?;
+                let discr_val = self.read_discriminant(&place.into())?.0;
+                self.write_scalar(discr_val, dest)?;
+            }
+            sym::unchecked_shl
+            | sym::unchecked_shr
+            | sym::unchecked_add
+            | sym::unchecked_sub
+            | sym::unchecked_mul
+            | sym::unchecked_div
+            | sym::unchecked_rem => {
+                let l = self.read_immediate(&args[0])?;
+                let r = self.read_immediate(&args[1])?;
+                let bin_op = match intrinsic_name {
+                    sym::unchecked_shl => BinOp::Shl,
+                    sym::unchecked_shr => BinOp::Shr,
+                    sym::unchecked_add => BinOp::Add,
+                    sym::unchecked_sub => BinOp::Sub,
+                    sym::unchecked_mul => BinOp::Mul,
+                    sym::unchecked_div => BinOp::Div,
+                    sym::unchecked_rem => BinOp::Rem,
+                    _ => bug!("Already checked for int ops"),
+                };
+                let (val, overflowed, _ty) = self.overflowing_binary_op(bin_op, &l, &r)?;
+                if overflowed {
+                    let layout = self.layout_of(substs.type_at(0))?;
+                    let r_val = r.to_scalar()?.to_bits(layout.size)?;
+                    if let sym::unchecked_shl | sym::unchecked_shr = intrinsic_name {
+                        throw_ub_format!("overflowing shift by {} in `{}`", r_val, intrinsic_name);
+                    } else {
+                        throw_ub_format!("overflow executing `{}`", intrinsic_name);
+                    }
+                }
+                self.write_scalar(val, dest)?;
+            }
+            sym::rotate_left | sym::rotate_right => {
+                // rotate_left: (X << (S % BW)) | (X >> ((BW - S) % BW))
+                // rotate_right: (X << ((BW - S) % BW)) | (X >> (S % BW))
+                let layout = self.layout_of(substs.type_at(0))?;
+                let val = self.read_scalar(&args[0])?.check_init()?;
+                let val_bits = val.to_bits(layout.size)?;
+                let raw_shift = self.read_scalar(&args[1])?.check_init()?;
+                let raw_shift_bits = raw_shift.to_bits(layout.size)?;
+                let width_bits = u128::from(layout.size.bits());
+                let shift_bits = raw_shift_bits % width_bits;
+                let inv_shift_bits = (width_bits - shift_bits) % width_bits;
+                let result_bits = if intrinsic_name == sym::rotate_left {
+                    (val_bits << shift_bits) | (val_bits >> inv_shift_bits)
+                } else {
+                    (val_bits >> shift_bits) | (val_bits << inv_shift_bits)
+                };
+                let truncated_bits = self.truncate(result_bits, layout);
+                let result = Scalar::from_uint(truncated_bits, layout.size);
+                self.write_scalar(result, dest)?;
+            }
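
// Standalone check (a sketch) of the rotate formula from the comment above,
// for u32; the interpreter performs the same arithmetic on the raw bits.
fn rotate_left_u32(x: u32, s: u32) -> u32 {
    let bw = u32::BITS;
    let shift = s % bw;
    let inv_shift = (bw - shift) % bw; // both shift amounts stay below `bw`
    (x << shift) | (x >> inv_shift)
}

fn rotate_example() {
    assert_eq!(rotate_left_u32(0x8000_0001, 1), 0x0000_0003);
    assert_eq!(rotate_left_u32(0xDEAD_BEEF, 40), 0xDEAD_BEEFu32.rotate_left(40));
    assert_eq!(rotate_left_u32(1, 0), 1);
}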
+            sym::copy => {
+                self.copy_intrinsic(&args[0], &args[1], &args[2], /*nonoverlapping*/ false)?;
+            }
+            sym::offset => {
+                let ptr = self.read_pointer(&args[0])?;
+                let offset_count = self.read_scalar(&args[1])?.to_machine_isize(self)?;
+                let pointee_ty = substs.type_at(0);
+
+                let offset_ptr = self.ptr_offset_inbounds(ptr, pointee_ty, offset_count)?;
+                self.write_pointer(offset_ptr, dest)?;
+            }
+            sym::arith_offset => {
+                let ptr = self.read_pointer(&args[0])?;
+                let offset_count = self.read_scalar(&args[1])?.to_machine_isize(self)?;
+                let pointee_ty = substs.type_at(0);
+
+                let pointee_size = i64::try_from(self.layout_of(pointee_ty)?.size.bytes()).unwrap();
+                let offset_bytes = offset_count.wrapping_mul(pointee_size);
+                let offset_ptr = ptr.wrapping_signed_offset(offset_bytes, self);
+                self.write_pointer(offset_ptr, dest)?;
+            }
+            sym::ptr_offset_from => {
+                let a = self.read_immediate(&args[0])?.to_scalar()?;
+                let b = self.read_immediate(&args[1])?.to_scalar()?;
+
+                // Special case: if both scalars are *equal integers*
+                // and not null, we pretend there is an allocation of size 0 right there,
+                // and their offset is 0. (There's never a valid object at null, making it an
+                // exception from the exception.)
+                // This is the dual to the special exception for offset-by-0
+                // in the inbounds pointer offset operation (see the Miri code, `src/operator.rs`).
+                //
+                // Control flow is weird because we cannot early-return (to reach the
+                // `go_to_block` at the end).
+                let done = if let (Ok(a), Ok(b)) = (a.try_to_int(), b.try_to_int()) {
+                    let a = a.try_to_machine_usize(*self.tcx).unwrap();
+                    let b = b.try_to_machine_usize(*self.tcx).unwrap();
+                    if a == b && a != 0 {
+                        self.write_scalar(Scalar::from_machine_isize(0, self), dest)?;
+                        true
+                    } else {
+                        false
+                    }
+                } else {
+                    false
+                };
+
+                if !done {
+                    // General case: we need two pointers.
+                    let a = self.scalar_to_ptr(a);
+                    let b = self.scalar_to_ptr(b);
+                    let (a_alloc_id, a_offset, _) = self.memory.ptr_get_alloc(a)?;
+                    let (b_alloc_id, b_offset, _) = self.memory.ptr_get_alloc(b)?;
+                    if a_alloc_id != b_alloc_id {
+                        throw_ub_format!(
+                            "ptr_offset_from cannot compute offset of pointers into different \
+                            allocations.",
+                        );
+                    }
+                    let usize_layout = self.layout_of(self.tcx.types.usize)?;
+                    let isize_layout = self.layout_of(self.tcx.types.isize)?;
+                    let a_offset = ImmTy::from_uint(a_offset.bytes(), usize_layout);
+                    let b_offset = ImmTy::from_uint(b_offset.bytes(), usize_layout);
+                    let (val, _overflowed, _ty) =
+                        self.overflowing_binary_op(BinOp::Sub, &a_offset, &b_offset)?;
+                    let pointee_layout = self.layout_of(substs.type_at(0))?;
+                    let val = ImmTy::from_scalar(val, isize_layout);
+                    let size = ImmTy::from_int(pointee_layout.size.bytes(), isize_layout);
+                    self.exact_div(&val, &size, dest)?;
+                }
+            }
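
// Surface-level counterpart (a sketch): `<*const T>::offset_from` is only
// defined for pointers into the same allocated object, matching the check
// above, and its result is in units of `T`, not bytes.
fn offset_from_example() {
    let xs = [0u16; 8];
    let a: *const u16 = &xs[1];
    let b: *const u16 = &xs[6];
    assert_eq!(unsafe { b.offset_from(a) }, 5);
}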
+
+            sym::transmute => {
+                self.copy_op_transmute(&args[0], dest)?;
+            }
+            sym::assert_inhabited => {
+                let ty = instance.substs.type_at(0);
+                let layout = self.layout_of(ty)?;
+
+                if layout.abi.is_uninhabited() {
+                    // The run-time intrinsic panics just to get a good backtrace; here we abort
+                    // since there is no problem showing a backtrace even for aborts.
+                    M::abort(
+                        self,
+                        format!(
+                            "aborted execution: attempted to instantiate uninhabited type `{}`",
+                            ty
+                        ),
+                    )?;
+                }
+            }
+            sym::simd_insert => {
+                let index = u64::from(self.read_scalar(&args[1])?.to_u32()?);
+                let elem = &args[2];
+                let input = &args[0];
+                let (len, e_ty) = input.layout.ty.simd_size_and_type(*self.tcx);
+                assert!(
+                    index < len,
+                    "Index `{}` must be in bounds of vector type `{}`: `[0, {})`",
+                    index,
+                    e_ty,
+                    len
+                );
+                assert_eq!(
+                    input.layout, dest.layout,
+                    "Return type `{}` must match vector type `{}`",
+                    dest.layout.ty, input.layout.ty
+                );
+                assert_eq!(
+                    elem.layout.ty, e_ty,
+                    "Scalar element type `{}` must match vector element type `{}`",
+                    elem.layout.ty, e_ty
+                );
+
+                for i in 0..len {
+                    let place = self.place_index(dest, i)?;
+                    let value = if i == index { *elem } else { self.operand_index(input, i)? };
+                    self.copy_op(&value, &place)?;
+                }
+            }
+            sym::simd_extract => {
+                let index = u64::from(self.read_scalar(&args[1])?.to_u32()?);
+                let (len, e_ty) = args[0].layout.ty.simd_size_and_type(*self.tcx);
+                assert!(
+                    index < len,
+                    "index `{}` is out-of-bounds of vector type `{}` with length `{}`",
+                    index,
+                    e_ty,
+                    len
+                );
+                assert_eq!(
+                    e_ty, dest.layout.ty,
+                    "Return type `{}` must match vector element type `{}`",
+                    dest.layout.ty, e_ty
+                );
+                self.copy_op(&self.operand_index(&args[0], index)?, dest)?;
+            }
+            sym::likely | sym::unlikely | sym::black_box => {
+                // These just return their argument
+                self.copy_op(&args[0], dest)?;
+            }
+            sym::assume => {
+                let cond = self.read_scalar(&args[0])?.check_init()?.to_bool()?;
+                if !cond {
+                    throw_ub_format!("`assume` intrinsic called with `false`");
+                }
+            }
+            sym::raw_eq => {
+                let result = self.raw_eq_intrinsic(&args[0], &args[1])?;
+                self.write_scalar(result, dest)?;
+            }
+            _ => return Ok(false),
+        }
+
+        trace!("{:?}", self.dump_place(**dest));
+        self.go_to_block(ret);
+        Ok(true)
+    }
+
+    pub fn exact_div(
+        &mut self,
+        a: &ImmTy<'tcx, M::PointerTag>,
+        b: &ImmTy<'tcx, M::PointerTag>,
+        dest: &PlaceTy<'tcx, M::PointerTag>,
+    ) -> InterpResult<'tcx> {
+        // Performs an exact division, resulting in undefined behavior where
+        // `x % y != 0` or `y == 0` or `x == T::MIN && y == -1`.
+        // First, check x % y != 0 (or if that computation overflows).
+        let (res, overflow, _ty) = self.overflowing_binary_op(BinOp::Rem, &a, &b)?;
+        if overflow || res.assert_bits(a.layout.size) != 0 {
+            // Then, check if `b` is -1, which is the "MIN / -1" case.
+            let minus1 = Scalar::from_int(-1, dest.layout.size);
+            let b_scalar = b.to_scalar().unwrap();
+            if b_scalar == minus1 {
+                throw_ub_format!("exact_div: result of dividing MIN by -1 cannot be represented")
+            } else {
+                throw_ub_format!("exact_div: {} cannot be divided by {} without remainder", a, b,)
+            }
+        }
+        // `Rem` says this is all right, so we can let `Div` do its job.
+        self.binop_ignore_overflow(BinOp::Div, &a, &b, dest)
+    }
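
// A hedged standalone version of the checks above: exact division is rejected
// when the remainder is non-zero, and the only way `Rem` overflows is MIN / -1.
fn checked_exact_div(a: i64, b: i64) -> Result<i64, &'static str> {
    if b == 0 {
        return Err("division by zero");
    }
    let (rem, overflowed) = a.overflowing_rem(b);
    if overflowed {
        return Err("result of dividing MIN by -1 cannot be represented");
    }
    if rem != 0 {
        return Err("cannot be divided without remainder");
    }
    Ok(a / b) // `Rem` says this is all right, so `Div` cannot fail
}

fn exact_div_example() {
    assert_eq!(checked_exact_div(42, 7), Ok(6));
    assert!(checked_exact_div(43, 7).is_err());
    assert!(checked_exact_div(i64::MIN, -1).is_err());
}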
+
+    /// Offsets a pointer by some multiple of its type, returning an error if the pointer leaves its
+    /// allocation. For integer pointers, we consider each of them their own tiny allocation of size
+    /// 0, so offset-by-0 (and only 0) is okay -- except that null cannot be offset by _any_ value.
+    pub fn ptr_offset_inbounds(
+        &self,
+        ptr: Pointer<Option<M::PointerTag>>,
+        pointee_ty: Ty<'tcx>,
+        offset_count: i64,
+    ) -> InterpResult<'tcx, Pointer<Option<M::PointerTag>>> {
+        // We cannot overflow i64 as a type's size must be <= isize::MAX.
+        let pointee_size = i64::try_from(self.layout_of(pointee_ty)?.size.bytes()).unwrap();
+        // The computed offset, in bytes, cannot overflow an isize.
+        let offset_bytes =
+            offset_count.checked_mul(pointee_size).ok_or(err_ub!(PointerArithOverflow))?;
+        // The offset being in bounds cannot rely on "wrapping around" the address space.
+        // So, first rule out overflows in the pointer arithmetic.
+        let offset_ptr = ptr.signed_offset(offset_bytes, self)?;
+        // ptr and offset_ptr must be in bounds of the same allocated object. This means all of the
+        // memory between these pointers must be accessible. Note that we do not require the
+        // pointers to be properly aligned (unlike a read/write operation).
+        let min_ptr = if offset_bytes >= 0 { ptr } else { offset_ptr };
+        let size = offset_bytes.unsigned_abs();
+        // This call handles checking for integer/null pointers.
+        self.memory.check_ptr_access_align(
+            min_ptr,
+            Size::from_bytes(size),
+            Align::ONE,
+            CheckInAllocMsg::PointerArithmeticTest,
+        )?;
+        Ok(offset_ptr)
+    }
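
// Surface-level counterpart (a sketch): `offset` (the `sym::offset` intrinsic)
// must stay within the same allocated object, which is what
// `ptr_offset_inbounds` enforces, while `wrapping_offset` (`sym::arith_offset`)
// has no such requirement as long as the result is never dereferenced.
fn offset_example() {
    let xs = [10u8, 20, 30, 40];
    let base = xs.as_ptr();

    let third = unsafe { base.offset(2) }; // in bounds, may be read
    assert_eq!(unsafe { *third }, 30);

    let far = base.wrapping_offset(1_000); // allowed to leave the allocation
    let _ = far;                           // but dereferencing it would be UB
}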
+
+    /// Copy `count*size_of::<T>()` many bytes from `*src` to `*dst`.
+    pub(crate) fn copy_intrinsic(
+        &mut self,
+        src: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::PointerTag>,
+        dst: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::PointerTag>,
+        count: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::PointerTag>,
+        nonoverlapping: bool,
+    ) -> InterpResult<'tcx> {
+        let count = self.read_scalar(&count)?.to_machine_usize(self)?;
+        let layout = self.layout_of(src.layout.ty.builtin_deref(true).unwrap().ty)?;
+        let (size, align) = (layout.size, layout.align.abi);
+        let size = size.checked_mul(count, self).ok_or_else(|| {
+            err_ub_format!(
+                "overflow computing total size of `{}`",
+                if nonoverlapping { "copy_nonoverlapping" } else { "copy" }
+            )
+        })?;
+
+        let src = self.read_pointer(&src)?;
+        let dst = self.read_pointer(&dst)?;
+
+        self.memory.copy(src, align, dst, align, size, nonoverlapping)
+    }
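
// Surface-level counterpart (a sketch): `ptr::copy` / `ptr::copy_nonoverlapping`
// copy `count * size_of::<T>()` bytes, and the interpreter guards that
// multiplication against overflow above.
fn copy_example() {
    let src = [1u32, 2, 3, 4];
    let mut dst = [0u32; 4];
    unsafe {
        std::ptr::copy_nonoverlapping(src.as_ptr(), dst.as_mut_ptr(), src.len());
    }
    assert_eq!(dst, [1, 2, 3, 4]);
}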
+
+    pub(crate) fn raw_eq_intrinsic(
+        &mut self,
+        lhs: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::PointerTag>,
+        rhs: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::PointerTag>,
+    ) -> InterpResult<'tcx, Scalar<M::PointerTag>> {
+        let layout = self.layout_of(lhs.layout.ty.builtin_deref(true).unwrap().ty)?;
+        assert!(!layout.is_unsized());
+
+        let lhs = self.read_pointer(lhs)?;
+        let rhs = self.read_pointer(rhs)?;
+        let lhs_bytes = self.memory.read_bytes(lhs, layout.size)?;
+        let rhs_bytes = self.memory.read_bytes(rhs, layout.size)?;
+        Ok(Scalar::from_bool(lhs_bytes == rhs_bytes))
+    }
+}
diff --git a/compiler/rustc_const_eval/src/interpret/intrinsics/caller_location.rs b/compiler/rustc_const_eval/src/interpret/intrinsics/caller_location.rs
new file mode 100644
index 00000000000..d4cbba18029
--- /dev/null
+++ b/compiler/rustc_const_eval/src/interpret/intrinsics/caller_location.rs
@@ -0,0 +1,123 @@
+use std::convert::TryFrom;
+
+use rustc_ast::Mutability;
+use rustc_hir::lang_items::LangItem;
+use rustc_middle::mir::TerminatorKind;
+use rustc_middle::ty::layout::LayoutOf;
+use rustc_middle::ty::subst::Subst;
+use rustc_span::{Span, Symbol};
+
+use crate::interpret::{
+    intrinsics::{InterpCx, Machine},
+    MPlaceTy, MemoryKind, Scalar,
+};
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+    /// Walks up the callstack from the intrinsic's callsite, searching for the first callsite in a
+    /// frame which is not `#[track_caller]`.
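+    ///
+    /// Illustrative sketch (not taken from these sources): given
+    ///
+    /// ```ignore (illustrative)
+    /// fn outer() { tracked() }        // `outer` is not `#[track_caller]`
+    ///
+    /// #[track_caller]
+    /// fn tracked() { /* asks for the caller location */ }
+    /// ```
+    ///
+    /// the walk skips the frame for `tracked` and returns the span of the `tracked()`
+    /// call inside `outer`.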
+    crate fn find_closest_untracked_caller_location(&self) -> Span {
+        for frame in self.stack().iter().rev() {
+            debug!("find_closest_untracked_caller_location: checking frame {:?}", frame.instance);
+
+            // Assert that the frame we look at is actually executing code currently
+            // (`loc` is `Err` when we are unwinding and the frame does not require cleanup).
+            let loc = frame.loc.unwrap();
+
+            // This could be a non-`Call` terminator (such as `Drop`), or not a terminator at all
+            // (such as `box`). Use the normal span by default.
+            let mut source_info = *frame.body.source_info(loc);
+
+            // If this is a `Call` terminator, use the `fn_span` instead.
+            let block = &frame.body.basic_blocks()[loc.block];
+            if loc.statement_index == block.statements.len() {
+                debug!(
+                    "find_closest_untracked_caller_location: got terminator {:?} ({:?})",
+                    block.terminator(),
+                    block.terminator().kind
+                );
+                if let TerminatorKind::Call { fn_span, .. } = block.terminator().kind {
+                    source_info.span = fn_span;
+                }
+            }
+
+            // Walk up the `SourceScope`s, in case some of them are from MIR inlining.
+            // If so, the starting `source_info.span` is in the innermost inlined
+            // function, and will be replaced with outer callsite spans as long
+            // as the inlined functions were `#[track_caller]`.
+            loop {
+                let scope_data = &frame.body.source_scopes[source_info.scope];
+
+                if let Some((callee, callsite_span)) = scope_data.inlined {
+                    // Stop inside the most nested non-`#[track_caller]` function,
+                    // before ever reaching its caller (which is irrelevant).
+                    if !callee.def.requires_caller_location(*self.tcx) {
+                        return source_info.span;
+                    }
+                    source_info.span = callsite_span;
+                }
+
+                // Skip past all of the parents with `inlined: None`.
+                match scope_data.inlined_parent_scope {
+                    Some(parent) => source_info.scope = parent,
+                    None => break,
+                }
+            }
+
+            // Stop inside the most nested non-`#[track_caller]` function,
+            // before ever reaching its caller (which is irrelevant).
+            if !frame.instance.def.requires_caller_location(*self.tcx) {
+                return source_info.span;
+            }
+        }
+
+        bug!("no non-`#[track_caller]` frame found")
+    }
+
+    /// Allocate a `const core::panic::Location` with the provided filename and line/column numbers.
+    crate fn alloc_caller_location(
+        &mut self,
+        filename: Symbol,
+        line: u32,
+        col: u32,
+    ) -> MPlaceTy<'tcx, M::PointerTag> {
+        let file =
+            self.allocate_str(&filename.as_str(), MemoryKind::CallerLocation, Mutability::Not);
+        let line = Scalar::from_u32(line);
+        let col = Scalar::from_u32(col);
+
+        // Allocate memory for `CallerLocation` struct.
+        let loc_ty = self
+            .tcx
+            .type_of(self.tcx.require_lang_item(LangItem::PanicLocation, None))
+            .subst(*self.tcx, self.tcx.mk_substs([self.tcx.lifetimes.re_erased.into()].iter()));
+        let loc_layout = self.layout_of(loc_ty).unwrap();
+        // This can fail if rustc runs out of memory right here. Trying to emit an error would be
+        // pointless, since that would require allocating more memory than a Location.
+        let location = self.allocate(loc_layout, MemoryKind::CallerLocation).unwrap();
+
+        // Initialize fields.
+        self.write_immediate(file.to_ref(self), &self.mplace_field(&location, 0).unwrap().into())
+            .expect("writing to memory we just allocated cannot fail");
+        self.write_scalar(line, &self.mplace_field(&location, 1).unwrap().into())
+            .expect("writing to memory we just allocated cannot fail");
+        self.write_scalar(col, &self.mplace_field(&location, 2).unwrap().into())
+            .expect("writing to memory we just allocated cannot fail");
+
+        location
+    }
+
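+    /// Obtain the `(file, line, column)` triple for the given span, with a 1-based column,
+    /// using the span's outermost expansion site (i.e., the macro call site, if any).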
+    crate fn location_triple_for_span(&self, span: Span) -> (Symbol, u32, u32) {
+        let topmost = span.ctxt().outer_expn().expansion_cause().unwrap_or(span);
+        let caller = self.tcx.sess.source_map().lookup_char_pos(topmost.lo());
+        (
+            Symbol::intern(&caller.file.name.prefer_remapped().to_string_lossy()),
+            u32::try_from(caller.line).unwrap(),
+            u32::try_from(caller.col_display).unwrap().checked_add(1).unwrap(),
+        )
+    }
+
+    pub fn alloc_caller_location_for_span(&mut self, span: Span) -> MPlaceTy<'tcx, M::PointerTag> {
+        let (file, line, column) = self.location_triple_for_span(span);
+        self.alloc_caller_location(file, line, column)
+    }
+}
diff --git a/compiler/rustc_const_eval/src/interpret/intrinsics/type_name.rs b/compiler/rustc_const_eval/src/interpret/intrinsics/type_name.rs
new file mode 100644
index 00000000000..a7012cd63f3
--- /dev/null
+++ b/compiler/rustc_const_eval/src/interpret/intrinsics/type_name.rs
@@ -0,0 +1,197 @@
+use rustc_hir::def_id::CrateNum;
+use rustc_hir::definitions::DisambiguatedDefPathData;
+use rustc_middle::mir::interpret::Allocation;
+use rustc_middle::ty::{
+    self,
+    print::{PrettyPrinter, Print, Printer},
+    subst::{GenericArg, GenericArgKind},
+    Ty, TyCtxt,
+};
+use std::fmt::Write;
+
+struct AbsolutePathPrinter<'tcx> {
+    tcx: TyCtxt<'tcx>,
+    path: String,
+}
+
+impl<'tcx> Printer<'tcx> for AbsolutePathPrinter<'tcx> {
+    type Error = std::fmt::Error;
+
+    type Path = Self;
+    type Region = Self;
+    type Type = Self;
+    type DynExistential = Self;
+    type Const = Self;
+
+    fn tcx(&self) -> TyCtxt<'tcx> {
+        self.tcx
+    }
+
+    fn print_region(self, _region: ty::Region<'_>) -> Result<Self::Region, Self::Error> {
+        Ok(self)
+    }
+
+    fn print_type(mut self, ty: Ty<'tcx>) -> Result<Self::Type, Self::Error> {
+        match *ty.kind() {
+            // Types without identity.
+            ty::Bool
+            | ty::Char
+            | ty::Int(_)
+            | ty::Uint(_)
+            | ty::Float(_)
+            | ty::Str
+            | ty::Array(_, _)
+            | ty::Slice(_)
+            | ty::RawPtr(_)
+            | ty::Ref(_, _, _)
+            | ty::FnPtr(_)
+            | ty::Never
+            | ty::Tuple(_)
+            | ty::Dynamic(_, _) => self.pretty_print_type(ty),
+
+            // Placeholders (all printed as `_` to uniformize them).
+            ty::Param(_) | ty::Bound(..) | ty::Placeholder(_) | ty::Infer(_) | ty::Error(_) => {
+                write!(self, "_")?;
+                Ok(self)
+            }
+
+            // Types with identity (print the module path).
+            ty::Adt(&ty::AdtDef { did: def_id, .. }, substs)
+            | ty::FnDef(def_id, substs)
+            | ty::Opaque(def_id, substs)
+            | ty::Projection(ty::ProjectionTy { item_def_id: def_id, substs })
+            | ty::Closure(def_id, substs)
+            | ty::Generator(def_id, substs, _) => self.print_def_path(def_id, substs),
+            ty::Foreign(def_id) => self.print_def_path(def_id, &[]),
+
+            ty::GeneratorWitness(_) => bug!("type_name: unexpected `GeneratorWitness`"),
+        }
+    }
+
+    fn print_const(self, ct: &'tcx ty::Const<'tcx>) -> Result<Self::Const, Self::Error> {
+        self.pretty_print_const(ct, false)
+    }
+
+    fn print_dyn_existential(
+        mut self,
+        predicates: &'tcx ty::List<ty::Binder<'tcx, ty::ExistentialPredicate<'tcx>>>,
+    ) -> Result<Self::DynExistential, Self::Error> {
+        let mut first = true;
+        for p in predicates {
+            if !first {
+                write!(self, "+")?;
+            }
+            first = false;
+            self = p.print(self)?;
+        }
+        Ok(self)
+    }
+
+    fn path_crate(mut self, cnum: CrateNum) -> Result<Self::Path, Self::Error> {
+        self.path.push_str(&self.tcx.crate_name(cnum).as_str());
+        Ok(self)
+    }
+
+    fn path_qualified(
+        self,
+        self_ty: Ty<'tcx>,
+        trait_ref: Option<ty::TraitRef<'tcx>>,
+    ) -> Result<Self::Path, Self::Error> {
+        self.pretty_path_qualified(self_ty, trait_ref)
+    }
+
+    fn path_append_impl(
+        self,
+        print_prefix: impl FnOnce(Self) -> Result<Self::Path, Self::Error>,
+        _disambiguated_data: &DisambiguatedDefPathData,
+        self_ty: Ty<'tcx>,
+        trait_ref: Option<ty::TraitRef<'tcx>>,
+    ) -> Result<Self::Path, Self::Error> {
+        self.pretty_path_append_impl(
+            |mut cx| {
+                cx = print_prefix(cx)?;
+
+                cx.path.push_str("::");
+
+                Ok(cx)
+            },
+            self_ty,
+            trait_ref,
+        )
+    }
+
+    fn path_append(
+        mut self,
+        print_prefix: impl FnOnce(Self) -> Result<Self::Path, Self::Error>,
+        disambiguated_data: &DisambiguatedDefPathData,
+    ) -> Result<Self::Path, Self::Error> {
+        self = print_prefix(self)?;
+
+        write!(self.path, "::{}", disambiguated_data.data).unwrap();
+
+        Ok(self)
+    }
+
+    fn path_generic_args(
+        mut self,
+        print_prefix: impl FnOnce(Self) -> Result<Self::Path, Self::Error>,
+        args: &[GenericArg<'tcx>],
+    ) -> Result<Self::Path, Self::Error> {
+        self = print_prefix(self)?;
+        let args = args.iter().cloned().filter(|arg| match arg.unpack() {
+            GenericArgKind::Lifetime(_) => false,
+            _ => true,
+        });
+        if args.clone().next().is_some() {
+            self.generic_delimiters(|cx| cx.comma_sep(args))
+        } else {
+            Ok(self)
+        }
+    }
+}
+
+impl PrettyPrinter<'tcx> for AbsolutePathPrinter<'tcx> {
+    fn region_should_not_be_omitted(&self, _region: ty::Region<'_>) -> bool {
+        false
+    }
+    fn comma_sep<T>(mut self, mut elems: impl Iterator<Item = T>) -> Result<Self, Self::Error>
+    where
+        T: Print<'tcx, Self, Output = Self, Error = Self::Error>,
+    {
+        if let Some(first) = elems.next() {
+            self = first.print(self)?;
+            for elem in elems {
+                self.path.push_str(", ");
+                self = elem.print(self)?;
+            }
+        }
+        Ok(self)
+    }
+
+    fn generic_delimiters(
+        mut self,
+        f: impl FnOnce(Self) -> Result<Self, Self::Error>,
+    ) -> Result<Self, Self::Error> {
+        write!(self, "<")?;
+
+        self = f(self)?;
+
+        write!(self, ">")?;
+
+        Ok(self)
+    }
+}
+
+impl Write for AbsolutePathPrinter<'_> {
+    fn write_str(&mut self, s: &str) -> std::fmt::Result {
+        self.path.push_str(s);
+        Ok(())
+    }
+}
+
+/// Directly returns an `Allocation` containing an absolute path representation of the given type.
+crate fn alloc_type_name<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> &'tcx Allocation {
+    let path = AbsolutePathPrinter { tcx, path: String::new() }.print_type(ty).unwrap().path;
+    let alloc = Allocation::from_bytes_byte_aligned_immutable(path.into_bytes());
+    tcx.intern_const_alloc(alloc)
+}
diff --git a/compiler/rustc_const_eval/src/interpret/machine.rs b/compiler/rustc_const_eval/src/interpret/machine.rs
new file mode 100644
index 00000000000..323e102b872
--- /dev/null
+++ b/compiler/rustc_const_eval/src/interpret/machine.rs
@@ -0,0 +1,479 @@
+//! This module contains everything needed to instantiate an interpreter.
+//! This separation exists to ensure that no fancy miri features like
+//! interpreting common C functions leak into CTFE.
+
+use std::borrow::{Borrow, Cow};
+use std::fmt::Debug;
+use std::hash::Hash;
+
+use rustc_middle::mir;
+use rustc_middle::ty::{self, Ty};
+use rustc_span::def_id::DefId;
+use rustc_target::abi::Size;
+use rustc_target::spec::abi::Abi;
+
+use super::{
+    AllocId, AllocRange, Allocation, Frame, ImmTy, InterpCx, InterpResult, LocalValue, MemPlace,
+    Memory, MemoryKind, OpTy, Operand, PlaceTy, Pointer, Provenance, Scalar, StackPopUnwind,
+};
+
+/// Data returned by `Machine::after_stack_pop`,
+/// to provide further control over the popping of the stack frame.
+#[derive(Eq, PartialEq, Debug, Copy, Clone)]
+pub enum StackPopJump {
+    /// Indicates that no special handling should be
+    /// done - we'll either return normally or unwind
+    /// based on the terminator for the function
+    /// we're leaving.
+    Normal,
+
+    /// Indicates that we should *not* jump to the return/unwind address, as the callback already
+    /// took care of everything.
+    NoJump,
+}
+
+/// Whether this kind of memory is allowed to leak
+pub trait MayLeak: Copy {
+    fn may_leak(self) -> bool;
+}
+
+/// The functionality needed by memory to manage its allocations
+pub trait AllocMap<K: Hash + Eq, V> {
+    /// Tests if the map contains the given key.
+    /// Deliberately takes `&mut` because that is sufficient, and some implementations
+    /// can be more efficient that way (e.g., by using `RefCell::get_mut`).
+    fn contains_key<Q: ?Sized + Hash + Eq>(&mut self, k: &Q) -> bool
+    where
+        K: Borrow<Q>;
+
+    /// Inserts a new entry into the map.
+    fn insert(&mut self, k: K, v: V) -> Option<V>;
+
+    /// Removes an entry from the map.
+    fn remove<Q: ?Sized + Hash + Eq>(&mut self, k: &Q) -> Option<V>
+    where
+        K: Borrow<Q>;
+
+    /// Returns data based on the keys and values in the map.
+    fn filter_map_collect<T>(&self, f: impl FnMut(&K, &V) -> Option<T>) -> Vec<T>;
+
+    /// Returns a reference to entry `k`. If no such entry exists, call
+    /// `vacant` and either forward its error, or add its result to the map
+    /// and return a reference to *that*.
+    fn get_or<E>(&self, k: K, vacant: impl FnOnce() -> Result<V, E>) -> Result<&V, E>;
+
+    /// Returns a mutable reference to entry `k`. If no such entry exists, call
+    /// `vacant` and either forward its error, or add its result to the map
+    /// and return a reference to *that*.
+    fn get_mut_or<E>(&mut self, k: K, vacant: impl FnOnce() -> Result<V, E>) -> Result<&mut V, E>;
+
+    /// Read-only lookup.
+    fn get(&self, k: K) -> Option<&V> {
+        self.get_or(k, || Err(())).ok()
+    }
+
+    /// Mutable lookup.
+    fn get_mut(&mut self, k: K) -> Option<&mut V> {
+        self.get_mut_or(k, || Err(())).ok()
+    }
+}
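+
+// A self-contained sketch (std types only; not the map implementation the interpreter
+// actually uses) of the `get_mut_or` contract above: hand out the existing entry, or
+// insert the result of `vacant` and hand out a reference to that.
+//
+//     fn get_mut_or<K: Eq + std::hash::Hash, V, E>(
+//         map: &mut std::collections::HashMap<K, V>,
+//         k: K,
+//         vacant: impl FnOnce() -> Result<V, E>,
+//     ) -> Result<&mut V, E> {
+//         use std::collections::hash_map::Entry;
+//         match map.entry(k) {
+//             Entry::Occupied(e) => Ok(e.into_mut()),
+//             Entry::Vacant(e) => Ok(e.insert(vacant()?)),
+//         }
+//     }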
+
+/// Methods of this trait signify a point where CTFE evaluation would fail
+/// and some use-case-dependent behaviour can instead be applied.
+pub trait Machine<'mir, 'tcx>: Sized {
+    /// Additional memory kinds a machine wishes to distinguish from the builtin ones
+    type MemoryKind: Debug + std::fmt::Display + MayLeak + Eq + 'static;
+
+    /// Pointers are "tagged" with provenance information; typically the `AllocId` they belong to.
+    type PointerTag: Provenance + Eq + Hash + 'static;
+
+    /// Machines can define extra (non-instance) things that represent values of function pointers.
+    /// For example, Miri uses this to return a function pointer from `dlsym`
+    /// that can later be called to execute the right thing.
+    type ExtraFnVal: Debug + Copy;
+
+    /// Extra data stored in every call frame.
+    type FrameExtra;
+
+    /// Extra data stored in memory. A reference to this is available when `AllocExtra`
+    /// gets initialized, so you can e.g., have an `Rc` here if there is global state you
+    /// need access to in the `AllocExtra` hooks.
+    type MemoryExtra;
+
+    /// Extra data stored in every allocation.
+    type AllocExtra: Debug + Clone + 'static;
+
+    /// Memory's allocation map
+    type MemoryMap: AllocMap<
+            AllocId,
+            (MemoryKind<Self::MemoryKind>, Allocation<Self::PointerTag, Self::AllocExtra>),
+        > + Default
+        + Clone;
+
+    /// The memory kind to use for copied global memory (held in `tcx`) --
+    /// or `None` if such memory should not be mutated and thus any such attempt will cause
+    /// a `ModifiedStatic` error to be raised.
+    /// Statics are copied under two circumstances: when they are mutated, and when
+    /// `init_allocation_extra` (see below) returns an owned allocation
+    /// that is added to the memory so that the work is not done twice.
+    const GLOBAL_KIND: Option<Self::MemoryKind>;
+
+    /// Should the machine panic on allocation failures?
+    const PANIC_ON_ALLOC_FAIL: bool;
+
+    /// Whether memory accesses should be alignment-checked.
+    fn enforce_alignment(memory_extra: &Self::MemoryExtra) -> bool;
+
+    /// Whether, when checking alignment, we should `force_int` and thus support
+    /// custom alignment logic based on whatever the integer address happens to be.
+    fn force_int_for_alignment_check(memory_extra: &Self::MemoryExtra) -> bool;
+
+    /// Whether to enforce the validity invariant
+    fn enforce_validity(ecx: &InterpCx<'mir, 'tcx, Self>) -> bool;
+
+    /// Whether function calls should be [ABI](Abi)-checked.
+    fn enforce_abi(_ecx: &InterpCx<'mir, 'tcx, Self>) -> bool {
+        true
+    }
+
+    /// Entry point for obtaining the MIR of anything that should get evaluated.
+    /// So not just functions and shims, but also const/static initializers, anonymous
+    /// constants, ...
+    fn load_mir(
+        ecx: &InterpCx<'mir, 'tcx, Self>,
+        instance: ty::InstanceDef<'tcx>,
+    ) -> InterpResult<'tcx, &'tcx mir::Body<'tcx>> {
+        Ok(ecx.tcx.instance_mir(instance))
+    }
+
+    /// Entry point to all function calls.
+    ///
+    /// Returns either the mir to use for the call, or `None` if execution should
+    /// just proceed (which usually means this hook did all the work that the
+    /// called function should usually have done). In the latter case, it is
+    /// this hook's responsibility to advance the instruction pointer!
+    /// (This is to support functions like `__rust_maybe_catch_panic` that neither find a MIR
+    /// nor just jump to `ret`, but instead push their own stack frame.)
+    /// Passing `dest` and `ret` in the same `Option` proved very annoying when only one of them
+    /// was used.
+    fn find_mir_or_eval_fn(
+        ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        instance: ty::Instance<'tcx>,
+        abi: Abi,
+        args: &[OpTy<'tcx, Self::PointerTag>],
+        ret: Option<(&PlaceTy<'tcx, Self::PointerTag>, mir::BasicBlock)>,
+        unwind: StackPopUnwind,
+    ) -> InterpResult<'tcx, Option<&'mir mir::Body<'tcx>>>;
+
+    /// Execute `fn_val`.  It is the hook's responsibility to advance the instruction
+    /// pointer as appropriate.
+    fn call_extra_fn(
+        ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        fn_val: Self::ExtraFnVal,
+        abi: Abi,
+        args: &[OpTy<'tcx, Self::PointerTag>],
+        ret: Option<(&PlaceTy<'tcx, Self::PointerTag>, mir::BasicBlock)>,
+        unwind: StackPopUnwind,
+    ) -> InterpResult<'tcx>;
+
+    /// Directly process an intrinsic without pushing a stack frame. It is the hook's
+    /// responsibility to advance the instruction pointer as appropriate.
+    fn call_intrinsic(
+        ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        instance: ty::Instance<'tcx>,
+        args: &[OpTy<'tcx, Self::PointerTag>],
+        ret: Option<(&PlaceTy<'tcx, Self::PointerTag>, mir::BasicBlock)>,
+        unwind: StackPopUnwind,
+    ) -> InterpResult<'tcx>;
+
+    /// Called to evaluate `Assert` MIR terminators that trigger a panic.
+    fn assert_panic(
+        ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        msg: &mir::AssertMessage<'tcx>,
+        unwind: Option<mir::BasicBlock>,
+    ) -> InterpResult<'tcx>;
+
+    /// Called to evaluate `Abort` MIR terminator.
+    fn abort(_ecx: &mut InterpCx<'mir, 'tcx, Self>, _msg: String) -> InterpResult<'tcx, !> {
+        throw_unsup_format!("aborting execution is not supported")
+    }
+
+    /// Called for all binary operations where the LHS has pointer type.
+    ///
+    /// Returns a (value, overflowed) pair if the operation succeeded
+    fn binary_ptr_op(
+        ecx: &InterpCx<'mir, 'tcx, Self>,
+        bin_op: mir::BinOp,
+        left: &ImmTy<'tcx, Self::PointerTag>,
+        right: &ImmTy<'tcx, Self::PointerTag>,
+    ) -> InterpResult<'tcx, (Scalar<Self::PointerTag>, bool, Ty<'tcx>)>;
+
+    /// Heap allocations via the `box` keyword.
+    fn box_alloc(
+        ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        dest: &PlaceTy<'tcx, Self::PointerTag>,
+    ) -> InterpResult<'tcx>;
+
+    /// Called to read the specified `local` from the `frame`.
+    /// Since reading a ZST is not actually accessing memory or locals, this is never invoked
+    /// for ZST reads.
+    #[inline]
+    fn access_local(
+        _ecx: &InterpCx<'mir, 'tcx, Self>,
+        frame: &Frame<'mir, 'tcx, Self::PointerTag, Self::FrameExtra>,
+        local: mir::Local,
+    ) -> InterpResult<'tcx, Operand<Self::PointerTag>> {
+        frame.locals[local].access()
+    }
+
+    /// Called to write the specified `local` of the `frame`.
+    /// Since writing a ZST is not actually accessing memory or locals, this is never invoked
+    /// for ZST writes.
+    #[inline]
+    fn access_local_mut<'a>(
+        ecx: &'a mut InterpCx<'mir, 'tcx, Self>,
+        frame: usize,
+        local: mir::Local,
+    ) -> InterpResult<'tcx, Result<&'a mut LocalValue<Self::PointerTag>, MemPlace<Self::PointerTag>>>
+    where
+        'tcx: 'mir,
+    {
+        ecx.stack_mut()[frame].locals[local].access_mut()
+    }
+
+    /// Called before a basic block terminator is executed.
+    /// You can use this to detect endlessly running programs.
+    #[inline]
+    fn before_terminator(_ecx: &mut InterpCx<'mir, 'tcx, Self>) -> InterpResult<'tcx> {
+        Ok(())
+    }
+
+    /// Called before a global allocation is accessed.
+    /// `def_id` is `Some` if this is the "lazy" allocation of a static.
+    #[inline]
+    fn before_access_global(
+        _memory_extra: &Self::MemoryExtra,
+        _alloc_id: AllocId,
+        _allocation: &Allocation,
+        _static_def_id: Option<DefId>,
+        _is_write: bool,
+    ) -> InterpResult<'tcx> {
+        Ok(())
+    }
+
+    /// Return the `AllocId` for the given thread-local static in the current thread.
+    fn thread_local_static_base_pointer(
+        _ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        def_id: DefId,
+    ) -> InterpResult<'tcx, Pointer<Self::PointerTag>> {
+        throw_unsup!(ThreadLocalStatic(def_id))
+    }
+
+    /// Return the root pointer for the given `extern static`.
+    fn extern_static_base_pointer(
+        mem: &Memory<'mir, 'tcx, Self>,
+        def_id: DefId,
+    ) -> InterpResult<'tcx, Pointer<Self::PointerTag>>;
+
+    /// Return a "base" pointer for the given allocation: the one that is used for direct
+    /// accesses to this static/const/fn allocation, or the one returned from the heap allocator.
+    ///
+    /// Not called on `extern` or thread-local statics (those use the methods above).
+    fn tag_alloc_base_pointer(
+        mem: &Memory<'mir, 'tcx, Self>,
+        ptr: Pointer,
+    ) -> Pointer<Self::PointerTag>;
+
+    /// "Int-to-pointer cast"
+    fn ptr_from_addr(
+        mem: &Memory<'mir, 'tcx, Self>,
+        addr: u64,
+    ) -> Pointer<Option<Self::PointerTag>>;
+
+    /// Convert a pointer with provenance into an allocation-offset pair.
+    fn ptr_get_alloc(
+        mem: &Memory<'mir, 'tcx, Self>,
+        ptr: Pointer<Self::PointerTag>,
+    ) -> (AllocId, Size);
+
+    /// Called to initialize the "extra" state of an allocation and make the pointers
+    /// it contains (in relocations) tagged.  The way we construct allocations is
+    /// to always first construct it without extra and then add the extra.
+    /// This keeps uniform code paths for handling both allocations created by CTFE
+    /// for globals, and allocations created by Miri during evaluation.
+    ///
+    /// `kind` is the kind of the allocation being tagged; it can be `None` when
+    /// it's a global and `GLOBAL_KIND` is `None`.
+    ///
+    /// This should avoid copying if no work has to be done! If this returns an owned
+    /// allocation (because a copy had to be done to add tags or metadata), machine memory will
+    /// cache the result. (This relies on `AllocMap::get_or` being able to add the
+    /// owned allocation to the map even when the map is shared.)
+    fn init_allocation_extra<'b>(
+        mem: &Memory<'mir, 'tcx, Self>,
+        id: AllocId,
+        alloc: Cow<'b, Allocation>,
+        kind: Option<MemoryKind<Self::MemoryKind>>,
+    ) -> Cow<'b, Allocation<Self::PointerTag, Self::AllocExtra>>;
+
+    /// Hook for performing extra checks on a memory read access.
+    ///
+    /// Takes read-only access to the allocation so that all the memory read
+    /// operations can take `&self`. Use a `RefCell` in `AllocExtra` if you
+    /// need to mutate.
+    #[inline(always)]
+    fn memory_read(
+        _memory_extra: &Self::MemoryExtra,
+        _alloc_extra: &Self::AllocExtra,
+        _tag: Self::PointerTag,
+        _range: AllocRange,
+    ) -> InterpResult<'tcx> {
+        Ok(())
+    }
+
+    /// Hook for performing extra checks on a memory write access.
+    #[inline(always)]
+    fn memory_written(
+        _memory_extra: &mut Self::MemoryExtra,
+        _alloc_extra: &mut Self::AllocExtra,
+        _tag: Self::PointerTag,
+        _range: AllocRange,
+    ) -> InterpResult<'tcx> {
+        Ok(())
+    }
+
+    /// Hook for performing extra operations on a memory deallocation.
+    #[inline(always)]
+    fn memory_deallocated(
+        _memory_extra: &mut Self::MemoryExtra,
+        _alloc_extra: &mut Self::AllocExtra,
+        _tag: Self::PointerTag,
+        _range: AllocRange,
+    ) -> InterpResult<'tcx> {
+        Ok(())
+    }
+
+    /// Executes a retagging operation.
+    #[inline]
+    fn retag(
+        _ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        _kind: mir::RetagKind,
+        _place: &PlaceTy<'tcx, Self::PointerTag>,
+    ) -> InterpResult<'tcx> {
+        Ok(())
+    }
+
+    /// Called immediately before a new stack frame gets pushed.
+    fn init_frame_extra(
+        ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        frame: Frame<'mir, 'tcx, Self::PointerTag>,
+    ) -> InterpResult<'tcx, Frame<'mir, 'tcx, Self::PointerTag, Self::FrameExtra>>;
+
+    /// Borrow the current thread's stack.
+    fn stack(
+        ecx: &'a InterpCx<'mir, 'tcx, Self>,
+    ) -> &'a [Frame<'mir, 'tcx, Self::PointerTag, Self::FrameExtra>];
+
+    /// Mutably borrow the current thread's stack.
+    fn stack_mut(
+        ecx: &'a mut InterpCx<'mir, 'tcx, Self>,
+    ) -> &'a mut Vec<Frame<'mir, 'tcx, Self::PointerTag, Self::FrameExtra>>;
+
+    /// Called immediately after a stack frame got pushed and its locals got initialized.
+    fn after_stack_push(_ecx: &mut InterpCx<'mir, 'tcx, Self>) -> InterpResult<'tcx> {
+        Ok(())
+    }
+
+    /// Called immediately after a stack frame got popped, but before jumping back to the caller.
+    fn after_stack_pop(
+        _ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        _frame: Frame<'mir, 'tcx, Self::PointerTag, Self::FrameExtra>,
+        _unwinding: bool,
+    ) -> InterpResult<'tcx, StackPopJump> {
+        // By default, we do not support unwinding from panics
+        Ok(StackPopJump::Normal)
+    }
+}
+
+// A lot of the flexibility above is just needed for `Miri`, but all "compile-time" machines
+// (CTFE and ConstProp) use the same instance.  Here, we share that code.
+pub macro compile_time_machine(<$mir: lifetime, $tcx: lifetime>) {
+    type PointerTag = AllocId;
+    type ExtraFnVal = !;
+
+    type MemoryMap =
+        rustc_data_structures::fx::FxHashMap<AllocId, (MemoryKind<Self::MemoryKind>, Allocation)>;
+    const GLOBAL_KIND: Option<Self::MemoryKind> = None; // no copying of globals from `tcx` to machine memory
+
+    type AllocExtra = ();
+    type FrameExtra = ();
+
+    #[inline(always)]
+    fn enforce_alignment(_memory_extra: &Self::MemoryExtra) -> bool {
+        // We do not check for alignment to avoid having to carry an `Align`
+        // in `ConstValue::ByRef`.
+        false
+    }
+
+    #[inline(always)]
+    fn force_int_for_alignment_check(_memory_extra: &Self::MemoryExtra) -> bool {
+        // We do not support `force_int`.
+        false
+    }
+
+    #[inline(always)]
+    fn enforce_validity(_ecx: &InterpCx<$mir, $tcx, Self>) -> bool {
+        false // for now, we don't enforce validity
+    }
+
+    #[inline(always)]
+    fn call_extra_fn(
+        _ecx: &mut InterpCx<$mir, $tcx, Self>,
+        fn_val: !,
+        _abi: Abi,
+        _args: &[OpTy<$tcx>],
+        _ret: Option<(&PlaceTy<$tcx>, mir::BasicBlock)>,
+        _unwind: StackPopUnwind,
+    ) -> InterpResult<$tcx> {
+        match fn_val {}
+    }
+
+    #[inline(always)]
+    fn init_allocation_extra<'b>(
+        _mem: &Memory<$mir, $tcx, Self>,
+        _id: AllocId,
+        alloc: Cow<'b, Allocation>,
+        _kind: Option<MemoryKind<Self::MemoryKind>>,
+    ) -> Cow<'b, Allocation<Self::PointerTag>> {
+        // We do not use a tag so we can just cheaply forward the allocation
+        alloc
+    }
+
+    fn extern_static_base_pointer(
+        mem: &Memory<$mir, $tcx, Self>,
+        def_id: DefId,
+    ) -> InterpResult<$tcx, Pointer> {
+        // Use the `AllocId` associated with the `DefId`. Any actual *access* will fail.
+        Ok(Pointer::new(mem.tcx.create_static_alloc(def_id), Size::ZERO))
+    }
+
+    #[inline(always)]
+    fn tag_alloc_base_pointer(
+        _mem: &Memory<$mir, $tcx, Self>,
+        ptr: Pointer<AllocId>,
+    ) -> Pointer<AllocId> {
+        ptr
+    }
+
+    #[inline(always)]
+    fn ptr_from_addr(_mem: &Memory<$mir, $tcx, Self>, addr: u64) -> Pointer<Option<AllocId>> {
+        Pointer::new(None, Size::from_bytes(addr))
+    }
+
+    #[inline(always)]
+    fn ptr_get_alloc(_mem: &Memory<$mir, $tcx, Self>, ptr: Pointer<AllocId>) -> (AllocId, Size) {
+        // We know `offset` is relative to the allocation, so we can use `into_parts`.
+        let (alloc_id, offset) = ptr.into_parts();
+        (alloc_id, offset)
+    }
+}
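+
+// Usage sketch (hypothetical machine type, for illustration only): a compile-time machine
+// invokes this macro from inside its `Machine` impl to pull in the shared items, and then
+// only provides what is left.
+//
+//     impl<'mir, 'tcx> Machine<'mir, 'tcx> for MyCompileTimeMachine {
+//         compile_time_machine!(<'mir, 'tcx>);
+//
+//         // ... remaining associated types and required methods ...
+//     }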
diff --git a/compiler/rustc_const_eval/src/interpret/memory.rs b/compiler/rustc_const_eval/src/interpret/memory.rs
new file mode 100644
index 00000000000..b8b6ff93753
--- /dev/null
+++ b/compiler/rustc_const_eval/src/interpret/memory.rs
@@ -0,0 +1,1186 @@
+//! The memory subsystem.
+//!
+//! Generally, we use `Pointer` to denote memory addresses. However, some operations
+//! have a "size"-like parameter, and they take `Scalar` for the address because
+//! if the size is 0, then the pointer can also be a (properly aligned, non-null)
+//! integer. It is crucial that these operations call `check_align` *before*
+//! short-circuiting the empty case!
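+//!
+//! For example (illustrative): a zero-sized access through `NonNull::<u32>::dangling()`
+//! uses address 4, which is non-null and 4-aligned, so it passes these checks even though
+//! no allocation backs it.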
+
+use std::assert_matches::assert_matches;
+use std::borrow::Cow;
+use std::collections::VecDeque;
+use std::convert::TryFrom;
+use std::fmt;
+use std::ptr;
+
+use rustc_ast::Mutability;
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_middle::mir::display_allocation;
+use rustc_middle::ty::{Instance, ParamEnv, TyCtxt};
+use rustc_target::abi::{Align, HasDataLayout, Size, TargetDataLayout};
+
+use super::{
+    alloc_range, AllocId, AllocMap, AllocRange, Allocation, CheckInAllocMsg, GlobalAlloc,
+    InterpResult, Machine, MayLeak, Pointer, PointerArithmetic, Provenance, Scalar,
+    ScalarMaybeUninit,
+};
+
+#[derive(Debug, PartialEq, Copy, Clone)]
+pub enum MemoryKind<T> {
+    /// Stack memory. Error if deallocated except during a stack pop.
+    Stack,
+    /// Memory allocated by `caller_location` intrinsic. Error if ever deallocated.
+    CallerLocation,
+    /// Additional memory kinds a machine wishes to distinguish from the builtin ones.
+    Machine(T),
+}
+
+impl<T: MayLeak> MayLeak for MemoryKind<T> {
+    #[inline]
+    fn may_leak(self) -> bool {
+        match self {
+            MemoryKind::Stack => false,
+            MemoryKind::CallerLocation => true,
+            MemoryKind::Machine(k) => k.may_leak(),
+        }
+    }
+}
+
+impl<T: fmt::Display> fmt::Display for MemoryKind<T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            MemoryKind::Stack => write!(f, "stack variable"),
+            MemoryKind::CallerLocation => write!(f, "caller location"),
+            MemoryKind::Machine(m) => write!(f, "{}", m),
+        }
+    }
+}
+
+/// Used by `get_size_and_align` to indicate whether the allocation needs to be live.
+#[derive(Debug, Copy, Clone)]
+pub enum AllocCheck {
+    /// Allocation must be live and not a function pointer.
+    Dereferenceable,
+    /// Allocation needs to be live, but may be a function pointer.
+    Live,
+    /// Allocation may be dead.
+    MaybeDead,
+}
+
+/// The value of a function pointer.
+#[derive(Debug, Copy, Clone)]
+pub enum FnVal<'tcx, Other> {
+    Instance(Instance<'tcx>),
+    Other(Other),
+}
+
+impl<'tcx, Other> FnVal<'tcx, Other> {
+    pub fn as_instance(self) -> InterpResult<'tcx, Instance<'tcx>> {
+        match self {
+            FnVal::Instance(instance) => Ok(instance),
+            FnVal::Other(_) => {
+                throw_unsup_format!("'foreign' function pointers are not supported in this context")
+            }
+        }
+    }
+}
+
+// `Memory` has to depend on the `Machine` because some of its operations
+// (e.g., `get`) call a `Machine` hook.
+pub struct Memory<'mir, 'tcx, M: Machine<'mir, 'tcx>> {
+    /// Allocations local to this instance of the miri engine. The kind
+    /// helps ensure that the same mechanism is used for allocation and
+    /// deallocation. When an allocation is not found here, it is a
+    /// global and looked up in the `tcx` for read access. Some machines may
+    /// have to mutate this map even on a read-only access to a global (because
+    /// they do pointer provenance tracking and the allocations in `tcx` have
+    /// the wrong type), so we let the machine override this type.
+    /// Either way, if the machine allows writing to a global, doing so will
+    /// create a copy of the global allocation here.
+    // FIXME: this should not be public, but interning currently needs access to it
+    pub(super) alloc_map: M::MemoryMap,
+
+    /// Map for "extra" function pointers.
+    extra_fn_ptr_map: FxHashMap<AllocId, M::ExtraFnVal>,
+
+    /// To be able to compare pointers with null, and to check alignment for accesses
+    /// to ZSTs (where pointers may dangle), we keep track of the size even for allocations
+    /// that do not exist any more.
+    // FIXME: this should not be public, but interning currently needs access to it
+    pub(super) dead_alloc_map: FxHashMap<AllocId, (Size, Align)>,
+
+    /// Extra data added by the machine.
+    pub extra: M::MemoryExtra,
+
+    /// Lets us implement `HasDataLayout`, which is awfully convenient.
+    pub tcx: TyCtxt<'tcx>,
+}
+
+impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> HasDataLayout for Memory<'mir, 'tcx, M> {
+    #[inline]
+    fn data_layout(&self) -> &TargetDataLayout {
+        &self.tcx.data_layout
+    }
+}
+
+/// A reference to some allocation that was already bounds-checked for the given region
+/// and had the on-access machine hooks run.
+#[derive(Copy, Clone)]
+pub struct AllocRef<'a, 'tcx, Tag, Extra> {
+    alloc: &'a Allocation<Tag, Extra>,
+    range: AllocRange,
+    tcx: TyCtxt<'tcx>,
+    alloc_id: AllocId,
+}
+/// A reference to some allocation that was already bounds-checked for the given region
+/// and had the on-access machine hooks run.
+pub struct AllocRefMut<'a, 'tcx, Tag, Extra> {
+    alloc: &'a mut Allocation<Tag, Extra>,
+    range: AllocRange,
+    tcx: TyCtxt<'tcx>,
+    alloc_id: AllocId,
+}
+
+impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
+    pub fn new(tcx: TyCtxt<'tcx>, extra: M::MemoryExtra) -> Self {
+        Memory {
+            alloc_map: M::MemoryMap::default(),
+            extra_fn_ptr_map: FxHashMap::default(),
+            dead_alloc_map: FxHashMap::default(),
+            extra,
+            tcx,
+        }
+    }
+
+    /// Call this to turn untagged "global" pointers (obtained via `tcx`) into
+    /// the machine pointer to the allocation.  Must never be used
+    /// for any other pointers, nor for TLS statics.
+    ///
+    /// Using the resulting pointer represents a *direct* access to that memory
+    /// (e.g. by directly using a `static`),
+    /// as opposed to access through a pointer that was created by the program.
+    ///
+    /// This function can fail only if `ptr` points to an `extern static`.
+    #[inline]
+    pub fn global_base_pointer(
+        &self,
+        ptr: Pointer<AllocId>,
+    ) -> InterpResult<'tcx, Pointer<M::PointerTag>> {
+        // We know `offset` is relative to the allocation, so we can use `into_parts`.
+        let (alloc_id, offset) = ptr.into_parts();
+        // We need to handle `extern static`.
+        match self.tcx.get_global_alloc(alloc_id) {
+            Some(GlobalAlloc::Static(def_id)) if self.tcx.is_thread_local_static(def_id) => {
+                bug!("global memory cannot point to thread-local static")
+            }
+            Some(GlobalAlloc::Static(def_id)) if self.tcx.is_foreign_item(def_id) => {
+                return M::extern_static_base_pointer(self, def_id);
+            }
+            _ => {}
+        }
+        // And we need to get the tag.
+        Ok(M::tag_alloc_base_pointer(self, Pointer::new(alloc_id, offset)))
+    }
+
+    pub fn create_fn_alloc(
+        &mut self,
+        fn_val: FnVal<'tcx, M::ExtraFnVal>,
+    ) -> Pointer<M::PointerTag> {
+        let id = match fn_val {
+            FnVal::Instance(instance) => self.tcx.create_fn_alloc(instance),
+            FnVal::Other(extra) => {
+                // FIXME(RalfJung): Should we have a cache here?
+                let id = self.tcx.reserve_alloc_id();
+                let old = self.extra_fn_ptr_map.insert(id, extra);
+                assert!(old.is_none());
+                id
+            }
+        };
+        // Functions are global allocations, so make sure we get the right base pointer.
+        // We know this is not an `extern static` so this cannot fail.
+        self.global_base_pointer(Pointer::from(id)).unwrap()
+    }
+
+    pub fn allocate(
+        &mut self,
+        size: Size,
+        align: Align,
+        kind: MemoryKind<M::MemoryKind>,
+    ) -> InterpResult<'static, Pointer<M::PointerTag>> {
+        let alloc = Allocation::uninit(size, align, M::PANIC_ON_ALLOC_FAIL)?;
+        Ok(self.allocate_with(alloc, kind))
+    }
+
+    pub fn allocate_bytes(
+        &mut self,
+        bytes: &[u8],
+        align: Align,
+        kind: MemoryKind<M::MemoryKind>,
+        mutability: Mutability,
+    ) -> Pointer<M::PointerTag> {
+        let alloc = Allocation::from_bytes(bytes, align, mutability);
+        self.allocate_with(alloc, kind)
+    }
+
+    pub fn allocate_with(
+        &mut self,
+        alloc: Allocation,
+        kind: MemoryKind<M::MemoryKind>,
+    ) -> Pointer<M::PointerTag> {
+        let id = self.tcx.reserve_alloc_id();
+        debug_assert_ne!(
+            Some(kind),
+            M::GLOBAL_KIND.map(MemoryKind::Machine),
+            "dynamically allocating global memory"
+        );
+        let alloc = M::init_allocation_extra(self, id, Cow::Owned(alloc), Some(kind));
+        self.alloc_map.insert(id, (kind, alloc.into_owned()));
+        M::tag_alloc_base_pointer(self, Pointer::from(id))
+    }
+
+    pub fn reallocate(
+        &mut self,
+        ptr: Pointer<Option<M::PointerTag>>,
+        old_size_and_align: Option<(Size, Align)>,
+        new_size: Size,
+        new_align: Align,
+        kind: MemoryKind<M::MemoryKind>,
+    ) -> InterpResult<'tcx, Pointer<M::PointerTag>> {
+        let (alloc_id, offset, ptr) = self.ptr_get_alloc(ptr)?;
+        if offset.bytes() != 0 {
+            throw_ub_format!(
+                "reallocating {:?} which does not point to the beginning of an object",
+                ptr
+            );
+        }
+
+        // For simplicity's sake, we implement reallocate as "alloc, copy, dealloc".
+        // This happens so rarely, the perf advantage is outweighed by the maintenance cost.
+        let new_ptr = self.allocate(new_size, new_align, kind)?;
+        let old_size = match old_size_and_align {
+            Some((size, _align)) => size,
+            None => self.get_raw(alloc_id)?.size(),
+        };
+        // This will also call the access hooks.
+        self.copy(
+            ptr.into(),
+            Align::ONE,
+            new_ptr.into(),
+            Align::ONE,
+            old_size.min(new_size),
+            /*nonoverlapping*/ true,
+        )?;
+        self.deallocate(ptr.into(), old_size_and_align, kind)?;
+
+        Ok(new_ptr)
+    }
+
+    pub fn deallocate(
+        &mut self,
+        ptr: Pointer<Option<M::PointerTag>>,
+        old_size_and_align: Option<(Size, Align)>,
+        kind: MemoryKind<M::MemoryKind>,
+    ) -> InterpResult<'tcx> {
+        let (alloc_id, offset, ptr) = self.ptr_get_alloc(ptr)?;
+        trace!("deallocating: {}", alloc_id);
+
+        if offset.bytes() != 0 {
+            throw_ub_format!(
+                "deallocating {:?} which does not point to the beginning of an object",
+                ptr
+            );
+        }
+
+        let (alloc_kind, mut alloc) = match self.alloc_map.remove(&alloc_id) {
+            Some(alloc) => alloc,
+            None => {
+                // Deallocating global memory -- always an error
+                return Err(match self.tcx.get_global_alloc(alloc_id) {
+                    Some(GlobalAlloc::Function(..)) => {
+                        err_ub_format!("deallocating {}, which is a function", alloc_id)
+                    }
+                    Some(GlobalAlloc::Static(..) | GlobalAlloc::Memory(..)) => {
+                        err_ub_format!("deallocating {}, which is static memory", alloc_id)
+                    }
+                    None => err_ub!(PointerUseAfterFree(alloc_id)),
+                }
+                .into());
+            }
+        };
+
+        if alloc.mutability == Mutability::Not {
+            throw_ub_format!("deallocating immutable allocation {}", alloc_id);
+        }
+        if alloc_kind != kind {
+            throw_ub_format!(
+                "deallocating {}, which is {} memory, using {} deallocation operation",
+                alloc_id,
+                alloc_kind,
+                kind
+            );
+        }
+        if let Some((size, align)) = old_size_and_align {
+            if size != alloc.size() || align != alloc.align {
+                throw_ub_format!(
+                    "incorrect layout on deallocation: {} has size {} and alignment {}, but gave size {} and alignment {}",
+                    alloc_id,
+                    alloc.size().bytes(),
+                    alloc.align.bytes(),
+                    size.bytes(),
+                    align.bytes(),
+                )
+            }
+        }
+
+        // Let the machine take some extra action
+        let size = alloc.size();
+        M::memory_deallocated(
+            &mut self.extra,
+            &mut alloc.extra,
+            ptr.provenance,
+            alloc_range(Size::ZERO, size),
+        )?;
+
+        // Don't forget to remember size and align of this now-dead allocation
+        let old = self.dead_alloc_map.insert(alloc_id, (size, alloc.align));
+        if old.is_some() {
+            bug!("Nothing can be deallocated twice");
+        }
+
+        Ok(())
+    }
+
+    /// Internal helper function to determine the allocation and offset of a pointer (if any).
+    #[inline(always)]
+    fn get_ptr_access(
+        &self,
+        ptr: Pointer<Option<M::PointerTag>>,
+        size: Size,
+        align: Align,
+    ) -> InterpResult<'tcx, Option<(AllocId, Size, Pointer<M::PointerTag>)>> {
+        let align = M::enforce_alignment(&self.extra).then_some(align);
+        self.check_and_deref_ptr(
+            ptr,
+            size,
+            align,
+            CheckInAllocMsg::MemoryAccessTest,
+            |alloc_id, offset, ptr| {
+                let (size, align) =
+                    self.get_size_and_align(alloc_id, AllocCheck::Dereferenceable)?;
+                Ok((size, align, (alloc_id, offset, ptr)))
+            },
+        )
+    }
+
+    /// Check if the given pointer points to live memory of given `size` and `align`
+    /// (ignoring `M::enforce_alignment`). The caller can control the error message for the
+    /// out-of-bounds case.
+    #[inline(always)]
+    pub fn check_ptr_access_align(
+        &self,
+        ptr: Pointer<Option<M::PointerTag>>,
+        size: Size,
+        align: Align,
+        msg: CheckInAllocMsg,
+    ) -> InterpResult<'tcx> {
+        self.check_and_deref_ptr(ptr, size, Some(align), msg, |alloc_id, _, _| {
+            let check = match msg {
+                CheckInAllocMsg::DerefTest | CheckInAllocMsg::MemoryAccessTest => {
+                    AllocCheck::Dereferenceable
+                }
+                CheckInAllocMsg::PointerArithmeticTest | CheckInAllocMsg::InboundsTest => {
+                    AllocCheck::Live
+                }
+            };
+            let (size, align) = self.get_size_and_align(alloc_id, check)?;
+            Ok((size, align, ()))
+        })?;
+        Ok(())
+    }
+
+    /// Low-level helper function to check if a ptr is in-bounds and potentially return a reference
+    /// to the allocation it points to. Supports both shared and mutable references, as the actual
+    /// checking is offloaded to a helper closure. `align` defines whether and which alignment check
+    /// is done. Returns `None` for size 0, and otherwise `Some` of what `alloc_size` returned.
+    fn check_and_deref_ptr<T>(
+        &self,
+        ptr: Pointer<Option<M::PointerTag>>,
+        size: Size,
+        align: Option<Align>,
+        msg: CheckInAllocMsg,
+        alloc_size: impl FnOnce(
+            AllocId,
+            Size,
+            Pointer<M::PointerTag>,
+        ) -> InterpResult<'tcx, (Size, Align, T)>,
+    ) -> InterpResult<'tcx, Option<T>> {
+        fn check_offset_align(offset: u64, align: Align) -> InterpResult<'static> {
+            if offset % align.bytes() == 0 {
+                Ok(())
+            } else {
+                // The biggest power of two through which `offset` is divisible.
+                let offset_pow2 = 1 << offset.trailing_zeros();
+                throw_ub!(AlignmentCheckFailed {
+                    has: Align::from_bytes(offset_pow2).unwrap(),
+                    required: align,
+                })
+            }
+        }
+
+        // Extract from the pointer an `Option<AllocId>` and an offset, which is relative to the
+        // allocation or (if that is `None`) an absolute address.
+        let ptr_or_addr = if size.bytes() == 0 {
+            // Let's see what we can do, but don't throw errors if there's nothing there.
+            self.ptr_try_get_alloc(ptr)
+        } else {
+            // A "real" access, we insist on getting an `AllocId`.
+            Ok(self.ptr_get_alloc(ptr)?)
+        };
+        Ok(match ptr_or_addr {
+            Err(addr) => {
+                // No memory is actually being accessed.
+                debug_assert!(size.bytes() == 0);
+                // Must be non-null.
+                if addr == 0 {
+                    throw_ub!(DanglingIntPointer(0, msg))
+                }
+                // Must be aligned.
+                if let Some(align) = align {
+                    check_offset_align(addr, align)?;
+                }
+                None
+            }
+            Ok((alloc_id, offset, ptr)) => {
+                let (alloc_size, alloc_align, ret_val) = alloc_size(alloc_id, offset, ptr)?;
+                // Test bounds. This also ensures non-null.
+                // It is sufficient to check this for the end pointer. Also check for overflow!
+                if offset.checked_add(size, &self.tcx).map_or(true, |end| end > alloc_size) {
+                    throw_ub!(PointerOutOfBounds {
+                        alloc_id,
+                        alloc_size,
+                        ptr_offset: self.machine_usize_to_isize(offset.bytes()),
+                        ptr_size: size,
+                        msg,
+                    })
+                }
+                // Test align. Check this last; if both bounds and alignment are violated
+                // we want the error to be about the bounds.
+                if let Some(align) = align {
+                    if M::force_int_for_alignment_check(&self.extra) {
+                        let addr = Scalar::from_pointer(ptr, &self.tcx)
+                            .to_machine_usize(&self.tcx)
+                            .expect("ptr-to-int cast for align check should never fail");
+                        check_offset_align(addr, align)?;
+                    } else {
+                        // Check allocation alignment and offset alignment.
+                        if alloc_align.bytes() < align.bytes() {
+                            throw_ub!(AlignmentCheckFailed { has: alloc_align, required: align });
+                        }
+                        check_offset_align(offset.bytes(), align)?;
+                    }
+                }
+
+                // We can still be zero-sized in this branch, in which case we have to
+                // return `None`.
+                if size.bytes() == 0 { None } else { Some(ret_val) }
+            }
+        })
+    }
+
+    /// Test if the pointer might be null.
+    pub fn ptr_may_be_null(&self, ptr: Pointer<Option<M::PointerTag>>) -> bool {
+        match self.ptr_try_get_alloc(ptr) {
+            Ok((alloc_id, offset, _)) => {
+                let (size, _align) = self
+                    .get_size_and_align(alloc_id, AllocCheck::MaybeDead)
+                    .expect("alloc info with MaybeDead cannot fail");
+                // If the pointer is out-of-bounds, it may be null.
+                // Note that one-past-the-end (offset == size) is still inbounds, and never null.
+                offset > size
+            }
+            Err(offset) => offset == 0,
+        }
+    }
+}
+
+/// Allocation accessors
+impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
+    /// Helper function to obtain a global (tcx) allocation.
+    /// This attempts to return a reference to an existing allocation if
+    /// one can be found in `tcx`. That, however, is only possible if `tcx` and
+    /// this machine use the same pointer tag, so it is indirected through
+    /// `M::init_allocation_extra`.
+    fn get_global_alloc(
+        &self,
+        id: AllocId,
+        is_write: bool,
+    ) -> InterpResult<'tcx, Cow<'tcx, Allocation<M::PointerTag, M::AllocExtra>>> {
+        let (alloc, def_id) = match self.tcx.get_global_alloc(id) {
+            Some(GlobalAlloc::Memory(mem)) => {
+                // Memory of a constant or promoted or anonymous memory referenced by a static.
+                (mem, None)
+            }
+            Some(GlobalAlloc::Function(..)) => throw_ub!(DerefFunctionPointer(id)),
+            None => throw_ub!(PointerUseAfterFree(id)),
+            Some(GlobalAlloc::Static(def_id)) => {
+                assert!(self.tcx.is_static(def_id));
+                assert!(!self.tcx.is_thread_local_static(def_id));
+                // Notice that every static has two `AllocId`s that will resolve to the same
+                // thing here: one maps to `GlobalAlloc::Static`, which is the "lazy" ID,
+                // and the other one maps to `GlobalAlloc::Memory`, which is returned by
+                // `eval_static_initializer` and is the "resolved" ID.
+                // The resolved ID is never used by the interpreted program, it is hidden.
+                // This is relied upon for soundness of const-patterns; a pointer to the resolved
+                // ID would "sidestep" the checks that make sure consts do not point to statics!
+                // The `GlobalAlloc::Memory` branch here is still reachable though; when a static
+                // contains a reference to memory that was created during its evaluation (i.e., not
+                // to another static), those inner references only exist in "resolved" form.
+                if self.tcx.is_foreign_item(def_id) {
+                    throw_unsup!(ReadExternStatic(def_id));
+                }
+
+                (self.tcx.eval_static_initializer(def_id)?, Some(def_id))
+            }
+        };
+        M::before_access_global(&self.extra, id, alloc, def_id, is_write)?;
+        let alloc = Cow::Borrowed(alloc);
+        // We got tcx memory. Let the machine initialize its "extra" stuff.
+        let alloc = M::init_allocation_extra(
+            self,
+            id, // always use the ID we got as input, not the "hidden" one.
+            alloc,
+            M::GLOBAL_KIND.map(MemoryKind::Machine),
+        );
+        Ok(alloc)
+    }
+
+    /// Gives raw access to the `Allocation`, without bounds or alignment checks.
+    /// The caller is responsible for calling the access hooks!
+    fn get_raw(
+        &self,
+        id: AllocId,
+    ) -> InterpResult<'tcx, &Allocation<M::PointerTag, M::AllocExtra>> {
+        // The error type of the inner closure here is somewhat funny.  We have two
+        // ways of "erroring": An actual error, or because we got a reference from
+        // `get_global_alloc` that we can actually use directly without inserting anything anywhere.
+        // So the error type is `InterpResult<'tcx, &Allocation<M::PointerTag>>`.
+        let a = self.alloc_map.get_or(id, || {
+            let alloc = self.get_global_alloc(id, /*is_write*/ false).map_err(Err)?;
+            match alloc {
+                Cow::Borrowed(alloc) => {
+                    // We got a ref, cheaply return that as an "error" so that the
+                    // map does not get mutated.
+                    Err(Ok(alloc))
+                }
+                Cow::Owned(alloc) => {
+                    // Need to put it into the map and return a ref to that
+                    let kind = M::GLOBAL_KIND.expect(
+                        "I got a global allocation that I have to copy but the machine does \
+                            not expect that to happen",
+                    );
+                    Ok((MemoryKind::Machine(kind), alloc))
+                }
+            }
+        });
+        // Now unpack that funny error type
+        match a {
+            Ok(a) => Ok(&a.1),
+            Err(a) => a,
+        }
+    }
+
+    /// "Safe" (bounds and align-checked) allocation access.
+    pub fn get<'a>(
+        &'a self,
+        ptr: Pointer<Option<M::PointerTag>>,
+        size: Size,
+        align: Align,
+    ) -> InterpResult<'tcx, Option<AllocRef<'a, 'tcx, M::PointerTag, M::AllocExtra>>> {
+        let align = M::enforce_alignment(&self.extra).then_some(align);
+        let ptr_and_alloc = self.check_and_deref_ptr(
+            ptr,
+            size,
+            align,
+            CheckInAllocMsg::MemoryAccessTest,
+            |alloc_id, offset, ptr| {
+                let alloc = self.get_raw(alloc_id)?;
+                Ok((alloc.size(), alloc.align, (alloc_id, offset, ptr, alloc)))
+            },
+        )?;
+        if let Some((alloc_id, offset, ptr, alloc)) = ptr_and_alloc {
+            let range = alloc_range(offset, size);
+            M::memory_read(&self.extra, &alloc.extra, ptr.provenance, range)?;
+            Ok(Some(AllocRef { alloc, range, tcx: self.tcx, alloc_id }))
+        } else {
+            // Even in this branch we have to be sure that we actually access the allocation, in
+            // order to ensure that `static FOO: Type = FOO;` causes a cycle error instead of
+            // magically pulling *any* ZST value from the ether. However, the `get_raw` above is
+            // always called when `ptr` has an `AllocId`.
+            Ok(None)
+        }
+    }
+
+    /// Return the `extra` field of the given allocation.
+    pub fn get_alloc_extra<'a>(&'a self, id: AllocId) -> InterpResult<'tcx, &'a M::AllocExtra> {
+        Ok(&self.get_raw(id)?.extra)
+    }
+
+    /// Gives raw mutable access to the `Allocation`, without bounds or alignment checks.
+    /// The caller is responsible for calling the access hooks!
+    ///
+    /// Also returns a ptr to `self.extra` so that the caller can use it in parallel with the
+    /// allocation.
+    fn get_raw_mut(
+        &mut self,
+        id: AllocId,
+    ) -> InterpResult<'tcx, (&mut Allocation<M::PointerTag, M::AllocExtra>, &mut M::MemoryExtra)>
+    {
+        // We have "NLL problem case #3" here, which cannot be worked around without loss of
+        // efficiency even for the common case where the key is in the map.
+        // <https://rust-lang.github.io/rfcs/2094-nll.html#problem-case-3-conditional-control-flow-across-functions>
+        // (Cannot use `get_mut_or` since `get_global_alloc` needs `&self`.)
+        if self.alloc_map.get_mut(id).is_none() {
+            // Slow path.
+            // Allocation not found locally, so go look it up globally.
+            let alloc = self.get_global_alloc(id, /*is_write*/ true)?;
+            let kind = M::GLOBAL_KIND.expect(
+                "I got a global allocation that I have to copy but the machine does \
+                    not expect that to happen",
+            );
+            self.alloc_map.insert(id, (MemoryKind::Machine(kind), alloc.into_owned()));
+        }
+
+        let (_kind, alloc) = self.alloc_map.get_mut(id).unwrap();
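+        // Never hand out mutable access to read-only memory.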
+        if alloc.mutability == Mutability::Not {
+            throw_ub!(WriteToReadOnly(id))
+        }
+        Ok((alloc, &mut self.extra))
+    }
+
+    /// "Safe" (bounds and align-checked) allocation access.
+    pub fn get_mut<'a>(
+        &'a mut self,
+        ptr: Pointer<Option<M::PointerTag>>,
+        size: Size,
+        align: Align,
+    ) -> InterpResult<'tcx, Option<AllocRefMut<'a, 'tcx, M::PointerTag, M::AllocExtra>>> {
+        let parts = self.get_ptr_access(ptr, size, align)?;
+        if let Some((alloc_id, offset, ptr)) = parts {
+            let tcx = self.tcx;
+            // FIXME: can we somehow avoid looking up the allocation twice here?
+            // We cannot call `get_raw_mut` inside `check_and_deref_ptr` as that would duplicate `&mut self`.
+            let (alloc, extra) = self.get_raw_mut(alloc_id)?;
+            let range = alloc_range(offset, size);
+            M::memory_written(extra, &mut alloc.extra, ptr.provenance, range)?;
+            Ok(Some(AllocRefMut { alloc, range, tcx, alloc_id }))
+        } else {
+            Ok(None)
+        }
+    }
+
+    /// Return the `extra` field of the given allocation.
+    pub fn get_alloc_extra_mut<'a>(
+        &'a mut self,
+        id: AllocId,
+    ) -> InterpResult<'tcx, (&'a mut M::AllocExtra, &'a mut M::MemoryExtra)> {
+        let (alloc, memory_extra) = self.get_raw_mut(id)?;
+        Ok((&mut alloc.extra, memory_extra))
+    }
+
+    /// Obtain the size and alignment of an allocation, even if that allocation has
+    /// been deallocated.
+    ///
+    /// If `liveness` is `AllocCheck::MaybeDead`, this function always returns `Ok`.
+    pub fn get_size_and_align(
+        &self,
+        id: AllocId,
+        liveness: AllocCheck,
+    ) -> InterpResult<'static, (Size, Align)> {
+        // # Regular allocations
+        // Don't use `self.get_raw` here as that will
+        // a) cause cycles in case `id` refers to a static
+        // b) duplicate a global's allocation in miri
+        if let Some((_, alloc)) = self.alloc_map.get(id) {
+            return Ok((alloc.size(), alloc.align));
+        }
+
+        // # Function pointers
+        // (both global from `alloc_map` and local from `extra_fn_ptr_map`)
+        if self.get_fn_alloc(id).is_some() {
+            return if let AllocCheck::Dereferenceable = liveness {
+                // The caller requested no function pointers.
+                throw_ub!(DerefFunctionPointer(id))
+            } else {
+                Ok((Size::ZERO, Align::ONE))
+            };
+        }
+
+        // # Statics
+        // Can't do this in the match argument: we may get cycle errors since the lock would
+        // be held throughout the match.
+        match self.tcx.get_global_alloc(id) {
+            Some(GlobalAlloc::Static(did)) => {
+                assert!(!self.tcx.is_thread_local_static(did));
+                // Use size and align of the type.
+                let ty = self.tcx.type_of(did);
+                let layout = self.tcx.layout_of(ParamEnv::empty().and(ty)).unwrap();
+                Ok((layout.size, layout.align.abi))
+            }
+            Some(GlobalAlloc::Memory(alloc)) => {
+                // Need to duplicate the logic here, because the global allocations have
+                // different associated types than the interpreter-local ones.
+                Ok((alloc.size(), alloc.align))
+            }
+            Some(GlobalAlloc::Function(_)) => bug!("We already checked function pointers above"),
+            // The rest must be dead.
+            None => {
+                if let AllocCheck::MaybeDead = liveness {
+                    // Deallocated pointers are allowed, we should be able to find
+                    // them in the map.
+                    Ok(*self
+                        .dead_alloc_map
+                        .get(&id)
+                        .expect("deallocated pointers should all be recorded in `dead_alloc_map`"))
+                } else {
+                    throw_ub!(PointerUseAfterFree(id))
+                }
+            }
+        }
+    }
+
+    fn get_fn_alloc(&self, id: AllocId) -> Option<FnVal<'tcx, M::ExtraFnVal>> {
+        if let Some(extra) = self.extra_fn_ptr_map.get(&id) {
+            Some(FnVal::Other(*extra))
+        } else {
+            match self.tcx.get_global_alloc(id) {
+                Some(GlobalAlloc::Function(instance)) => Some(FnVal::Instance(instance)),
+                _ => None,
+            }
+        }
+    }
+
+    pub fn get_fn(
+        &self,
+        ptr: Pointer<Option<M::PointerTag>>,
+    ) -> InterpResult<'tcx, FnVal<'tcx, M::ExtraFnVal>> {
+        trace!("get_fn({:?})", ptr);
+        let (alloc_id, offset, _ptr) = self.ptr_get_alloc(ptr)?;
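+        // A valid function pointer must point at offset 0 of a function allocation.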
+        if offset.bytes() != 0 {
+            throw_ub!(InvalidFunctionPointer(Pointer::new(alloc_id, offset)))
+        }
+        self.get_fn_alloc(alloc_id)
+            .ok_or_else(|| err_ub!(InvalidFunctionPointer(Pointer::new(alloc_id, offset))).into())
+    }
+
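+    /// Mark the given allocation as read-only; subsequent attempts to write to it will be rejected.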
+    pub fn mark_immutable(&mut self, id: AllocId) -> InterpResult<'tcx> {
+        self.get_raw_mut(id)?.0.mutability = Mutability::Not;
+        Ok(())
+    }
+
+    /// Create a lazy debug printer that prints the given allocation and all allocations it points
+    /// to, recursively.
+    #[must_use]
+    pub fn dump_alloc<'a>(&'a self, id: AllocId) -> DumpAllocs<'a, 'mir, 'tcx, M> {
+        self.dump_allocs(vec![id])
+    }
+
+    /// Create a lazy debug printer for a list of allocations and all allocations they point to,
+    /// recursively.
+    #[must_use]
+    pub fn dump_allocs<'a>(&'a self, mut allocs: Vec<AllocId>) -> DumpAllocs<'a, 'mir, 'tcx, M> {
+        allocs.sort();
+        allocs.dedup();
+        DumpAllocs { mem: self, allocs }
+    }
+
+    /// Print leaked memory. Allocations reachable from `static_roots` or a `Global` allocation
+    /// are not considered leaked. Leaks whose kind's `may_leak()` returns true are not reported.
+    pub fn leak_report(&self, static_roots: &[AllocId]) -> usize {
+        // Collect the set of allocations that are *reachable* from `Global` allocations.
+        let reachable = {
+            let mut reachable = FxHashSet::default();
+            let global_kind = M::GLOBAL_KIND.map(MemoryKind::Machine);
+            let mut todo: Vec<_> = self.alloc_map.filter_map_collect(move |&id, &(kind, _)| {
+                if Some(kind) == global_kind { Some(id) } else { None }
+            });
+            todo.extend(static_roots);
+            while let Some(id) = todo.pop() {
+                if reachable.insert(id) {
+                    // This is a new allocation, add its relocations to `todo`.
+                    if let Some((_, alloc)) = self.alloc_map.get(id) {
+                        todo.extend(alloc.relocations().values().map(|tag| tag.get_alloc_id()));
+                    }
+                }
+            }
+            reachable
+        };
+
+        // All allocations that are *not* `reachable` and *not* `may_leak` are considered leaking.
+        let leaks: Vec<_> = self.alloc_map.filter_map_collect(|&id, &(kind, _)| {
+            if kind.may_leak() || reachable.contains(&id) { None } else { Some(id) }
+        });
+        let n = leaks.len();
+        if n > 0 {
+            eprintln!("The following memory was leaked: {:?}", self.dump_allocs(leaks));
+        }
+        n
+    }
+
+    /// This is used by [priroda](https://github.com/oli-obk/priroda)
+    pub fn alloc_map(&self) -> &M::MemoryMap {
+        &self.alloc_map
+    }
+}
+
+#[doc(hidden)]
+/// There's no way to use this directly, it's just a helper struct for the `dump_alloc(s)` methods.
+pub struct DumpAllocs<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> {
+    mem: &'a Memory<'mir, 'tcx, M>,
+    allocs: Vec<AllocId>,
+}
+
+impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> std::fmt::Debug for DumpAllocs<'a, 'mir, 'tcx, M> {
+    fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        // Cannot be a closure because it is generic in `Tag`, `Extra`.
+        fn write_allocation_track_relocs<'tcx, Tag: Provenance, Extra>(
+            fmt: &mut std::fmt::Formatter<'_>,
+            tcx: TyCtxt<'tcx>,
+            allocs_to_print: &mut VecDeque<AllocId>,
+            alloc: &Allocation<Tag, Extra>,
+        ) -> std::fmt::Result {
+            for alloc_id in alloc.relocations().values().map(|tag| tag.get_alloc_id()) {
+                allocs_to_print.push_back(alloc_id);
+            }
+            write!(fmt, "{}", display_allocation(tcx, alloc))
+        }
+
+        let mut allocs_to_print: VecDeque<_> = self.allocs.iter().copied().collect();
+        // `allocs_printed` contains all allocations that we have already printed.
+        let mut allocs_printed = FxHashSet::default();
+
+        while let Some(id) = allocs_to_print.pop_front() {
+            if !allocs_printed.insert(id) {
+                // Already printed, so skip this.
+                continue;
+            }
+
+            write!(fmt, "{}", id)?;
+            match self.mem.alloc_map.get(id) {
+                Some(&(kind, ref alloc)) => {
+                    // normal alloc
+                    write!(fmt, " ({}, ", kind)?;
+                    write_allocation_track_relocs(
+                        &mut *fmt,
+                        self.mem.tcx,
+                        &mut allocs_to_print,
+                        alloc,
+                    )?;
+                }
+                None => {
+                    // global alloc
+                    match self.mem.tcx.get_global_alloc(id) {
+                        Some(GlobalAlloc::Memory(alloc)) => {
+                            write!(fmt, " (unchanged global, ")?;
+                            write_allocation_track_relocs(
+                                &mut *fmt,
+                                self.mem.tcx,
+                                &mut allocs_to_print,
+                                alloc,
+                            )?;
+                        }
+                        Some(GlobalAlloc::Function(func)) => {
+                            write!(fmt, " (fn: {})", func)?;
+                        }
+                        Some(GlobalAlloc::Static(did)) => {
+                            write!(fmt, " (static: {})", self.mem.tcx.def_path_str(did))?;
+                        }
+                        None => {
+                            write!(fmt, " (deallocated)")?;
+                        }
+                    }
+                }
+            }
+            writeln!(fmt)?;
+        }
+        Ok(())
+    }
+}
+
+/// Reading and writing.
+impl<'tcx, 'a, Tag: Provenance, Extra> AllocRefMut<'a, 'tcx, Tag, Extra> {
+    pub fn write_scalar(
+        &mut self,
+        range: AllocRange,
+        val: ScalarMaybeUninit<Tag>,
+    ) -> InterpResult<'tcx> {
+        Ok(self
+            .alloc
+            .write_scalar(&self.tcx, self.range.subrange(range), val)
+            .map_err(|e| e.to_interp_error(self.alloc_id))?)
+    }
+
+    pub fn write_ptr_sized(
+        &mut self,
+        offset: Size,
+        val: ScalarMaybeUninit<Tag>,
+    ) -> InterpResult<'tcx> {
+        self.write_scalar(alloc_range(offset, self.tcx.data_layout().pointer_size), val)
+    }
+}
+
+impl<'tcx, 'a, Tag: Provenance, Extra> AllocRef<'a, 'tcx, Tag, Extra> {
+    pub fn read_scalar(&self, range: AllocRange) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
+        Ok(self
+            .alloc
+            .read_scalar(&self.tcx, self.range.subrange(range))
+            .map_err(|e| e.to_interp_error(self.alloc_id))?)
+    }
+
+    pub fn read_ptr_sized(&self, offset: Size) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
+        self.read_scalar(alloc_range(offset, self.tcx.data_layout().pointer_size))
+    }
+
+    pub fn check_bytes(&self, range: AllocRange, allow_uninit_and_ptr: bool) -> InterpResult<'tcx> {
+        Ok(self
+            .alloc
+            .check_bytes(&self.tcx, self.range.subrange(range), allow_uninit_and_ptr)
+            .map_err(|e| e.to_interp_error(self.alloc_id))?)
+    }
+}
+
+impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
+    /// Reads the given number of bytes from memory. Returns them as a slice.
+    ///
+    /// Performs appropriate bounds checks.
+    pub fn read_bytes(
+        &self,
+        ptr: Pointer<Option<M::PointerTag>>,
+        size: Size,
+    ) -> InterpResult<'tcx, &[u8]> {
+        let alloc_ref = match self.get(ptr, size, Align::ONE)? {
+            Some(a) => a,
+            None => return Ok(&[]), // zero-sized access
+        };
+        // Side-step AllocRef and directly access the underlying bytes more efficiently.
+        // (We are staying inside the bounds here so all is good.)
+        Ok(alloc_ref
+            .alloc
+            .get_bytes(&alloc_ref.tcx, alloc_ref.range)
+            .map_err(|e| e.to_interp_error(alloc_ref.alloc_id))?)
+    }
+
+    /// Writes the given stream of bytes into memory.
+    ///
+    /// Performs appropriate bounds checks.
+    pub fn write_bytes(
+        &mut self,
+        ptr: Pointer<Option<M::PointerTag>>,
+        src: impl IntoIterator<Item = u8>,
+    ) -> InterpResult<'tcx> {
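+        // We need the iterator's exact length up front so the whole write can be bounds-checked as a single access.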
+        let mut src = src.into_iter();
+        let (lower, upper) = src.size_hint();
+        let len = upper.expect("can only write bounded iterators");
+        assert_eq!(lower, len, "can only write iterators with a precise length");
+
+        let size = Size::from_bytes(len);
+        let alloc_ref = match self.get_mut(ptr, size, Align::ONE)? {
+            Some(alloc_ref) => alloc_ref,
+            None => {
+                // zero-sized access
+                assert_matches!(
+                    src.next(),
+                    None,
+                    "iterator said it was empty but returned an element"
+                );
+                return Ok(());
+            }
+        };
+
+        // Side-step AllocRef and directly access the underlying bytes more efficiently.
+        // (We are staying inside the bounds here so all is good.)
+        let alloc_id = alloc_ref.alloc_id;
+        let bytes = alloc_ref
+            .alloc
+            .get_bytes_mut(&alloc_ref.tcx, alloc_ref.range)
+            .map_err(move |e| e.to_interp_error(alloc_id))?;
+        // `zip` would stop when the first iterator ends; we want to definitely
+        // cover all of `bytes`.
+        for dest in bytes {
+            *dest = src.next().expect("iterator was shorter than it said it would be");
+        }
+        assert_matches!(src.next(), None, "iterator was longer than it said it would be");
+        Ok(())
+    }
+
+    pub fn copy(
+        &mut self,
+        src: Pointer<Option<M::PointerTag>>,
+        src_align: Align,
+        dest: Pointer<Option<M::PointerTag>>,
+        dest_align: Align,
+        size: Size,
+        nonoverlapping: bool,
+    ) -> InterpResult<'tcx> {
+        self.copy_repeatedly(src, src_align, dest, dest_align, size, 1, nonoverlapping)
+    }
+
+    pub fn copy_repeatedly(
+        &mut self,
+        src: Pointer<Option<M::PointerTag>>,
+        src_align: Align,
+        dest: Pointer<Option<M::PointerTag>>,
+        dest_align: Align,
+        size: Size,
+        num_copies: u64,
+        nonoverlapping: bool,
+    ) -> InterpResult<'tcx> {
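+        // The destination must have room for `num_copies` back-to-back copies of `size` bytes; the source is read only once.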
+        let tcx = self.tcx;
+        // We need to do our own bounds-checks.
+        let src_parts = self.get_ptr_access(src, size, src_align)?;
+        let dest_parts = self.get_ptr_access(dest, size * num_copies, dest_align)?; // `Size` multiplication
+
+        // FIXME: we look up both allocations twice here, once before in `get_ptr_access`
+        // and once below to get the underlying `&[mut] Allocation`.
+
+        // Source alloc preparations and access hooks.
+        let (src_alloc_id, src_offset, src) = match src_parts {
+            None => return Ok(()), // Zero-sized *source*, which means `dest` is also zero-sized and we have nothing to do.
+            Some(src_ptr) => src_ptr,
+        };
+        let src_alloc = self.get_raw(src_alloc_id)?;
+        let src_range = alloc_range(src_offset, size);
+        M::memory_read(&self.extra, &src_alloc.extra, src.provenance, src_range)?;
+        // We need the `dest` ptr for the next operation, so we get it now.
+        // We already did the source checks and called the hooks so we are good to return early.
+        let (dest_alloc_id, dest_offset, dest) = match dest_parts {
+            None => return Ok(()), // Zero-sized *destination*.
+            Some(dest_ptr) => dest_ptr,
+        };
+
+        // first copy the relocations to a temporary buffer, because
+        // `get_bytes_mut` will clear the relocations, which is correct,
+        // since we don't want to keep any relocations at the target.
+        // (`get_bytes_with_uninit_and_ptr` below checks that there are no
+        // relocations overlapping the edges; those would not be handled correctly).
+        let relocations =
+            src_alloc.prepare_relocation_copy(self, src_range, dest_offset, num_copies);
+        // Prepare a copy of the initialization mask.
+        let compressed = src_alloc.compress_uninit_range(src_range);
+        // This checks relocation edges on the src.
+        let src_bytes = src_alloc
+            .get_bytes_with_uninit_and_ptr(&tcx, src_range)
+            .map_err(|e| e.to_interp_error(src_alloc_id))?
+            .as_ptr(); // raw ptr, so we can also get a ptr to the destination allocation
+
+        // Destination alloc preparations and access hooks.
+        let (dest_alloc, extra) = self.get_raw_mut(dest_alloc_id)?;
+        let dest_range = alloc_range(dest_offset, size * num_copies);
+        M::memory_written(extra, &mut dest_alloc.extra, dest.provenance, dest_range)?;
+        let dest_bytes = dest_alloc
+            .get_bytes_mut_ptr(&tcx, dest_range)
+            .map_err(|e| e.to_interp_error(dest_alloc_id))?
+            .as_mut_ptr();
+
+        if compressed.no_bytes_init() {
+            // Fast path: If all bytes are `uninit` then there is nothing to copy. The target range
+            // is marked as uninitialized but we otherwise omit changing the byte representation which may
+            // be arbitrary for uninitialized bytes.
+            // This also avoids writing to the target bytes so that the backing allocation is never
+            // touched if the bytes stay uninitialized for the whole interpreter execution. On contemporary
+            // operating systems this can avoid physically allocating the page.
+            dest_alloc.mark_init(dest_range, false); // `Size` multiplication
+            dest_alloc.mark_relocation_range(relocations);
+            return Ok(());
+        }
+
+        // SAFE: The above indexing would have panicked if there weren't at least `size` bytes
+        // behind `src` and `dest`. Also, we use the overlapping-safe `ptr::copy` if `src` and
+        // `dest` could possibly overlap.
+        // The pointers above remain valid even if the `HashMap` table is moved around because they
+        // point into the `Vec` storing the bytes.
+        unsafe {
+            if src_alloc_id == dest_alloc_id {
+                if nonoverlapping {
+                    // `Size` additions
+                    if (src_offset <= dest_offset && src_offset + size > dest_offset)
+                        || (dest_offset <= src_offset && dest_offset + size > src_offset)
+                    {
+                        throw_ub_format!("copy_nonoverlapping called on overlapping ranges")
+                    }
+                }
+
+                for i in 0..num_copies {
+                    ptr::copy(
+                        src_bytes,
+                        dest_bytes.add((size * i).bytes_usize()), // `Size` multiplication
+                        size.bytes_usize(),
+                    );
+                }
+            } else {
+                for i in 0..num_copies {
+                    ptr::copy_nonoverlapping(
+                        src_bytes,
+                        dest_bytes.add((size * i).bytes_usize()), // `Size` multiplication
+                        size.bytes_usize(),
+                    );
+                }
+            }
+        }
+
+        // now fill in all the "init" data
+        dest_alloc.mark_compressed_init_range(
+            &compressed,
+            alloc_range(dest_offset, size), // just a single copy (i.e., not full `dest_range`)
+            num_copies,
+        );
+        // copy the relocations to the destination
+        dest_alloc.mark_relocation_range(relocations);
+
+        Ok(())
+    }
+}
+
+/// Machine pointer introspection.
+impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
+    pub fn scalar_to_ptr(&self, scalar: Scalar<M::PointerTag>) -> Pointer<Option<M::PointerTag>> {
+        // We use `to_bits_or_ptr_internal` since we are just implementing the method people need to
+        // call to force getting out a pointer.
+        match scalar.to_bits_or_ptr_internal(self.pointer_size()) {
+            Err(ptr) => ptr.into(),
+            Ok(bits) => {
+                let addr = u64::try_from(bits).unwrap();
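+                // The machine decides whether (and which) provenance this integer address gets.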
+                let ptr = M::ptr_from_addr(&self, addr);
+                if addr == 0 {
+                    assert!(ptr.provenance.is_none(), "null pointer can never have an AllocId");
+                }
+                ptr
+            }
+        }
+    }
+
+    /// Turning a "maybe pointer" into a proper pointer (and some information
+    /// about where it points), or an absolute address.
+    pub fn ptr_try_get_alloc(
+        &self,
+        ptr: Pointer<Option<M::PointerTag>>,
+    ) -> Result<(AllocId, Size, Pointer<M::PointerTag>), u64> {
+        match ptr.into_pointer_or_addr() {
+            Ok(ptr) => {
+                let (alloc_id, offset) = M::ptr_get_alloc(self, ptr);
+                Ok((alloc_id, offset, ptr))
+            }
+            Err(addr) => Err(addr.bytes()),
+        }
+    }
+
+    /// Turning a "maybe pointer" into a proper pointer (and some information about where it points).
+    #[inline(always)]
+    pub fn ptr_get_alloc(
+        &self,
+        ptr: Pointer<Option<M::PointerTag>>,
+    ) -> InterpResult<'tcx, (AllocId, Size, Pointer<M::PointerTag>)> {
+        self.ptr_try_get_alloc(ptr).map_err(|offset| {
+            err_ub!(DanglingIntPointer(offset, CheckInAllocMsg::InboundsTest)).into()
+        })
+    }
+}
diff --git a/compiler/rustc_const_eval/src/interpret/mod.rs b/compiler/rustc_const_eval/src/interpret/mod.rs
new file mode 100644
index 00000000000..2b9fe565997
--- /dev/null
+++ b/compiler/rustc_const_eval/src/interpret/mod.rs
@@ -0,0 +1,33 @@
+//! An interpreter for MIR used in CTFE and by miri
+
+mod cast;
+mod eval_context;
+mod intern;
+mod intrinsics;
+mod machine;
+mod memory;
+mod operand;
+mod operator;
+mod place;
+mod step;
+mod terminator;
+mod traits;
+mod util;
+mod validity;
+mod visitor;
+
+pub use rustc_middle::mir::interpret::*; // have all the `interpret` symbols in one place: here
+
+pub use self::eval_context::{
+    Frame, FrameInfo, InterpCx, LocalState, LocalValue, StackPopCleanup, StackPopUnwind,
+};
+pub use self::intern::{intern_const_alloc_recursive, InternKind};
+pub use self::machine::{compile_time_machine, AllocMap, Machine, MayLeak, StackPopJump};
+pub use self::memory::{AllocCheck, AllocRef, AllocRefMut, FnVal, Memory, MemoryKind};
+pub use self::operand::{ImmTy, Immediate, OpTy, Operand};
+pub use self::place::{MPlaceTy, MemPlace, MemPlaceMeta, Place, PlaceTy};
+pub use self::validity::{CtfeValidationMode, RefTracking};
+pub use self::visitor::{MutValueVisitor, ValueVisitor};
+
+crate use self::intrinsics::eval_nullary_intrinsic;
+use eval_context::{from_known_layout, mir_assign_valid_types};
diff --git a/compiler/rustc_const_eval/src/interpret/operand.rs b/compiler/rustc_const_eval/src/interpret/operand.rs
new file mode 100644
index 00000000000..63aca67c944
--- /dev/null
+++ b/compiler/rustc_const_eval/src/interpret/operand.rs
@@ -0,0 +1,762 @@
+//! Functions concerning immediate values and operands, and reading from operands.
+//! All high-level functions to read from memory work on operands as sources.
+
+use std::convert::TryFrom;
+use std::fmt::Write;
+
+use rustc_errors::ErrorReported;
+use rustc_hir::def::Namespace;
+use rustc_macros::HashStable;
+use rustc_middle::ty::layout::{LayoutOf, PrimitiveExt, TyAndLayout};
+use rustc_middle::ty::print::{FmtPrinter, PrettyPrinter, Printer};
+use rustc_middle::ty::{ConstInt, Ty};
+use rustc_middle::{mir, ty};
+use rustc_target::abi::{Abi, HasDataLayout, Size, TagEncoding};
+use rustc_target::abi::{VariantIdx, Variants};
+
+use super::{
+    alloc_range, from_known_layout, mir_assign_valid_types, AllocId, ConstValue, GlobalId,
+    InterpCx, InterpResult, MPlaceTy, Machine, MemPlace, Place, PlaceTy, Pointer, Provenance,
+    Scalar, ScalarMaybeUninit,
+};
+
+/// An `Immediate` represents a single immediate self-contained Rust value.
+///
+/// For optimization of a few very common cases, there is also a representation for a pair of
+/// primitive values (`ScalarPair`). It allows Miri to avoid making allocations for checked binary
+/// operations and wide pointers. This idea was taken from rustc's codegen.
+/// In particular, thanks to `ScalarPair`, arithmetic operations and casts can be entirely
+/// defined on `Immediate`, and do not have to work with a `Place`.
+#[derive(Copy, Clone, PartialEq, Eq, HashStable, Hash, Debug)]
+pub enum Immediate<Tag: Provenance = AllocId> {
+    Scalar(ScalarMaybeUninit<Tag>),
+    ScalarPair(ScalarMaybeUninit<Tag>, ScalarMaybeUninit<Tag>),
+}
+
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+rustc_data_structures::static_assert_size!(Immediate, 56);
+
+impl<Tag: Provenance> From<ScalarMaybeUninit<Tag>> for Immediate<Tag> {
+    #[inline(always)]
+    fn from(val: ScalarMaybeUninit<Tag>) -> Self {
+        Immediate::Scalar(val)
+    }
+}
+
+impl<Tag: Provenance> From<Scalar<Tag>> for Immediate<Tag> {
+    #[inline(always)]
+    fn from(val: Scalar<Tag>) -> Self {
+        Immediate::Scalar(val.into())
+    }
+}
+
+impl<'tcx, Tag: Provenance> Immediate<Tag> {
+    pub fn from_pointer(p: Pointer<Tag>, cx: &impl HasDataLayout) -> Self {
+        Immediate::Scalar(ScalarMaybeUninit::from_pointer(p, cx))
+    }
+
+    pub fn from_maybe_pointer(p: Pointer<Option<Tag>>, cx: &impl HasDataLayout) -> Self {
+        Immediate::Scalar(ScalarMaybeUninit::from_maybe_pointer(p, cx))
+    }
+
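+    /// An immediate for a slice: the data pointer paired with the length as a machine-sized integer.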
+    pub fn new_slice(val: Scalar<Tag>, len: u64, cx: &impl HasDataLayout) -> Self {
+        Immediate::ScalarPair(val.into(), Scalar::from_machine_usize(len, cx).into())
+    }
+
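+    /// An immediate for a trait object: the data pointer paired with the vtable pointer.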
+    pub fn new_dyn_trait(
+        val: Scalar<Tag>,
+        vtable: Pointer<Option<Tag>>,
+        cx: &impl HasDataLayout,
+    ) -> Self {
+        Immediate::ScalarPair(val.into(), ScalarMaybeUninit::from_maybe_pointer(vtable, cx))
+    }
+
+    #[inline]
+    pub fn to_scalar_or_uninit(self) -> ScalarMaybeUninit<Tag> {
+        match self {
+            Immediate::Scalar(val) => val,
+            Immediate::ScalarPair(..) => bug!("Got a scalar pair where a scalar was expected"),
+        }
+    }
+
+    #[inline]
+    pub fn to_scalar(self) -> InterpResult<'tcx, Scalar<Tag>> {
+        self.to_scalar_or_uninit().check_init()
+    }
+
+    #[inline]
+    pub fn to_scalar_pair(self) -> InterpResult<'tcx, (Scalar<Tag>, Scalar<Tag>)> {
+        match self {
+            Immediate::ScalarPair(val1, val2) => Ok((val1.check_init()?, val2.check_init()?)),
+            Immediate::Scalar(..) => {
+                bug!("Got a scalar where a scalar pair was expected")
+            }
+        }
+    }
+}
+
+// ScalarPair needs a type to interpret, so we often have an immediate and a type together
+// as input for binary and cast operations.
+#[derive(Copy, Clone, Debug)]
+pub struct ImmTy<'tcx, Tag: Provenance = AllocId> {
+    imm: Immediate<Tag>,
+    pub layout: TyAndLayout<'tcx>,
+}
+
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+rustc_data_structures::static_assert_size!(ImmTy<'_>, 72);
+
+impl<Tag: Provenance> std::fmt::Display for ImmTy<'tcx, Tag> {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        /// Helper function for printing a scalar to a FmtPrinter
+        fn p<'a, 'tcx, F: std::fmt::Write, Tag: Provenance>(
+            cx: FmtPrinter<'a, 'tcx, F>,
+            s: ScalarMaybeUninit<Tag>,
+            ty: Ty<'tcx>,
+        ) -> Result<FmtPrinter<'a, 'tcx, F>, std::fmt::Error> {
+            match s {
+                ScalarMaybeUninit::Scalar(Scalar::Int(int)) => {
+                    cx.pretty_print_const_scalar_int(int, ty, true)
+                }
+                ScalarMaybeUninit::Scalar(Scalar::Ptr(ptr, _sz)) => {
+                    // Just print the ptr value. `pretty_print_const_scalar_ptr` would also try to
+                    // print what it points to, which would fail since it has no access to the local
+                    // memory.
+                    cx.pretty_print_const_pointer(ptr, ty, true)
+                }
+                ScalarMaybeUninit::Uninit => cx.typed_value(
+                    |mut this| {
+                        this.write_str("uninit ")?;
+                        Ok(this)
+                    },
+                    |this| this.print_type(ty),
+                    " ",
+                ),
+            }
+        }
+        ty::tls::with(|tcx| {
+            match self.imm {
+                Immediate::Scalar(s) => {
+                    if let Some(ty) = tcx.lift(self.layout.ty) {
+                        let cx = FmtPrinter::new(tcx, f, Namespace::ValueNS);
+                        p(cx, s, ty)?;
+                        return Ok(());
+                    }
+                    write!(f, "{}: {}", s, self.layout.ty)
+                }
+                Immediate::ScalarPair(a, b) => {
+                    // FIXME(oli-obk): at least print tuples and slices nicely
+                    write!(f, "({}, {}): {}", a, b, self.layout.ty,)
+                }
+            }
+        })
+    }
+}
+
+impl<'tcx, Tag: Provenance> std::ops::Deref for ImmTy<'tcx, Tag> {
+    type Target = Immediate<Tag>;
+    #[inline(always)]
+    fn deref(&self) -> &Immediate<Tag> {
+        &self.imm
+    }
+}
+
+/// An `Operand` is the result of computing a `mir::Operand`. It can be immediate,
+/// or still in memory. The latter is an optimization, to delay reading that chunk of
+/// memory and to avoid having to store arbitrary-sized data here.
+#[derive(Copy, Clone, PartialEq, Eq, HashStable, Hash, Debug)]
+pub enum Operand<Tag: Provenance = AllocId> {
+    Immediate(Immediate<Tag>),
+    Indirect(MemPlace<Tag>),
+}
+
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
+pub struct OpTy<'tcx, Tag: Provenance = AllocId> {
+    op: Operand<Tag>, // Keep this private; it helps enforce invariants.
+    pub layout: TyAndLayout<'tcx>,
+}
+
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+rustc_data_structures::static_assert_size!(OpTy<'_>, 80);
+
+impl<'tcx, Tag: Provenance> std::ops::Deref for OpTy<'tcx, Tag> {
+    type Target = Operand<Tag>;
+    #[inline(always)]
+    fn deref(&self) -> &Operand<Tag> {
+        &self.op
+    }
+}
+
+impl<'tcx, Tag: Provenance> From<MPlaceTy<'tcx, Tag>> for OpTy<'tcx, Tag> {
+    #[inline(always)]
+    fn from(mplace: MPlaceTy<'tcx, Tag>) -> Self {
+        OpTy { op: Operand::Indirect(*mplace), layout: mplace.layout }
+    }
+}
+
+impl<'tcx, Tag: Provenance> From<&'_ MPlaceTy<'tcx, Tag>> for OpTy<'tcx, Tag> {
+    #[inline(always)]
+    fn from(mplace: &MPlaceTy<'tcx, Tag>) -> Self {
+        OpTy { op: Operand::Indirect(**mplace), layout: mplace.layout }
+    }
+}
+
+impl<'tcx, Tag: Provenance> From<ImmTy<'tcx, Tag>> for OpTy<'tcx, Tag> {
+    #[inline(always)]
+    fn from(val: ImmTy<'tcx, Tag>) -> Self {
+        OpTy { op: Operand::Immediate(val.imm), layout: val.layout }
+    }
+}
+
+impl<'tcx, Tag: Provenance> ImmTy<'tcx, Tag> {
+    #[inline]
+    pub fn from_scalar(val: Scalar<Tag>, layout: TyAndLayout<'tcx>) -> Self {
+        ImmTy { imm: val.into(), layout }
+    }
+
+    #[inline]
+    pub fn from_immediate(imm: Immediate<Tag>, layout: TyAndLayout<'tcx>) -> Self {
+        ImmTy { imm, layout }
+    }
+
+    #[inline]
+    pub fn try_from_uint(i: impl Into<u128>, layout: TyAndLayout<'tcx>) -> Option<Self> {
+        Some(Self::from_scalar(Scalar::try_from_uint(i, layout.size)?, layout))
+    }
+    #[inline]
+    pub fn from_uint(i: impl Into<u128>, layout: TyAndLayout<'tcx>) -> Self {
+        Self::from_scalar(Scalar::from_uint(i, layout.size), layout)
+    }
+
+    #[inline]
+    pub fn try_from_int(i: impl Into<i128>, layout: TyAndLayout<'tcx>) -> Option<Self> {
+        Some(Self::from_scalar(Scalar::try_from_int(i, layout.size)?, layout))
+    }
+
+    #[inline]
+    pub fn from_int(i: impl Into<i128>, layout: TyAndLayout<'tcx>) -> Self {
+        Self::from_scalar(Scalar::from_int(i, layout.size), layout)
+    }
+
+    #[inline]
+    pub fn to_const_int(self) -> ConstInt {
+        assert!(self.layout.ty.is_integral());
+        let int = self.to_scalar().expect("to_const_int doesn't work on scalar pairs").assert_int();
+        ConstInt::new(int, self.layout.ty.is_signed(), self.layout.ty.is_ptr_sized_integral())
+    }
+}
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+    /// Try reading an immediate in memory; this is interesting particularly for `ScalarPair`.
+    /// Returns `None` if the layout does not permit loading this as a value.
+    fn try_read_immediate_from_mplace(
+        &self,
+        mplace: &MPlaceTy<'tcx, M::PointerTag>,
+    ) -> InterpResult<'tcx, Option<ImmTy<'tcx, M::PointerTag>>> {
+        if mplace.layout.is_unsized() {
+            // Don't touch unsized
+            return Ok(None);
+        }
+
+        let alloc = match self.get_alloc(mplace)? {
+            Some(ptr) => ptr,
+            None => {
+                return Ok(Some(ImmTy {
+                    // zero-sized type
+                    imm: Scalar::ZST.into(),
+                    layout: mplace.layout,
+                }));
+            }
+        };
+
+        match mplace.layout.abi {
+            Abi::Scalar(..) => {
+                let scalar = alloc.read_scalar(alloc_range(Size::ZERO, mplace.layout.size))?;
+                Ok(Some(ImmTy { imm: scalar.into(), layout: mplace.layout }))
+            }
+            Abi::ScalarPair(ref a, ref b) => {
+                // We checked `ptr_align` above, so all fields will have the alignment they need.
+                // We would anyway check against `ptr_align.restrict_for_offset(b_offset)`,
+                // which `ptr.offset(b_offset)` cannot possibly fail to satisfy.
+                let (a, b) = (&a.value, &b.value);
+                let (a_size, b_size) = (a.size(self), b.size(self));
+                let b_offset = a_size.align_to(b.align(self).abi);
+                assert!(b_offset.bytes() > 0); // we later use the offset to tell apart the fields
+                let a_val = alloc.read_scalar(alloc_range(Size::ZERO, a_size))?;
+                let b_val = alloc.read_scalar(alloc_range(b_offset, b_size))?;
+                Ok(Some(ImmTy { imm: Immediate::ScalarPair(a_val, b_val), layout: mplace.layout }))
+            }
+            _ => Ok(None),
+        }
+    }
+
+    /// Try returning an immediate for the operand.
+    /// If the layout does not permit loading this as an immediate, return where in memory
+    /// we can find the data.
+    /// Note that for a given layout, this operation will either always fail or always
+    /// succeed!  Whether it succeeds depends on whether the layout can be represented
+    /// in an `Immediate`, not on which data is stored there currently.
+    pub fn try_read_immediate(
+        &self,
+        src: &OpTy<'tcx, M::PointerTag>,
+    ) -> InterpResult<'tcx, Result<ImmTy<'tcx, M::PointerTag>, MPlaceTy<'tcx, M::PointerTag>>> {
+        Ok(match src.try_as_mplace() {
+            Ok(ref mplace) => {
+                if let Some(val) = self.try_read_immediate_from_mplace(mplace)? {
+                    Ok(val)
+                } else {
+                    Err(*mplace)
+                }
+            }
+            Err(val) => Ok(val),
+        })
+    }
+
+    /// Read an immediate from a place, asserting that that is possible with the given layout.
+    #[inline(always)]
+    pub fn read_immediate(
+        &self,
+        op: &OpTy<'tcx, M::PointerTag>,
+    ) -> InterpResult<'tcx, ImmTy<'tcx, M::PointerTag>> {
+        if let Ok(imm) = self.try_read_immediate(op)? {
+            Ok(imm)
+        } else {
+            span_bug!(self.cur_span(), "primitive read failed for type: {:?}", op.layout.ty);
+        }
+    }
+
+    /// Read a scalar from a place
+    pub fn read_scalar(
+        &self,
+        op: &OpTy<'tcx, M::PointerTag>,
+    ) -> InterpResult<'tcx, ScalarMaybeUninit<M::PointerTag>> {
+        Ok(self.read_immediate(op)?.to_scalar_or_uninit())
+    }
+
+    /// Read a pointer from a place.
+    pub fn read_pointer(
+        &self,
+        op: &OpTy<'tcx, M::PointerTag>,
+    ) -> InterpResult<'tcx, Pointer<Option<M::PointerTag>>> {
+        Ok(self.scalar_to_ptr(self.read_scalar(op)?.check_init()?))
+    }
+
+    // Turn the wide MPlace into a string (must already be dereferenced!)
+    pub fn read_str(&self, mplace: &MPlaceTy<'tcx, M::PointerTag>) -> InterpResult<'tcx, &str> {
+        let len = mplace.len(self)?;
+        let bytes = self.memory.read_bytes(mplace.ptr, Size::from_bytes(len))?;
+        let str = std::str::from_utf8(bytes).map_err(|err| err_ub!(InvalidStr(err)))?;
+        Ok(str)
+    }
+
+    /// Projection functions
+    pub fn operand_field(
+        &self,
+        op: &OpTy<'tcx, M::PointerTag>,
+        field: usize,
+    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
+        let base = match op.try_as_mplace() {
+            Ok(ref mplace) => {
+                // We can reuse the mplace field computation logic for indirect operands.
+                let field = self.mplace_field(mplace, field)?;
+                return Ok(field.into());
+            }
+            Err(value) => value,
+        };
+
+        let field_layout = op.layout.field(self, field);
+        if field_layout.is_zst() {
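+            // A ZST field carries no data, so we can produce it without reading from the base operand.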
+            let immediate = Scalar::ZST.into();
+            return Ok(OpTy { op: Operand::Immediate(immediate), layout: field_layout });
+        }
+        let offset = op.layout.fields.offset(field);
+        let immediate = match *base {
+            // the field covers the entire type
+            _ if offset.bytes() == 0 && field_layout.size == op.layout.size => *base,
+            // extract fields from types with `ScalarPair` ABI
+            Immediate::ScalarPair(a, b) => {
+                let val = if offset.bytes() == 0 { a } else { b };
+                Immediate::from(val)
+            }
+            Immediate::Scalar(val) => span_bug!(
+                self.cur_span(),
+                "field access on non aggregate {:#?}, {:#?}",
+                val,
+                op.layout
+            ),
+        };
+        Ok(OpTy { op: Operand::Immediate(immediate), layout: field_layout })
+    }
+
+    pub fn operand_index(
+        &self,
+        op: &OpTy<'tcx, M::PointerTag>,
+        index: u64,
+    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
+        if let Ok(index) = usize::try_from(index) {
+            // We can just treat this as a field.
+            self.operand_field(op, index)
+        } else {
+            // Indexing into a big array. This must be an mplace.
+            let mplace = op.assert_mem_place();
+            Ok(self.mplace_index(&mplace, index)?.into())
+        }
+    }
+
+    pub fn operand_downcast(
+        &self,
+        op: &OpTy<'tcx, M::PointerTag>,
+        variant: VariantIdx,
+    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
+        // Downcasts only change the layout
+        Ok(match op.try_as_mplace() {
+            Ok(ref mplace) => self.mplace_downcast(mplace, variant)?.into(),
+            Err(..) => {
+                let layout = op.layout.for_variant(self, variant);
+                OpTy { layout, ..*op }
+            }
+        })
+    }
+
+    pub fn operand_projection(
+        &self,
+        base: &OpTy<'tcx, M::PointerTag>,
+        proj_elem: mir::PlaceElem<'tcx>,
+    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
+        use rustc_middle::mir::ProjectionElem::*;
+        Ok(match proj_elem {
+            Field(field, _) => self.operand_field(base, field.index())?,
+            Downcast(_, variant) => self.operand_downcast(base, variant)?,
+            Deref => self.deref_operand(base)?.into(),
+            Subslice { .. } | ConstantIndex { .. } | Index(_) => {
+                // The rest should only occur as mplace, we do not use Immediates for types
+                // allowing such operations.  This matches place_projection forcing an allocation.
+                let mplace = base.assert_mem_place();
+                self.mplace_projection(&mplace, proj_elem)?.into()
+            }
+        })
+    }
+
+    /// Read from a local. Will not actually access the local if reading from a ZST.
+    /// Will not access memory; instead, an indirect `Operand` is returned.
+    ///
+    /// This is public because it is used by [priroda](https://github.com/oli-obk/priroda) to get an
+    /// OpTy from a local
+    pub fn access_local(
+        &self,
+        frame: &super::Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra>,
+        local: mir::Local,
+        layout: Option<TyAndLayout<'tcx>>,
+    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
+        let layout = self.layout_of_local(frame, local, layout)?;
+        let op = if layout.is_zst() {
+            // Do not read from ZST, they might not be initialized
+            Operand::Immediate(Scalar::ZST.into())
+        } else {
+            M::access_local(&self, frame, local)?
+        };
+        Ok(OpTy { op, layout })
+    }
+
+    /// Every place can be read from, so we can turn them into an operand.
+    /// This will definitely return `Indirect` if the place is a `Ptr`, i.e., this
+    /// will never actually read from memory.
+    #[inline(always)]
+    pub fn place_to_op(
+        &self,
+        place: &PlaceTy<'tcx, M::PointerTag>,
+    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
+        let op = match **place {
+            Place::Ptr(mplace) => Operand::Indirect(mplace),
+            Place::Local { frame, local } => {
+                *self.access_local(&self.stack()[frame], local, None)?
+            }
+        };
+        Ok(OpTy { op, layout: place.layout })
+    }
+
+    // Evaluate a place with the goal of reading from it.  This lets us sometimes
+    // avoid allocations.
+    pub fn eval_place_to_op(
+        &self,
+        place: mir::Place<'tcx>,
+        layout: Option<TyAndLayout<'tcx>>,
+    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
+        // Do not use the layout passed in as argument if the base we are looking at
+        // here is not the entire place.
+        let layout = if place.projection.is_empty() { layout } else { None };
+
+        let base_op = self.access_local(self.frame(), place.local, layout)?;
+
+        let op = place
+            .projection
+            .iter()
+            .try_fold(base_op, |op, elem| self.operand_projection(&op, elem))?;
+
+        trace!("eval_place_to_op: got {:?}", *op);
+        // Sanity-check the type we ended up with.
+        debug_assert!(mir_assign_valid_types(
+            *self.tcx,
+            self.param_env,
+            self.layout_of(self.subst_from_current_frame_and_normalize_erasing_regions(
+                place.ty(&self.frame().body.local_decls, *self.tcx).ty
+            ))?,
+            op.layout,
+        ));
+        Ok(op)
+    }
+
+    /// Evaluate the operand, returning a place where you can then find the data.
+    /// If you already know the layout, you can save two table lookups
+    /// by passing it in here.
+    #[inline]
+    pub fn eval_operand(
+        &self,
+        mir_op: &mir::Operand<'tcx>,
+        layout: Option<TyAndLayout<'tcx>>,
+    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
+        use rustc_middle::mir::Operand::*;
+        let op = match *mir_op {
+            // FIXME: do some more logic on `move` to invalidate the old location
+            Copy(place) | Move(place) => self.eval_place_to_op(place, layout)?,
+
+            Constant(ref constant) => {
+                let val =
+                    self.subst_from_current_frame_and_normalize_erasing_regions(constant.literal);
+                // This can still fail:
+                // * During ConstProp, with `TooGeneric` or since the `required_consts` were not all
+                //   checked yet.
+                // * During CTFE, since promoteds in `const`/`static` initializer bodies can fail.
+
+                self.mir_const_to_op(&val, layout)?
+            }
+        };
+        trace!("{:?}: {:?}", mir_op, *op);
+        Ok(op)
+    }
+
+    /// Evaluate a bunch of operands at once
+    pub(super) fn eval_operands(
+        &self,
+        ops: &[mir::Operand<'tcx>],
+    ) -> InterpResult<'tcx, Vec<OpTy<'tcx, M::PointerTag>>> {
+        ops.iter().map(|op| self.eval_operand(op, None)).collect()
+    }
+
+    // Used when the miri-engine runs into a constant and for extracting information from constants
+    // in patterns via the `const_eval` module
+    /// The `val` and `layout` are assumed to already be in our interpreter
+    /// "universe" (param_env).
+    pub fn const_to_op(
+        &self,
+        val: &ty::Const<'tcx>,
+        layout: Option<TyAndLayout<'tcx>>,
+    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
+        match val.val {
+            ty::ConstKind::Param(_) | ty::ConstKind::Bound(..) => throw_inval!(TooGeneric),
+            ty::ConstKind::Error(_) => throw_inval!(AlreadyReported(ErrorReported)),
+            ty::ConstKind::Unevaluated(uv) => {
+                let instance = self.resolve(uv.def, uv.substs(*self.tcx))?;
+                Ok(self.eval_to_allocation(GlobalId { instance, promoted: uv.promoted })?.into())
+            }
+            ty::ConstKind::Infer(..) | ty::ConstKind::Placeholder(..) => {
+                span_bug!(self.cur_span(), "const_to_op: Unexpected ConstKind {:?}", val)
+            }
+            ty::ConstKind::Value(val_val) => self.const_val_to_op(val_val, val.ty, layout),
+        }
+    }
+
+    pub fn mir_const_to_op(
+        &self,
+        val: &mir::ConstantKind<'tcx>,
+        layout: Option<TyAndLayout<'tcx>>,
+    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
+        match val {
+            mir::ConstantKind::Ty(ct) => self.const_to_op(ct, layout),
+            mir::ConstantKind::Val(val, ty) => self.const_val_to_op(*val, ty, layout),
+        }
+    }
+
+    crate fn const_val_to_op(
+        &self,
+        val_val: ConstValue<'tcx>,
+        ty: Ty<'tcx>,
+        layout: Option<TyAndLayout<'tcx>>,
+    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
+        // Other cases need layout.
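+        // Any pointers in the constant are re-tagged via `global_base_pointer` so they are valid in this interpreter's memory.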
+        let tag_scalar = |scalar| -> InterpResult<'tcx, _> {
+            Ok(match scalar {
+                Scalar::Ptr(ptr, size) => Scalar::Ptr(self.global_base_pointer(ptr)?, size),
+                Scalar::Int(int) => Scalar::Int(int),
+            })
+        };
+        let layout = from_known_layout(self.tcx, self.param_env, layout, || self.layout_of(ty))?;
+        let op = match val_val {
+            ConstValue::ByRef { alloc, offset } => {
+                let id = self.tcx.create_memory_alloc(alloc);
+                // We rely on mutability being set correctly in that allocation to prevent writes
+                // where none should happen.
+                let ptr = self.global_base_pointer(Pointer::new(id, offset))?;
+                Operand::Indirect(MemPlace::from_ptr(ptr.into(), layout.align.abi))
+            }
+            ConstValue::Scalar(x) => Operand::Immediate(tag_scalar(x)?.into()),
+            ConstValue::Slice { data, start, end } => {
+                // We rely on mutability being set correctly in `data` to prevent writes
+                // where none should happen.
+                let ptr = Pointer::new(
+                    self.tcx.create_memory_alloc(data),
+                    Size::from_bytes(start), // offset: `start`
+                );
+                Operand::Immediate(Immediate::new_slice(
+                    Scalar::from_pointer(self.global_base_pointer(ptr)?, &*self.tcx),
+                    u64::try_from(end.checked_sub(start).unwrap()).unwrap(), // len: `end - start`
+                    self,
+                ))
+            }
+        };
+        Ok(OpTy { op, layout })
+    }
+
+    /// Read discriminant, return the runtime value as well as the variant index.
+    pub fn read_discriminant(
+        &self,
+        op: &OpTy<'tcx, M::PointerTag>,
+    ) -> InterpResult<'tcx, (Scalar<M::PointerTag>, VariantIdx)> {
+        trace!("read_discriminant_value {:#?}", op.layout);
+        // Get type and layout of the discriminant.
+        let discr_layout = self.layout_of(op.layout.ty.discriminant_ty(*self.tcx))?;
+        trace!("discriminant type: {:?}", discr_layout.ty);
+
+        // We use "discriminant" to refer to the value associated with a particular enum variant.
+        // This is not to be confused with its "variant index", which is just its position in the
+        // declared list of variants -- they can differ with explicitly assigned discriminants.
+        // We use "tag" to refer to how the discriminant is encoded in memory, which can be either
+        // straight-forward (`TagEncoding::Direct`) or with a niche (`TagEncoding::Niche`).
+        let (tag_scalar_layout, tag_encoding, tag_field) = match op.layout.variants {
+            Variants::Single { index } => {
+                let discr = match op.layout.ty.discriminant_for_variant(*self.tcx, index) {
+                    Some(discr) => {
+                        // This type actually has discriminants.
+                        assert_eq!(discr.ty, discr_layout.ty);
+                        Scalar::from_uint(discr.val, discr_layout.size)
+                    }
+                    None => {
+                        // On a type without actual discriminants, variant is 0.
+                        assert_eq!(index.as_u32(), 0);
+                        Scalar::from_uint(index.as_u32(), discr_layout.size)
+                    }
+                };
+                return Ok((discr, index));
+            }
+            Variants::Multiple { ref tag, ref tag_encoding, tag_field, .. } => {
+                (tag, tag_encoding, tag_field)
+            }
+        };
+
+        // There are *three* layouts that come into play here:
+        // - The discriminant has a type for typechecking. This is `discr_layout`, and is used for
+        //   the `Scalar` we return.
+        // - The tag (encoded discriminant) has layout `tag_layout`. This is always an integer type,
+        //   and used to interpret the value we read from the tag field.
+        //   For the return value, a cast to `discr_layout` is performed.
+        // - The field storing the tag has a layout, which is very similar to `tag_layout` but
+        //   may be a pointer. This is `tag_val.layout`; we just use it for sanity checks.
+
+        // Get layout for tag.
+        let tag_layout = self.layout_of(tag_scalar_layout.value.to_int_ty(*self.tcx))?;
+
+        // Read tag and sanity-check `tag_layout`.
+        let tag_val = self.read_immediate(&self.operand_field(op, tag_field)?)?;
+        assert_eq!(tag_layout.size, tag_val.layout.size);
+        assert_eq!(tag_layout.abi.is_signed(), tag_val.layout.abi.is_signed());
+        let tag_val = tag_val.to_scalar()?;
+        trace!("tag value: {:?}", tag_val);
+
+        // Figure out which discriminant and variant this corresponds to.
+        Ok(match *tag_encoding {
+            TagEncoding::Direct => {
+                let tag_bits = tag_val
+                    .try_to_int()
+                    .map_err(|dbg_val| err_ub!(InvalidTag(dbg_val)))?
+                    .assert_bits(tag_layout.size);
+                // Cast bits from tag layout to discriminant layout.
+                let discr_val = self.cast_from_scalar(tag_bits, tag_layout, discr_layout.ty);
+                let discr_bits = discr_val.assert_bits(discr_layout.size);
+                // Convert discriminant to variant index, and catch invalid discriminants.
+                let index = match *op.layout.ty.kind() {
+                    ty::Adt(adt, _) => {
+                        adt.discriminants(*self.tcx).find(|(_, var)| var.val == discr_bits)
+                    }
+                    ty::Generator(def_id, substs, _) => {
+                        let substs = substs.as_generator();
+                        substs
+                            .discriminants(def_id, *self.tcx)
+                            .find(|(_, var)| var.val == discr_bits)
+                    }
+                    _ => span_bug!(self.cur_span(), "tagged layout for non-adt non-generator"),
+                }
+                .ok_or_else(|| err_ub!(InvalidTag(Scalar::from_uint(tag_bits, tag_layout.size))))?;
+                // Return the cast value, and the index.
+                (discr_val, index.0)
+            }
+            TagEncoding::Niche { dataful_variant, ref niche_variants, niche_start } => {
+                // Compute the variant this niche value/"tag" corresponds to. With niche layout,
+                // discriminant (encoded in niche/tag) and variant index are the same.
+                let variants_start = niche_variants.start().as_u32();
+                let variants_end = niche_variants.end().as_u32();
+                let variant = match tag_val.try_to_int() {
+                    Err(dbg_val) => {
+                        // So this is a pointer then, and casting to an int failed.
+                        // Can only happen during CTFE.
+                        let ptr = self.scalar_to_ptr(tag_val);
+                        // The niche must be just 0, and the ptr not null, then we know this is
+                        // okay. Everything else, we conservatively reject.
+                        let ptr_valid = niche_start == 0
+                            && variants_start == variants_end
+                            && !self.memory.ptr_may_be_null(ptr);
+                        if !ptr_valid {
+                            throw_ub!(InvalidTag(dbg_val))
+                        }
+                        dataful_variant
+                    }
+                    Ok(tag_bits) => {
+                        let tag_bits = tag_bits.assert_bits(tag_layout.size);
+                        // We need to use machine arithmetic to get the relative variant idx:
+                        // variant_index_relative = tag_val - niche_start_val
+                        let tag_val = ImmTy::from_uint(tag_bits, tag_layout);
+                        let niche_start_val = ImmTy::from_uint(niche_start, tag_layout);
+                        let variant_index_relative_val =
+                            self.binary_op(mir::BinOp::Sub, &tag_val, &niche_start_val)?;
+                        let variant_index_relative = variant_index_relative_val
+                            .to_scalar()?
+                            .assert_bits(tag_val.layout.size);
+                        // Check if this is in the range that indicates an actual discriminant.
+                        if variant_index_relative <= u128::from(variants_end - variants_start) {
+                            let variant_index_relative = u32::try_from(variant_index_relative)
+                                .expect("we checked that this fits into a u32");
+                            // Then computing the absolute variant idx should not overflow any more.
+                            let variant_index = variants_start
+                                .checked_add(variant_index_relative)
+                                .expect("overflow computing absolute variant idx");
+                            let variants_len = op
+                                .layout
+                                .ty
+                                .ty_adt_def()
+                                .expect("tagged layout for non adt")
+                                .variants
+                                .len();
+                            assert!(usize::try_from(variant_index).unwrap() < variants_len);
+                            VariantIdx::from_u32(variant_index)
+                        } else {
+                            dataful_variant
+                        }
+                    }
+                };
+                // Compute the size of the scalar we need to return.
+                // No need to cast, because the variant index directly serves as discriminant and is
+                // encoded in the tag.
+                (Scalar::from_uint(variant.as_u32(), discr_layout.size), variant)
+            }
+        })
+    }
+}
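As a standalone sketch of the niche-decoding arithmetic above (not part of this diff: `decode_niche` and the concrete tag values are invented for illustration), the relative index is obtained by a wrapping subtraction at the tag's width and is only mapped to a niche-encoded variant when it falls inside `niche_variants`; the real code performs the subtraction via `binary_op(Sub, ..)` on `ImmTy`s of the tag layout and additionally handles pointer-valued tags as shown above.

    // Minimal sketch of TagEncoding::Niche decoding with plain integers.
    fn decode_niche(
        tag: u128,
        tag_size_bits: u32,
        niche_start: u128,
        variants_start: u32,
        variants_end: u32,
        dataful_variant: u32,
    ) -> u32 {
        // Machine subtraction at the tag's width: wrap modulo 2^tag_size_bits.
        let mask = if tag_size_bits == 128 { u128::MAX } else { (1u128 << tag_size_bits) - 1 };
        let relative = tag.wrapping_sub(niche_start) & mask;
        if relative <= u128::from(variants_end - variants_start) {
            // In range: a niche-encoded, data-less variant.
            variants_start + relative as u32
        } else {
            // Out of range: the value belongs to the dataful variant.
            dataful_variant
        }
    }

    fn main() {
        // Hypothetical u8 tag where variants 1..=3 are encoded as the tags 0xfe, 0xff, 0x00.
        assert_eq!(decode_niche(0xff, 8, 0xfe, 1, 3, 0), 2);
        assert_eq!(decode_niche(0x00, 8, 0xfe, 1, 3, 0), 3);
        assert_eq!(decode_niche(0x2a, 8, 0xfe, 1, 3, 0), 0); // any other tag: dataful variant
    }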
diff --git a/compiler/rustc_const_eval/src/interpret/operator.rs b/compiler/rustc_const_eval/src/interpret/operator.rs
new file mode 100644
index 00000000000..ac000b1bb56
--- /dev/null
+++ b/compiler/rustc_const_eval/src/interpret/operator.rs
@@ -0,0 +1,417 @@
+use std::convert::TryFrom;
+
+use rustc_apfloat::Float;
+use rustc_middle::mir;
+use rustc_middle::mir::interpret::{InterpResult, Scalar};
+use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
+use rustc_middle::ty::{self, FloatTy, Ty};
+
+use super::{ImmTy, Immediate, InterpCx, Machine, PlaceTy};
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+    /// Applies the binary operation `op` to the two operands and writes a tuple of the result
+    /// and a boolean signifying the potential overflow to the destination.
+    pub fn binop_with_overflow(
+        &mut self,
+        op: mir::BinOp,
+        left: &ImmTy<'tcx, M::PointerTag>,
+        right: &ImmTy<'tcx, M::PointerTag>,
+        dest: &PlaceTy<'tcx, M::PointerTag>,
+    ) -> InterpResult<'tcx> {
+        let (val, overflowed, ty) = self.overflowing_binary_op(op, &left, &right)?;
+        debug_assert_eq!(
+            self.tcx.intern_tup(&[ty, self.tcx.types.bool]),
+            dest.layout.ty,
+            "type mismatch for result of {:?}",
+            op,
+        );
+        let val = Immediate::ScalarPair(val.into(), Scalar::from_bool(overflowed).into());
+        self.write_immediate(val, dest)
+    }
+
+    /// Applies the binary operation `op` to the arguments and writes the result to the
+    /// destination.
+    pub fn binop_ignore_overflow(
+        &mut self,
+        op: mir::BinOp,
+        left: &ImmTy<'tcx, M::PointerTag>,
+        right: &ImmTy<'tcx, M::PointerTag>,
+        dest: &PlaceTy<'tcx, M::PointerTag>,
+    ) -> InterpResult<'tcx> {
+        let (val, _overflowed, ty) = self.overflowing_binary_op(op, left, right)?;
+        assert_eq!(ty, dest.layout.ty, "type mismatch for result of {:?}", op);
+        self.write_scalar(val, dest)
+    }
+}
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+    fn binary_char_op(
+        &self,
+        bin_op: mir::BinOp,
+        l: char,
+        r: char,
+    ) -> (Scalar<M::PointerTag>, bool, Ty<'tcx>) {
+        use rustc_middle::mir::BinOp::*;
+
+        let res = match bin_op {
+            Eq => l == r,
+            Ne => l != r,
+            Lt => l < r,
+            Le => l <= r,
+            Gt => l > r,
+            Ge => l >= r,
+            _ => span_bug!(self.cur_span(), "Invalid operation on char: {:?}", bin_op),
+        };
+        (Scalar::from_bool(res), false, self.tcx.types.bool)
+    }
+
+    fn binary_bool_op(
+        &self,
+        bin_op: mir::BinOp,
+        l: bool,
+        r: bool,
+    ) -> (Scalar<M::PointerTag>, bool, Ty<'tcx>) {
+        use rustc_middle::mir::BinOp::*;
+
+        let res = match bin_op {
+            Eq => l == r,
+            Ne => l != r,
+            Lt => l < r,
+            Le => l <= r,
+            Gt => l > r,
+            Ge => l >= r,
+            BitAnd => l & r,
+            BitOr => l | r,
+            BitXor => l ^ r,
+            _ => span_bug!(self.cur_span(), "Invalid operation on bool: {:?}", bin_op),
+        };
+        (Scalar::from_bool(res), false, self.tcx.types.bool)
+    }
+
+    fn binary_float_op<F: Float + Into<Scalar<M::PointerTag>>>(
+        &self,
+        bin_op: mir::BinOp,
+        ty: Ty<'tcx>,
+        l: F,
+        r: F,
+    ) -> (Scalar<M::PointerTag>, bool, Ty<'tcx>) {
+        use rustc_middle::mir::BinOp::*;
+
+        let (val, ty) = match bin_op {
+            Eq => (Scalar::from_bool(l == r), self.tcx.types.bool),
+            Ne => (Scalar::from_bool(l != r), self.tcx.types.bool),
+            Lt => (Scalar::from_bool(l < r), self.tcx.types.bool),
+            Le => (Scalar::from_bool(l <= r), self.tcx.types.bool),
+            Gt => (Scalar::from_bool(l > r), self.tcx.types.bool),
+            Ge => (Scalar::from_bool(l >= r), self.tcx.types.bool),
+            Add => ((l + r).value.into(), ty),
+            Sub => ((l - r).value.into(), ty),
+            Mul => ((l * r).value.into(), ty),
+            Div => ((l / r).value.into(), ty),
+            Rem => ((l % r).value.into(), ty),
+            _ => span_bug!(self.cur_span(), "invalid float op: `{:?}`", bin_op),
+        };
+        (val, false, ty)
+    }
+
+    fn binary_int_op(
+        &self,
+        bin_op: mir::BinOp,
+        // passing in raw bits
+        l: u128,
+        left_layout: TyAndLayout<'tcx>,
+        r: u128,
+        right_layout: TyAndLayout<'tcx>,
+    ) -> InterpResult<'tcx, (Scalar<M::PointerTag>, bool, Ty<'tcx>)> {
+        use rustc_middle::mir::BinOp::*;
+
+        // Shift ops can have an RHS with a different numeric type.
+        if bin_op == Shl || bin_op == Shr {
+            let signed = left_layout.abi.is_signed();
+            let size = u128::from(left_layout.size.bits());
+            let overflow = r >= size;
+            let r = r % size; // mask to type size
+            let r = u32::try_from(r).unwrap(); // we masked so this will always fit
+            let result = if signed {
+                let l = self.sign_extend(l, left_layout) as i128;
+                let result = match bin_op {
+                    Shl => l.checked_shl(r).unwrap(),
+                    Shr => l.checked_shr(r).unwrap(),
+                    _ => bug!("it has already been checked that this is a shift op"),
+                };
+                result as u128
+            } else {
+                match bin_op {
+                    Shl => l.checked_shl(r).unwrap(),
+                    Shr => l.checked_shr(r).unwrap(),
+                    _ => bug!("it has already been checked that this is a shift op"),
+                }
+            };
+            let truncated = self.truncate(result, left_layout);
+            return Ok((Scalar::from_uint(truncated, left_layout.size), overflow, left_layout.ty));
+        }
+
+        // For the remaining ops, the types must be the same on both sides
+        if left_layout.ty != right_layout.ty {
+            span_bug!(
+                self.cur_span(),
+                "invalid asymmetric binary op {:?}: {:?} ({:?}), {:?} ({:?})",
+                bin_op,
+                l,
+                left_layout.ty,
+                r,
+                right_layout.ty,
+            )
+        }
+
+        let size = left_layout.size;
+
+        // Operations that need special treatment for signed integers
+        if left_layout.abi.is_signed() {
+            let op: Option<fn(&i128, &i128) -> bool> = match bin_op {
+                Lt => Some(i128::lt),
+                Le => Some(i128::le),
+                Gt => Some(i128::gt),
+                Ge => Some(i128::ge),
+                _ => None,
+            };
+            if let Some(op) = op {
+                let l = self.sign_extend(l, left_layout) as i128;
+                let r = self.sign_extend(r, right_layout) as i128;
+                return Ok((Scalar::from_bool(op(&l, &r)), false, self.tcx.types.bool));
+            }
+            let op: Option<fn(i128, i128) -> (i128, bool)> = match bin_op {
+                Div if r == 0 => throw_ub!(DivisionByZero),
+                Rem if r == 0 => throw_ub!(RemainderByZero),
+                Div => Some(i128::overflowing_div),
+                Rem => Some(i128::overflowing_rem),
+                Add => Some(i128::overflowing_add),
+                Sub => Some(i128::overflowing_sub),
+                Mul => Some(i128::overflowing_mul),
+                _ => None,
+            };
+            if let Some(op) = op {
+                let r = self.sign_extend(r, right_layout) as i128;
+                // We need a special check for overflowing remainder:
+                // "int_min % -1" overflows and returns 0, but after casting things to a larger int
+                // type it does *not* overflow nor give an unrepresentable result!
+                if bin_op == Rem {
+                    if r == -1 && l == (1 << (size.bits() - 1)) {
+                        return Ok((Scalar::from_int(0, size), true, left_layout.ty));
+                    }
+                }
+                let l = self.sign_extend(l, left_layout) as i128;
+
+                let (result, oflo) = op(l, r);
+                // This may be out-of-bounds for the result type, so we have to truncate ourselves.
+                // If that truncation loses any information, we have an overflow.
+                let result = result as u128;
+                let truncated = self.truncate(result, left_layout);
+                return Ok((
+                    Scalar::from_uint(truncated, size),
+                    oflo || self.sign_extend(truncated, left_layout) != result,
+                    left_layout.ty,
+                ));
+            }
+        }
+
+        let (val, ty) = match bin_op {
+            Eq => (Scalar::from_bool(l == r), self.tcx.types.bool),
+            Ne => (Scalar::from_bool(l != r), self.tcx.types.bool),
+
+            Lt => (Scalar::from_bool(l < r), self.tcx.types.bool),
+            Le => (Scalar::from_bool(l <= r), self.tcx.types.bool),
+            Gt => (Scalar::from_bool(l > r), self.tcx.types.bool),
+            Ge => (Scalar::from_bool(l >= r), self.tcx.types.bool),
+
+            BitOr => (Scalar::from_uint(l | r, size), left_layout.ty),
+            BitAnd => (Scalar::from_uint(l & r, size), left_layout.ty),
+            BitXor => (Scalar::from_uint(l ^ r, size), left_layout.ty),
+
+            Add | Sub | Mul | Rem | Div => {
+                assert!(!left_layout.abi.is_signed());
+                let op: fn(u128, u128) -> (u128, bool) = match bin_op {
+                    Add => u128::overflowing_add,
+                    Sub => u128::overflowing_sub,
+                    Mul => u128::overflowing_mul,
+                    Div if r == 0 => throw_ub!(DivisionByZero),
+                    Rem if r == 0 => throw_ub!(RemainderByZero),
+                    Div => u128::overflowing_div,
+                    Rem => u128::overflowing_rem,
+                    _ => bug!(),
+                };
+                let (result, oflo) = op(l, r);
+                // Truncate to target type.
+                // If that truncation loses any information, we have an overflow.
+                let truncated = self.truncate(result, left_layout);
+                return Ok((
+                    Scalar::from_uint(truncated, size),
+                    oflo || truncated != result,
+                    left_layout.ty,
+                ));
+            }
+
+            _ => span_bug!(
+                self.cur_span(),
+                "invalid binary op {:?}: {:?}, {:?} (both {:?})",
+                bin_op,
+                l,
+                r,
+                right_layout.ty,
+            ),
+        };
+
+        Ok((val, false, ty))
+    }
+
+    /// Returns the result of the specified operation, whether it overflowed, and
+    /// the result type.
+    pub fn overflowing_binary_op(
+        &self,
+        bin_op: mir::BinOp,
+        left: &ImmTy<'tcx, M::PointerTag>,
+        right: &ImmTy<'tcx, M::PointerTag>,
+    ) -> InterpResult<'tcx, (Scalar<M::PointerTag>, bool, Ty<'tcx>)> {
+        trace!(
+            "Running binary op {:?}: {:?} ({:?}), {:?} ({:?})",
+            bin_op,
+            *left,
+            left.layout.ty,
+            *right,
+            right.layout.ty
+        );
+
+        match left.layout.ty.kind() {
+            ty::Char => {
+                assert_eq!(left.layout.ty, right.layout.ty);
+                let left = left.to_scalar()?;
+                let right = right.to_scalar()?;
+                Ok(self.binary_char_op(bin_op, left.to_char()?, right.to_char()?))
+            }
+            ty::Bool => {
+                assert_eq!(left.layout.ty, right.layout.ty);
+                let left = left.to_scalar()?;
+                let right = right.to_scalar()?;
+                Ok(self.binary_bool_op(bin_op, left.to_bool()?, right.to_bool()?))
+            }
+            ty::Float(fty) => {
+                assert_eq!(left.layout.ty, right.layout.ty);
+                let ty = left.layout.ty;
+                let left = left.to_scalar()?;
+                let right = right.to_scalar()?;
+                Ok(match fty {
+                    FloatTy::F32 => {
+                        self.binary_float_op(bin_op, ty, left.to_f32()?, right.to_f32()?)
+                    }
+                    FloatTy::F64 => {
+                        self.binary_float_op(bin_op, ty, left.to_f64()?, right.to_f64()?)
+                    }
+                })
+            }
+            _ if left.layout.ty.is_integral() => {
+                // the RHS type can be different, e.g. for shifts -- but it has to be integral, too
+                assert!(
+                    right.layout.ty.is_integral(),
+                    "Unexpected types for BinOp: {:?} {:?} {:?}",
+                    left.layout.ty,
+                    bin_op,
+                    right.layout.ty
+                );
+
+                let l = left.to_scalar()?.to_bits(left.layout.size)?;
+                let r = right.to_scalar()?.to_bits(right.layout.size)?;
+                self.binary_int_op(bin_op, l, left.layout, r, right.layout)
+            }
+            _ if left.layout.ty.is_any_ptr() => {
+                // The RHS type must be the same *or an integer type* (for `Offset`).
+                assert!(
+                    right.layout.ty == left.layout.ty || right.layout.ty.is_integral(),
+                    "Unexpected types for BinOp: {:?} {:?} {:?}",
+                    left.layout.ty,
+                    bin_op,
+                    right.layout.ty
+                );
+
+                M::binary_ptr_op(self, bin_op, left, right)
+            }
+            _ => span_bug!(
+                self.cur_span(),
+                "Invalid MIR: bad LHS type for binop: {:?}",
+                left.layout.ty
+            ),
+        }
+    }
+
+    /// Typed version of `overflowing_binary_op`, returning an `ImmTy`. Also ignores overflows.
+    #[inline]
+    pub fn binary_op(
+        &self,
+        bin_op: mir::BinOp,
+        left: &ImmTy<'tcx, M::PointerTag>,
+        right: &ImmTy<'tcx, M::PointerTag>,
+    ) -> InterpResult<'tcx, ImmTy<'tcx, M::PointerTag>> {
+        let (val, _overflow, ty) = self.overflowing_binary_op(bin_op, left, right)?;
+        Ok(ImmTy::from_scalar(val, self.layout_of(ty)?))
+    }
+
+    /// Returns the result of the specified operation, whether it overflowed, and
+    /// the result type.
+    pub fn overflowing_unary_op(
+        &self,
+        un_op: mir::UnOp,
+        val: &ImmTy<'tcx, M::PointerTag>,
+    ) -> InterpResult<'tcx, (Scalar<M::PointerTag>, bool, Ty<'tcx>)> {
+        use rustc_middle::mir::UnOp::*;
+
+        let layout = val.layout;
+        let val = val.to_scalar()?;
+        trace!("Running unary op {:?}: {:?} ({:?})", un_op, val, layout.ty);
+
+        match layout.ty.kind() {
+            ty::Bool => {
+                let val = val.to_bool()?;
+                let res = match un_op {
+                    Not => !val,
+                    _ => span_bug!(self.cur_span(), "Invalid bool op {:?}", un_op),
+                };
+                Ok((Scalar::from_bool(res), false, self.tcx.types.bool))
+            }
+            ty::Float(fty) => {
+                let res = match (un_op, fty) {
+                    (Neg, FloatTy::F32) => Scalar::from_f32(-val.to_f32()?),
+                    (Neg, FloatTy::F64) => Scalar::from_f64(-val.to_f64()?),
+                    _ => span_bug!(self.cur_span(), "Invalid float op {:?}", un_op),
+                };
+                Ok((res, false, layout.ty))
+            }
+            _ => {
+                assert!(layout.ty.is_integral());
+                let val = val.to_bits(layout.size)?;
+                let (res, overflow) = match un_op {
+                    Not => (self.truncate(!val, layout), false), // bitwise negation, then truncate
+                    Neg => {
+                        // arithmetic negation
+                        assert!(layout.abi.is_signed());
+                        let val = self.sign_extend(val, layout) as i128;
+                        let (res, overflow) = val.overflowing_neg();
+                        let res = res as u128;
+                        // Truncate to target type.
+                        // If that truncation loses any information, we have an overflow.
+                        let truncated = self.truncate(res, layout);
+                        (truncated, overflow || self.sign_extend(truncated, layout) != res)
+                    }
+                };
+                Ok((Scalar::from_uint(res, layout.size), overflow, layout.ty))
+            }
+        }
+    }
+
+    pub fn unary_op(
+        &self,
+        un_op: mir::UnOp,
+        val: &ImmTy<'tcx, M::PointerTag>,
+    ) -> InterpResult<'tcx, ImmTy<'tcx, M::PointerTag>> {
+        let (val, _overflow, ty) = self.overflowing_unary_op(un_op, val)?;
+        Ok(ImmTy::from_scalar(val, self.layout_of(ty)?))
+    }
+}
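For intuition about the overflow conventions in `binary_int_op` above, here is a minimal, self-contained sketch fixed to 8-bit unsigned operands (the helper names are invented; the interpreter generalizes this over arbitrary layouts and signedness): the shift amount is masked to the bit width of the left-hand type and masked-out bits are reported as overflow, while arithmetic overflow is detected by truncating a wide result back to the operand size.

    // Shifts: mask the RHS to the LHS bit width, report dropped bits as overflow.
    fn shl_u8(l: u8, r: u32) -> (u8, bool) {
        let overflow = r >= 8; // RHS does not fit the LHS type
        let r = r % 8;         // mask to type size, like `r % size` above
        (l << r, overflow)
    }

    // Addition: compute in a wider type, truncate, and flag overflow if truncation
    // lost information -- the `oflo || truncated != result` pattern above.
    fn add_u8(l: u8, r: u8) -> (u8, bool) {
        let wide = u128::from(l) + u128::from(r);
        let truncated = wide as u8;
        (truncated, u128::from(truncated) != wide)
    }

    fn main() {
        assert_eq!(shl_u8(1, 9), (2, true)); // shift amount 9 is masked to 1
        assert_eq!(add_u8(200, 100), (44, true));
        assert_eq!(add_u8(1, 2), (3, false));
    }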
diff --git a/compiler/rustc_const_eval/src/interpret/place.rs b/compiler/rustc_const_eval/src/interpret/place.rs
new file mode 100644
index 00000000000..95a44e3fecf
--- /dev/null
+++ b/compiler/rustc_const_eval/src/interpret/place.rs
@@ -0,0 +1,1091 @@
+//! Computations on places -- field projections, going from mir::Place, and writing
+//! into a place.
+//! All high-level functions to write to memory work on places as destinations.
+
+use std::convert::TryFrom;
+use std::hash::Hash;
+
+use rustc_ast::Mutability;
+use rustc_macros::HashStable;
+use rustc_middle::mir;
+use rustc_middle::ty::layout::{LayoutOf, PrimitiveExt, TyAndLayout};
+use rustc_middle::ty::{self, Ty};
+use rustc_target::abi::{Abi, Align, FieldsShape, TagEncoding};
+use rustc_target::abi::{HasDataLayout, Size, VariantIdx, Variants};
+
+use super::{
+    alloc_range, mir_assign_valid_types, AllocId, AllocRef, AllocRefMut, CheckInAllocMsg,
+    ConstAlloc, ImmTy, Immediate, InterpCx, InterpResult, LocalValue, Machine, MemoryKind, OpTy,
+    Operand, Pointer, PointerArithmetic, Provenance, Scalar, ScalarMaybeUninit,
+};
+
+#[derive(Copy, Clone, Hash, PartialEq, Eq, HashStable, Debug)]
+/// Information required for the sound usage of a `MemPlace`.
+pub enum MemPlaceMeta<Tag: Provenance = AllocId> {
+    /// The unsized payload (e.g. length for slices or vtable pointer for trait objects).
+    Meta(Scalar<Tag>),
+    /// `Sized` types or unsized `extern type`
+    None,
+    /// The address of this place may not be taken. This prevents a `MemPlace` that was created
+    /// from a ZST Operand without a backing allocation from ever having its (meaningless) address
+    /// turned into an integer. This should be impossible, because you can't take the address of an
+    /// operand, but it is a second protection layer ensuring that we don't mess up.
+    Poison,
+}
+
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+rustc_data_structures::static_assert_size!(MemPlaceMeta, 24);
+
+impl<Tag: Provenance> MemPlaceMeta<Tag> {
+    pub fn unwrap_meta(self) -> Scalar<Tag> {
+        match self {
+            Self::Meta(s) => s,
+            Self::None | Self::Poison => {
+                bug!("expected wide pointer extra data (e.g. slice length or trait object vtable)")
+            }
+        }
+    }
+    fn has_meta(self) -> bool {
+        match self {
+            Self::Meta(_) => true,
+            Self::None | Self::Poison => false,
+        }
+    }
+}
+
+#[derive(Copy, Clone, Hash, PartialEq, Eq, HashStable, Debug)]
+pub struct MemPlace<Tag: Provenance = AllocId> {
+    /// The pointer can be a pure integer, with the `None` tag.
+    pub ptr: Pointer<Option<Tag>>,
+    pub align: Align,
+    /// Metadata for unsized places. Interpretation is up to the type.
+    /// Must not be present for sized types, but can be missing for unsized types
+    /// (e.g., `extern type`).
+    pub meta: MemPlaceMeta<Tag>,
+}
+
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+rustc_data_structures::static_assert_size!(MemPlace, 48);
+
+#[derive(Copy, Clone, Hash, PartialEq, Eq, HashStable, Debug)]
+pub enum Place<Tag: Provenance = AllocId> {
+    /// A place referring to a value allocated in the `Memory` system.
+    Ptr(MemPlace<Tag>),
+
+    /// To support alloc-free locals, we are able to write directly to a local.
+    /// (Without that optimization, we'd just always be a `MemPlace`.)
+    Local { frame: usize, local: mir::Local },
+}
+
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+rustc_data_structures::static_assert_size!(Place, 56);
+
+#[derive(Copy, Clone, Debug)]
+pub struct PlaceTy<'tcx, Tag: Provenance = AllocId> {
+    place: Place<Tag>, // Keep this private; it helps enforce invariants.
+    pub layout: TyAndLayout<'tcx>,
+}
+
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+rustc_data_structures::static_assert_size!(PlaceTy<'_>, 72);
+
+impl<'tcx, Tag: Provenance> std::ops::Deref for PlaceTy<'tcx, Tag> {
+    type Target = Place<Tag>;
+    #[inline(always)]
+    fn deref(&self) -> &Place<Tag> {
+        &self.place
+    }
+}
+
+/// A MemPlace with its layout. Constructing it is only possible in this module.
+#[derive(Copy, Clone, Hash, Eq, PartialEq, Debug)]
+pub struct MPlaceTy<'tcx, Tag: Provenance = AllocId> {
+    mplace: MemPlace<Tag>,
+    pub layout: TyAndLayout<'tcx>,
+}
+
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+rustc_data_structures::static_assert_size!(MPlaceTy<'_>, 64);
+
+impl<'tcx, Tag: Provenance> std::ops::Deref for MPlaceTy<'tcx, Tag> {
+    type Target = MemPlace<Tag>;
+    #[inline(always)]
+    fn deref(&self) -> &MemPlace<Tag> {
+        &self.mplace
+    }
+}
+
+impl<'tcx, Tag: Provenance> From<MPlaceTy<'tcx, Tag>> for PlaceTy<'tcx, Tag> {
+    #[inline(always)]
+    fn from(mplace: MPlaceTy<'tcx, Tag>) -> Self {
+        PlaceTy { place: Place::Ptr(mplace.mplace), layout: mplace.layout }
+    }
+}
+
+impl<Tag: Provenance> MemPlace<Tag> {
+    #[inline(always)]
+    pub fn from_ptr(ptr: Pointer<Option<Tag>>, align: Align) -> Self {
+        MemPlace { ptr, align, meta: MemPlaceMeta::None }
+    }
+
+    /// Adjust the provenance of the main pointer (metadata is unaffected).
+    pub fn map_provenance(self, f: impl FnOnce(Option<Tag>) -> Option<Tag>) -> Self {
+        MemPlace { ptr: self.ptr.map_provenance(f), ..self }
+    }
+
+    /// Turn a mplace into a (thin or wide) pointer, as a reference, pointing to the same space.
+    /// This is the inverse of `ref_to_mplace`.
+    #[inline(always)]
+    pub fn to_ref(self, cx: &impl HasDataLayout) -> Immediate<Tag> {
+        match self.meta {
+            MemPlaceMeta::None => Immediate::from(Scalar::from_maybe_pointer(self.ptr, cx)),
+            MemPlaceMeta::Meta(meta) => {
+                Immediate::ScalarPair(Scalar::from_maybe_pointer(self.ptr, cx).into(), meta.into())
+            }
+            MemPlaceMeta::Poison => bug!(
+                "MPlaceTy::dangling may never be used to produce a \
+                place that will have the address of its pointee taken"
+            ),
+        }
+    }
+
+    #[inline]
+    pub fn offset(
+        self,
+        offset: Size,
+        meta: MemPlaceMeta<Tag>,
+        cx: &impl HasDataLayout,
+    ) -> InterpResult<'tcx, Self> {
+        Ok(MemPlace {
+            ptr: self.ptr.offset(offset, cx)?,
+            align: self.align.restrict_for_offset(offset),
+            meta,
+        })
+    }
+}
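The split between `MemPlace::ptr` and `MemPlaceMeta` mirrors how thin and wide pointers are laid out in Rust itself, which is why `to_ref` yields either a single scalar or a `ScalarPair`. A toy illustration (the `Toy*` types and the `to_ref` function below are stand-ins, not the interpreter's types):

    #[derive(Debug, PartialEq)]
    enum ToyMeta {
        None,
        Meta(u64),
    }

    #[derive(Debug, PartialEq)]
    enum ToyImmediate {
        Scalar(usize),
        ScalarPair(usize, u64),
    }

    // Same shape as `MemPlace::to_ref`: no metadata -> one scalar, metadata -> a pair.
    fn to_ref(ptr: usize, meta: ToyMeta) -> ToyImmediate {
        match meta {
            ToyMeta::None => ToyImmediate::Scalar(ptr),
            ToyMeta::Meta(len) => ToyImmediate::ScalarPair(ptr, len),
        }
    }

    fn main() {
        let bytes: &[u8] = &[1, 2, 3];
        let ptr = bytes.as_ptr() as usize;
        // A `&[u8]` is conceptually (data pointer, length), i.e. a ScalarPair.
        assert_eq!(
            to_ref(ptr, ToyMeta::Meta(bytes.len() as u64)),
            ToyImmediate::ScalarPair(ptr, 3)
        );
        // A thin reference carries no metadata, i.e. a single Scalar.
        assert_eq!(to_ref(ptr, ToyMeta::None), ToyImmediate::Scalar(ptr));
    }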
+
+impl<'tcx, Tag: Provenance> MPlaceTy<'tcx, Tag> {
+    /// Produces a MemPlace that works for ZST but nothing else
+    #[inline]
+    pub fn dangling(layout: TyAndLayout<'tcx>) -> Self {
+        let align = layout.align.abi;
+        let ptr = Pointer::new(None, Size::from_bytes(align.bytes())); // no provenance, absolute address
+        // `Poison` this to make sure that the pointer value `ptr` is never observable by the program.
+        MPlaceTy { mplace: MemPlace { ptr, align, meta: MemPlaceMeta::Poison }, layout }
+    }
+
+    #[inline]
+    pub fn offset(
+        &self,
+        offset: Size,
+        meta: MemPlaceMeta<Tag>,
+        layout: TyAndLayout<'tcx>,
+        cx: &impl HasDataLayout,
+    ) -> InterpResult<'tcx, Self> {
+        Ok(MPlaceTy { mplace: self.mplace.offset(offset, meta, cx)?, layout })
+    }
+
+    #[inline]
+    pub fn from_aligned_ptr(ptr: Pointer<Option<Tag>>, layout: TyAndLayout<'tcx>) -> Self {
+        MPlaceTy { mplace: MemPlace::from_ptr(ptr, layout.align.abi), layout }
+    }
+
+    #[inline]
+    pub(super) fn len(&self, cx: &impl HasDataLayout) -> InterpResult<'tcx, u64> {
+        if self.layout.is_unsized() {
+            // We need to consult `meta` metadata
+            match self.layout.ty.kind() {
+                ty::Slice(..) | ty::Str => self.mplace.meta.unwrap_meta().to_machine_usize(cx),
+                _ => bug!("len not supported on unsized type {:?}", self.layout.ty),
+            }
+        } else {
+            // Go through the layout.  There are lots of types that support a length,
+            // e.g., SIMD types.
+            match self.layout.fields {
+                FieldsShape::Array { count, .. } => Ok(count),
+                _ => bug!("len not supported on sized type {:?}", self.layout.ty),
+            }
+        }
+    }
+
+    #[inline]
+    pub(super) fn vtable(&self) -> Scalar<Tag> {
+        match self.layout.ty.kind() {
+            ty::Dynamic(..) => self.mplace.meta.unwrap_meta(),
+            _ => bug!("vtable not supported on type {:?}", self.layout.ty),
+        }
+    }
+}
+
+// These are defined here because they produce a place.
+impl<'tcx, Tag: Provenance> OpTy<'tcx, Tag> {
+    #[inline(always)]
+    /// Note: do not call `as_ref` on the resulting place. This function should only be used to
+    /// read from the resulting mplace, not to get its address back.
+    pub fn try_as_mplace(&self) -> Result<MPlaceTy<'tcx, Tag>, ImmTy<'tcx, Tag>> {
+        match **self {
+            Operand::Indirect(mplace) => Ok(MPlaceTy { mplace, layout: self.layout }),
+            Operand::Immediate(_) if self.layout.is_zst() => Ok(MPlaceTy::dangling(self.layout)),
+            Operand::Immediate(imm) => Err(ImmTy::from_immediate(imm, self.layout)),
+        }
+    }
+
+    #[inline(always)]
+    /// Note: do not call `as_ref` on the resulting place. This function should only be used to
+    /// read from the resulting mplace, not to get its address back.
+    pub fn assert_mem_place(&self) -> MPlaceTy<'tcx, Tag> {
+        self.try_as_mplace().unwrap()
+    }
+}
+
+impl<Tag: Provenance> Place<Tag> {
+    #[inline]
+    pub fn assert_mem_place(self) -> MemPlace<Tag> {
+        match self {
+            Place::Ptr(mplace) => mplace,
+            _ => bug!("assert_mem_place: expected Place::Ptr, got {:?}", self),
+        }
+    }
+}
+
+impl<'tcx, Tag: Provenance> PlaceTy<'tcx, Tag> {
+    #[inline]
+    pub fn assert_mem_place(self) -> MPlaceTy<'tcx, Tag> {
+        MPlaceTy { mplace: self.place.assert_mem_place(), layout: self.layout }
+    }
+}
+
+// separating the pointer tag for `impl Trait`, see https://github.com/rust-lang/rust/issues/54385
+impl<'mir, 'tcx: 'mir, Tag, M> InterpCx<'mir, 'tcx, M>
+where
+    // FIXME: Working around https://github.com/rust-lang/rust/issues/54385
+    Tag: Provenance + Eq + Hash + 'static,
+    M: Machine<'mir, 'tcx, PointerTag = Tag>,
+{
+    /// Take a value, which represents a (thin or wide) reference, and make it a place.
+    /// Alignment is just based on the type.  This is the inverse of `MemPlace::to_ref()`.
+    ///
+    /// Only call this if you are sure the place is "valid" (aligned and inbounds), or do not
+    /// want to ever use the place for memory access!
+    /// Generally prefer `deref_operand`.
+    pub fn ref_to_mplace(
+        &self,
+        val: &ImmTy<'tcx, M::PointerTag>,
+    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
+        let pointee_type =
+            val.layout.ty.builtin_deref(true).expect("`ref_to_mplace` called on non-ptr type").ty;
+        let layout = self.layout_of(pointee_type)?;
+        let (ptr, meta) = match **val {
+            Immediate::Scalar(ptr) => (ptr, MemPlaceMeta::None),
+            Immediate::ScalarPair(ptr, meta) => (ptr, MemPlaceMeta::Meta(meta.check_init()?)),
+        };
+
+        let mplace = MemPlace {
+            ptr: self.scalar_to_ptr(ptr.check_init()?),
+            // We could use the run-time alignment here. For now, we do not, because
+            // the point of tracking the alignment here is to make sure that the *static*
+            // alignment information emitted with the loads is correct. The run-time
+            // alignment can only be more restrictive.
+            align: layout.align.abi,
+            meta,
+        };
+        Ok(MPlaceTy { mplace, layout })
+    }
+
+    /// Take an operand, representing a pointer, and dereference it to a place -- that
+    /// will always be a MemPlace.  Lives in `place.rs` because it creates a place.
+    pub fn deref_operand(
+        &self,
+        src: &OpTy<'tcx, M::PointerTag>,
+    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
+        let val = self.read_immediate(src)?;
+        trace!("deref to {} on {:?}", val.layout.ty, *val);
+        let mplace = self.ref_to_mplace(&val)?;
+        self.check_mplace_access(mplace, CheckInAllocMsg::DerefTest)?;
+        Ok(mplace)
+    }
+
+    #[inline]
+    pub(super) fn get_alloc(
+        &self,
+        place: &MPlaceTy<'tcx, M::PointerTag>,
+    ) -> InterpResult<'tcx, Option<AllocRef<'_, 'tcx, M::PointerTag, M::AllocExtra>>> {
+        assert!(!place.layout.is_unsized());
+        assert!(!place.meta.has_meta());
+        let size = place.layout.size;
+        self.memory.get(place.ptr, size, place.align)
+    }
+
+    #[inline]
+    pub(super) fn get_alloc_mut(
+        &mut self,
+        place: &MPlaceTy<'tcx, M::PointerTag>,
+    ) -> InterpResult<'tcx, Option<AllocRefMut<'_, 'tcx, M::PointerTag, M::AllocExtra>>> {
+        assert!(!place.layout.is_unsized());
+        assert!(!place.meta.has_meta());
+        let size = place.layout.size;
+        self.memory.get_mut(place.ptr, size, place.align)
+    }
+
+    /// Check if this mplace is dereferenceable and sufficiently aligned.
+    fn check_mplace_access(
+        &self,
+        mplace: MPlaceTy<'tcx, M::PointerTag>,
+        msg: CheckInAllocMsg,
+    ) -> InterpResult<'tcx> {
+        let (size, align) = self
+            .size_and_align_of_mplace(&mplace)?
+            .unwrap_or((mplace.layout.size, mplace.layout.align.abi));
+        assert!(mplace.mplace.align <= align, "dynamic alignment less strict than static one?");
+        let align = M::enforce_alignment(&self.memory.extra).then_some(align);
+        self.memory.check_ptr_access_align(mplace.ptr, size, align.unwrap_or(Align::ONE), msg)?;
+        Ok(())
+    }
+
+    /// Offset a pointer to project to a field of a struct/union. Unlike `place_field`, this is
+    /// always possible without allocating, so it can take `&self`. Also return the field's layout.
+    /// This supports both struct and array fields.
+    ///
+    /// This also works for arrays, but then the `usize` index type is too restrictive.
+    /// For indexing into arrays, use `mplace_index`.
+    #[inline(always)]
+    pub fn mplace_field(
+        &self,
+        base: &MPlaceTy<'tcx, M::PointerTag>,
+        field: usize,
+    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
+        let offset = base.layout.fields.offset(field);
+        let field_layout = base.layout.field(self, field);
+
+        // Offset may need adjustment for unsized fields.
+        let (meta, offset) = if field_layout.is_unsized() {
+            // Re-use parent metadata to determine dynamic field layout.
+            // With custom DSTs, this *will* execute user-defined code, but the same
+            // happens at run-time so that's okay.
+            let align = match self.size_and_align_of(&base.meta, &field_layout)? {
+                Some((_, align)) => align,
+                None if offset == Size::ZERO => {
+                    // An extern type at offset 0, we fall back to its static alignment.
+                    // FIXME: Once we have made decisions for how to handle size and alignment
+                    // of `extern type`, this should be adapted.  It is just a temporary hack
+                    // to get some code to work that probably ought to work.
+                    field_layout.align.abi
+                }
+                None => span_bug!(
+                    self.cur_span(),
+                    "cannot compute offset for extern type field at non-0 offset"
+                ),
+            };
+            (base.meta, offset.align_to(align))
+        } else {
+            // base.meta could be present; we might be accessing a sized field of an unsized
+            // struct.
+            (MemPlaceMeta::None, offset)
+        };
+
+        // We do not look at `base.layout.align` nor `field_layout.align`, unlike
+        // codegen -- mostly to see if we can get away with that
+        base.offset(offset, meta, field_layout, self)
+    }
+
+    /// Index into an array.
+    #[inline(always)]
+    pub fn mplace_index(
+        &self,
+        base: &MPlaceTy<'tcx, M::PointerTag>,
+        index: u64,
+    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
+        // Not using the layout method because we want to compute on u64
+        match base.layout.fields {
+            FieldsShape::Array { stride, .. } => {
+                let len = base.len(self)?;
+                if index >= len {
+                    // This can only be reached in ConstProp and non-rustc-MIR.
+                    throw_ub!(BoundsCheckFailed { len, index });
+                }
+                let offset = stride * index; // `Size` multiplication
+                // All fields have the same layout.
+                let field_layout = base.layout.field(self, 0);
+
+                assert!(!field_layout.is_unsized());
+                base.offset(offset, MemPlaceMeta::None, field_layout, self)
+            }
+            _ => span_bug!(
+                self.cur_span(),
+                "`mplace_index` called on non-array type {:?}",
+                base.layout.ty
+            ),
+        }
+    }
+
+    // Iterates over all fields of an array. Much more efficient than doing the
+    // same by repeatedly calling `mplace_index`.
+    pub(super) fn mplace_array_fields(
+        &self,
+        base: &'a MPlaceTy<'tcx, Tag>,
+    ) -> InterpResult<'tcx, impl Iterator<Item = InterpResult<'tcx, MPlaceTy<'tcx, Tag>>> + 'a>
+    {
+        let len = base.len(self)?; // also asserts that we have a type where this makes sense
+        let stride = match base.layout.fields {
+            FieldsShape::Array { stride, .. } => stride,
+            _ => span_bug!(self.cur_span(), "mplace_array_fields: expected an array layout"),
+        };
+        let layout = base.layout.field(self, 0);
+        let dl = &self.tcx.data_layout;
+        // `Size` multiplication
+        Ok((0..len).map(move |i| base.offset(stride * i, MemPlaceMeta::None, layout, dl)))
+    }
+
+    fn mplace_subslice(
+        &self,
+        base: &MPlaceTy<'tcx, M::PointerTag>,
+        from: u64,
+        to: u64,
+        from_end: bool,
+    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
+        let len = base.len(self)?; // also asserts that we have a type where this makes sense
+        let actual_to = if from_end {
+            if from.checked_add(to).map_or(true, |to| to > len) {
+                // This can only be reached in ConstProp and non-rustc-MIR.
+                throw_ub!(BoundsCheckFailed { len: len, index: from.saturating_add(to) });
+            }
+            len.checked_sub(to).unwrap()
+        } else {
+            to
+        };
+
+        // Not using layout method because that works with usize, and does not work with slices
+        // (that have count 0 in their layout).
+        let from_offset = match base.layout.fields {
+            FieldsShape::Array { stride, .. } => stride * from, // `Size` multiplication is checked
+            _ => {
+                span_bug!(self.cur_span(), "unexpected layout of index access: {:#?}", base.layout)
+            }
+        };
+
+        // Compute meta and new layout
+        let inner_len = actual_to.checked_sub(from).unwrap();
+        let (meta, ty) = match base.layout.ty.kind() {
+            // It is not nice to match on the type, but that seems to be the only way to
+            // implement this.
+            ty::Array(inner, _) => (MemPlaceMeta::None, self.tcx.mk_array(inner, inner_len)),
+            ty::Slice(..) => {
+                let len = Scalar::from_machine_usize(inner_len, self);
+                (MemPlaceMeta::Meta(len), base.layout.ty)
+            }
+            _ => {
+                span_bug!(self.cur_span(), "cannot subslice non-array type: `{:?}`", base.layout.ty)
+            }
+        };
+        let layout = self.layout_of(ty)?;
+        base.offset(from_offset, meta, layout, self)
+    }
+
+    pub(crate) fn mplace_downcast(
+        &self,
+        base: &MPlaceTy<'tcx, M::PointerTag>,
+        variant: VariantIdx,
+    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
+        // Downcasts only change the layout
+        assert!(!base.meta.has_meta());
+        Ok(MPlaceTy { layout: base.layout.for_variant(self, variant), ..*base })
+    }
+
+    /// Project into an mplace
+    pub(super) fn mplace_projection(
+        &self,
+        base: &MPlaceTy<'tcx, M::PointerTag>,
+        proj_elem: mir::PlaceElem<'tcx>,
+    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
+        use rustc_middle::mir::ProjectionElem::*;
+        Ok(match proj_elem {
+            Field(field, _) => self.mplace_field(base, field.index())?,
+            Downcast(_, variant) => self.mplace_downcast(base, variant)?,
+            Deref => self.deref_operand(&base.into())?,
+
+            Index(local) => {
+                let layout = self.layout_of(self.tcx.types.usize)?;
+                let n = self.access_local(self.frame(), local, Some(layout))?;
+                let n = self.read_scalar(&n)?;
+                let n = n.to_machine_usize(self)?;
+                self.mplace_index(base, n)?
+            }
+
+            ConstantIndex { offset, min_length, from_end } => {
+                let n = base.len(self)?;
+                if n < min_length {
+                    // This can only be reached in ConstProp and non-rustc-MIR.
+                    throw_ub!(BoundsCheckFailed { len: min_length, index: n });
+                }
+
+                let index = if from_end {
+                    assert!(0 < offset && offset <= min_length);
+                    n.checked_sub(offset).unwrap()
+                } else {
+                    assert!(offset < min_length);
+                    offset
+                };
+
+                self.mplace_index(base, index)?
+            }
+
+            Subslice { from, to, from_end } => self.mplace_subslice(base, from, to, from_end)?,
+        })
+    }
+
+    /// Gets the place of a field inside the place, and also the field's type.
+    /// Just a convenience function, but used quite a bit.
+    /// This is the only projection that might have a side-effect: We cannot project
+    /// into the field of a local `ScalarPair`, we have to first allocate it.
+    pub fn place_field(
+        &mut self,
+        base: &PlaceTy<'tcx, M::PointerTag>,
+        field: usize,
+    ) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> {
+        // FIXME: We could try to be smarter and avoid allocation for fields that span the
+        // entire place.
+        let mplace = self.force_allocation(base)?;
+        Ok(self.mplace_field(&mplace, field)?.into())
+    }
+
+    pub fn place_index(
+        &mut self,
+        base: &PlaceTy<'tcx, M::PointerTag>,
+        index: u64,
+    ) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> {
+        let mplace = self.force_allocation(base)?;
+        Ok(self.mplace_index(&mplace, index)?.into())
+    }
+
+    pub fn place_downcast(
+        &self,
+        base: &PlaceTy<'tcx, M::PointerTag>,
+        variant: VariantIdx,
+    ) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> {
+        // Downcast just changes the layout
+        Ok(match base.place {
+            Place::Ptr(mplace) => {
+                self.mplace_downcast(&MPlaceTy { mplace, layout: base.layout }, variant)?.into()
+            }
+            Place::Local { .. } => {
+                let layout = base.layout.for_variant(self, variant);
+                PlaceTy { layout, ..*base }
+            }
+        })
+    }
+
+    /// Projects into a place.
+    pub fn place_projection(
+        &mut self,
+        base: &PlaceTy<'tcx, M::PointerTag>,
+        &proj_elem: &mir::ProjectionElem<mir::Local, Ty<'tcx>>,
+    ) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> {
+        use rustc_middle::mir::ProjectionElem::*;
+        Ok(match proj_elem {
+            Field(field, _) => self.place_field(base, field.index())?,
+            Downcast(_, variant) => self.place_downcast(base, variant)?,
+            Deref => self.deref_operand(&self.place_to_op(base)?)?.into(),
+            // For the other variants, we have to force an allocation.
+            // This matches `operand_projection`.
+            Subslice { .. } | ConstantIndex { .. } | Index(_) => {
+                let mplace = self.force_allocation(base)?;
+                self.mplace_projection(&mplace, proj_elem)?.into()
+            }
+        })
+    }
+
+    /// Computes a place. You should only use this if you intend to write into this
+    /// place; for reading, a more efficient alternative is `eval_place_to_op`.
+    pub fn eval_place(
+        &mut self,
+        place: mir::Place<'tcx>,
+    ) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> {
+        let mut place_ty = PlaceTy {
+            // This works even for dead/uninitialized locals; we check further when writing
+            place: Place::Local { frame: self.frame_idx(), local: place.local },
+            layout: self.layout_of_local(self.frame(), place.local, None)?,
+        };
+
+        for elem in place.projection.iter() {
+            place_ty = self.place_projection(&place_ty, &elem)?
+        }
+
+        trace!("{:?}", self.dump_place(place_ty.place));
+        // Sanity-check the type we ended up with.
+        debug_assert!(mir_assign_valid_types(
+            *self.tcx,
+            self.param_env,
+            self.layout_of(self.subst_from_current_frame_and_normalize_erasing_regions(
+                place.ty(&self.frame().body.local_decls, *self.tcx).ty
+            ))?,
+            place_ty.layout,
+        ));
+        Ok(place_ty)
+    }
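To see why `eval_place` can simply fold `place_projection` over `place.projection`, here is a toy walk over a projection list (the `Proj` enum and `describe` helper are invented for illustration; MIR's real `ProjectionElem` has more variants). MIR stores projections base-first, so `(*x).0` is the local `x` followed by `Deref` and then `Field(0)`, exactly the order the loop above applies them in.

    enum Proj {
        Deref,
        Field(usize),
        Index(u64),
    }

    // Fold the projection list over a textual base, mirroring the loop in `eval_place`.
    fn describe(base: &str, projection: &[Proj]) -> String {
        let mut place = base.to_string();
        for elem in projection {
            place = match elem {
                Proj::Deref => format!("(*{})", place),
                Proj::Field(i) => format!("{}.{}", place, i),
                Proj::Index(i) => format!("{}[{}]", place, i),
            };
        }
        place
    }

    fn main() {
        assert_eq!(describe("x", &[Proj::Deref, Proj::Field(0)]), "(*x).0");
        assert_eq!(describe("a", &[Proj::Field(1), Proj::Index(3)]), "a.1[3]");
    }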
+
+    /// Write an immediate to a place
+    #[inline(always)]
+    pub fn write_immediate(
+        &mut self,
+        src: Immediate<M::PointerTag>,
+        dest: &PlaceTy<'tcx, M::PointerTag>,
+    ) -> InterpResult<'tcx> {
+        self.write_immediate_no_validate(src, dest)?;
+
+        if M::enforce_validity(self) {
+            // Data got changed, better make sure it matches the type!
+            self.validate_operand(&self.place_to_op(dest)?)?;
+        }
+
+        Ok(())
+    }
+
+    /// Write a scalar to a place
+    #[inline(always)]
+    pub fn write_scalar(
+        &mut self,
+        val: impl Into<ScalarMaybeUninit<M::PointerTag>>,
+        dest: &PlaceTy<'tcx, M::PointerTag>,
+    ) -> InterpResult<'tcx> {
+        self.write_immediate(Immediate::Scalar(val.into()), dest)
+    }
+
+    /// Write a pointer to a place
+    #[inline(always)]
+    pub fn write_pointer(
+        &mut self,
+        ptr: impl Into<Pointer<Option<M::PointerTag>>>,
+        dest: &PlaceTy<'tcx, M::PointerTag>,
+    ) -> InterpResult<'tcx> {
+        self.write_scalar(Scalar::from_maybe_pointer(ptr.into(), self), dest)
+    }
+
+    /// Write an immediate to a place.
+    /// If you use this you are responsible for validating that things got copied at the
+    /// right type.
+    fn write_immediate_no_validate(
+        &mut self,
+        src: Immediate<M::PointerTag>,
+        dest: &PlaceTy<'tcx, M::PointerTag>,
+    ) -> InterpResult<'tcx> {
+        if cfg!(debug_assertions) {
+            // This is a very common path, avoid some checks in release mode
+            assert!(!dest.layout.is_unsized(), "Cannot write unsized data");
+            match src {
+                Immediate::Scalar(ScalarMaybeUninit::Scalar(Scalar::Ptr(..))) => assert_eq!(
+                    self.pointer_size(),
+                    dest.layout.size,
+                    "Size mismatch when writing pointer"
+                ),
+                Immediate::Scalar(ScalarMaybeUninit::Scalar(Scalar::Int(int))) => {
+                    assert_eq!(int.size(), dest.layout.size, "Size mismatch when writing bits")
+                }
+                Immediate::Scalar(ScalarMaybeUninit::Uninit) => {} // uninit can have any size
+                Immediate::ScalarPair(_, _) => {
+                    // FIXME: Can we check anything here?
+                }
+            }
+        }
+        trace!("write_immediate: {:?} <- {:?}: {}", *dest, src, dest.layout.ty);
+
+        // See if we can avoid an allocation. This is the counterpart to `try_read_immediate`,
+        // but not factored as a separate function.
+        let mplace = match dest.place {
+            Place::Local { frame, local } => {
+                match M::access_local_mut(self, frame, local)? {
+                    Ok(local) => {
+                        // Local can be updated in-place.
+                        *local = LocalValue::Live(Operand::Immediate(src));
+                        return Ok(());
+                    }
+                    Err(mplace) => {
+                        // The local is in memory, go on below.
+                        mplace
+                    }
+                }
+            }
+            Place::Ptr(mplace) => mplace, // already referring to memory
+        };
+        let dest = MPlaceTy { mplace, layout: dest.layout };
+
+        // This is already in memory, write there.
+        self.write_immediate_to_mplace_no_validate(src, &dest)
+    }
+
+    /// Write an immediate to memory.
+    /// If you use this you are responsible for validating that things got copied at the
+    /// right type.
+    fn write_immediate_to_mplace_no_validate(
+        &mut self,
+        value: Immediate<M::PointerTag>,
+        dest: &MPlaceTy<'tcx, M::PointerTag>,
+    ) -> InterpResult<'tcx> {
+        // Note that it is really important that the type here is the right one, and matches the
+        // type things are read at. In case `src_val` is a `ScalarPair`, we don't do any magic here
+        // to handle padding properly, which is only correct if we never look at this data with the
+        // wrong type.
+
+        // Invalid places are a thing: the return place of a diverging function
+        let tcx = *self.tcx;
+        let mut alloc = match self.get_alloc_mut(dest)? {
+            Some(a) => a,
+            None => return Ok(()), // zero-sized access
+        };
+
+        // FIXME: We should check that there are dest.layout.size many bytes available in
+        // memory.  The code below is not sufficient, with enough padding it might not
+        // cover all the bytes!
+        match value {
+            Immediate::Scalar(scalar) => {
+                match dest.layout.abi {
+                    Abi::Scalar(_) => {} // fine
+                    _ => span_bug!(
+                        self.cur_span(),
+                        "write_immediate_to_mplace: invalid Scalar layout: {:#?}",
+                        dest.layout
+                    ),
+                }
+                alloc.write_scalar(alloc_range(Size::ZERO, dest.layout.size), scalar)
+            }
+            Immediate::ScalarPair(a_val, b_val) => {
+                // The allocation reference was obtained with `dest`'s alignment, so all fields
+                // will have the alignment they need; the alignment restricted for `b_offset` is
+                // something `ptr.offset(b_offset)` cannot possibly fail to satisfy.
+                let (a, b) = match dest.layout.abi {
+                    Abi::ScalarPair(ref a, ref b) => (&a.value, &b.value),
+                    _ => span_bug!(
+                        self.cur_span(),
+                        "write_immediate_to_mplace: invalid ScalarPair layout: {:#?}",
+                        dest.layout
+                    ),
+                };
+                let (a_size, b_size) = (a.size(&tcx), b.size(&tcx));
+                let b_offset = a_size.align_to(b.align(&tcx).abi);
+
+                // It is tempting to verify `b_offset` against `layout.fields.offset(1)`,
+                // but that does not work: We could be a newtype around a pair, then the
+                // fields do not match the `ScalarPair` components.
+
+                alloc.write_scalar(alloc_range(Size::ZERO, a_size), a_val)?;
+                alloc.write_scalar(alloc_range(b_offset, b_size), b_val)
+            }
+        }
+    }
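A worked instance of the `b_offset = a_size.align_to(b.align)` computation above, with plain numbers and a `#[repr(C)]` struct purely as a cross-check (the `Pair` type and `align_to` helper are illustrative; as the comment above notes, the fields of an arbitrary Rust type need not line up with its `ScalarPair` components):

    use std::mem::{align_of, size_of};

    // Round `size` up to the next multiple of `align` (a power of two).
    fn align_to(size: usize, align: usize) -> usize {
        (size + align - 1) & !(align - 1)
    }

    // A 1-byte scalar followed by a 4-byte scalar: the second component starts at
    // the rounded-up offset 4.
    #[allow(dead_code)]
    #[repr(C)]
    struct Pair {
        a: u8,
        b: u32,
    }

    fn main() {
        let b_offset = align_to(size_of::<u8>(), align_of::<u32>());
        assert_eq!(b_offset, 4);
        assert_eq!(size_of::<Pair>(), 8);
    }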
+
+    /// Copies the data from an operand to a place. This does not support transmuting!
+    /// Use `copy_op_transmute` if the layouts could disagree.
+    #[inline(always)]
+    pub fn copy_op(
+        &mut self,
+        src: &OpTy<'tcx, M::PointerTag>,
+        dest: &PlaceTy<'tcx, M::PointerTag>,
+    ) -> InterpResult<'tcx> {
+        self.copy_op_no_validate(src, dest)?;
+
+        if M::enforce_validity(self) {
+            // Data got changed, better make sure it matches the type!
+            self.validate_operand(&self.place_to_op(dest)?)?;
+        }
+
+        Ok(())
+    }
+
+    /// Copies the data from an operand to a place. This does not support transmuting!
+    /// Use `copy_op_transmute` if the layouts could disagree.
+    /// Also, if you use this you are responsible for validating that things get copied at the
+    /// right type.
+    fn copy_op_no_validate(
+        &mut self,
+        src: &OpTy<'tcx, M::PointerTag>,
+        dest: &PlaceTy<'tcx, M::PointerTag>,
+    ) -> InterpResult<'tcx> {
+        // We do NOT compare the types for equality, because well-typed code can
+        // actually "transmute" `&mut T` to `&T` in an assignment without a cast.
+        if !mir_assign_valid_types(*self.tcx, self.param_env, src.layout, dest.layout) {
+            span_bug!(
+                self.cur_span(),
+                "type mismatch when copying!\nsrc: {:?},\ndest: {:?}",
+                src.layout.ty,
+                dest.layout.ty,
+            );
+        }
+
+        // Let us see if the layout is simple so we take a shortcut, avoid force_allocation.
+        let src = match self.try_read_immediate(src)? {
+            Ok(src_val) => {
+                assert!(!src.layout.is_unsized(), "cannot have unsized immediates");
+                // Yay, we got a value that we can write directly.
+                // FIXME: Add a check to make sure that if `src` is indirect,
+                // it does not overlap with `dest`.
+                return self.write_immediate_no_validate(*src_val, dest);
+            }
+            Err(mplace) => mplace,
+        };
+        // Slow path, this does not fit into an immediate. Just memcpy.
+        trace!("copy_op: {:?} <- {:?}: {}", *dest, src, dest.layout.ty);
+
+        // This interprets `src.meta` with the `dest` local's layout, if an unsized local
+        // is being initialized!
+        let (dest, size) = self.force_allocation_maybe_sized(dest, src.meta)?;
+        let size = size.unwrap_or_else(|| {
+            assert!(
+                !dest.layout.is_unsized(),
+                "Cannot copy into already initialized unsized place"
+            );
+            dest.layout.size
+        });
+        assert_eq!(src.meta, dest.meta, "Can only copy between equally-sized instances");
+
+        self.memory
+            .copy(src.ptr, src.align, dest.ptr, dest.align, size, /*nonoverlapping*/ true)
+    }
+
+    /// Copies the data from an operand to a place. The layouts may disagree, but they must
+    /// have the same size.
+    pub fn copy_op_transmute(
+        &mut self,
+        src: &OpTy<'tcx, M::PointerTag>,
+        dest: &PlaceTy<'tcx, M::PointerTag>,
+    ) -> InterpResult<'tcx> {
+        if mir_assign_valid_types(*self.tcx, self.param_env, src.layout, dest.layout) {
+            // Fast path: Just use normal `copy_op`
+            return self.copy_op(src, dest);
+        }
+        // We still require the sizes to match.
+        if src.layout.size != dest.layout.size {
+            // FIXME: This should be an assert instead of an error, but if we transmute within an
+            // array length computation, `typeck` may not have yet been run and errored out. In fact
+            // most likely we *are* running `typeck` right now. Investigate whether we can bail out
+            // on `typeck_results().has_errors` at all const eval entry points.
+            debug!("Size mismatch when transmuting!\nsrc: {:#?}\ndest: {:#?}", src, dest);
+            self.tcx.sess.delay_span_bug(
+                self.cur_span(),
+                "size-changing transmute, should have been caught by transmute checking",
+            );
+            throw_inval!(TransmuteSizeDiff(src.layout.ty, dest.layout.ty));
+        }
+        // Unsized copies rely on interpreting `src.meta` with `dest.layout`, we want
+        // to avoid that here.
+        assert!(
+            !src.layout.is_unsized() && !dest.layout.is_unsized(),
+            "Cannot transmute unsized data"
+        );
+
+        // The hard case is `ScalarPair`.  `src` is already read from memory in this case,
+        // using `src.layout` to figure out which bytes to use for the 1st and 2nd field.
+        // We have to write them to `dest` at the offsets they were *read at*, which is
+        // not necessarily the same as the offsets in `dest.layout`!
+        // Hence we do the copy with the source layout on both sides.  We also make sure to write
+        // into memory, because if `dest` is a local we would not even have a way to write
+        // at the `src` offsets; the fact that we came from a different layout would
+        // just be lost.
+        let dest = self.force_allocation(dest)?;
+        self.copy_op_no_validate(
+            src,
+            &PlaceTy::from(MPlaceTy { mplace: *dest, layout: src.layout }),
+        )?;
+
+        if M::enforce_validity(self) {
+            // Data got changed, better make sure it matches the type!
+            self.validate_operand(&dest.into())?;
+        }
+
+        Ok(())
+    }
+
+    /// Ensures that a place is in memory, and returns where it is.
+    /// If the place currently refers to a local that doesn't yet have a matching allocation,
+    /// create such an allocation.
+    /// This is essentially `force_to_memplace`.
+    ///
+    /// This supports unsized types and returns the computed size to avoid some
+    /// redundant computation when copying; use `force_allocation` for a simpler, sized-only
+    /// version.
+    pub fn force_allocation_maybe_sized(
+        &mut self,
+        place: &PlaceTy<'tcx, M::PointerTag>,
+        meta: MemPlaceMeta<M::PointerTag>,
+    ) -> InterpResult<'tcx, (MPlaceTy<'tcx, M::PointerTag>, Option<Size>)> {
+        let (mplace, size) = match place.place {
+            Place::Local { frame, local } => {
+                match M::access_local_mut(self, frame, local)? {
+                    Ok(&mut local_val) => {
+                        // We need to make an allocation.
+
+                        // We need the layout of the local.  We can NOT use the layout we got:
+                        // it might, e.g., be an inner field of a struct with `Scalar` layout
+                        // whose alignment differs from that of the outer field.
+                        let local_layout =
+                            self.layout_of_local(&self.stack()[frame], local, None)?;
+                        // We also need to support unsized types, and hence cannot use `allocate`.
+                        let (size, align) = self
+                            .size_and_align_of(&meta, &local_layout)?
+                            .expect("Cannot allocate for non-dyn-sized type");
+                        let ptr = self.memory.allocate(size, align, MemoryKind::Stack)?;
+                        let mplace = MemPlace { ptr: ptr.into(), align, meta };
+                        if let LocalValue::Live(Operand::Immediate(value)) = local_val {
+                            // Preserve old value.
+                            // We don't have to validate as we can assume the local
+                            // was already valid for its type.
+                            let mplace = MPlaceTy { mplace, layout: local_layout };
+                            self.write_immediate_to_mplace_no_validate(value, &mplace)?;
+                        }
+                        // Now we can call `access_mut` again, asserting it goes well,
+                        // and actually overwrite things.
+                        *M::access_local_mut(self, frame, local).unwrap().unwrap() =
+                            LocalValue::Live(Operand::Indirect(mplace));
+                        (mplace, Some(size))
+                    }
+                    Err(mplace) => (mplace, None), // this already was an indirect local
+                }
+            }
+            Place::Ptr(mplace) => (mplace, None),
+        };
+        // Return with the original layout, so that the caller can go on
+        Ok((MPlaceTy { mplace, layout: place.layout }, size))
+    }
+
+    #[inline(always)]
+    pub fn force_allocation(
+        &mut self,
+        place: &PlaceTy<'tcx, M::PointerTag>,
+    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
+        Ok(self.force_allocation_maybe_sized(place, MemPlaceMeta::None)?.0)
+    }
+
+    pub fn allocate(
+        &mut self,
+        layout: TyAndLayout<'tcx>,
+        kind: MemoryKind<M::MemoryKind>,
+    ) -> InterpResult<'static, MPlaceTy<'tcx, M::PointerTag>> {
+        let ptr = self.memory.allocate(layout.size, layout.align.abi, kind)?;
+        Ok(MPlaceTy::from_aligned_ptr(ptr.into(), layout))
+    }
+
+    /// Returns a wide MPlace of type `&'static [mut] str` to a new 1-aligned allocation.
+    pub fn allocate_str(
+        &mut self,
+        str: &str,
+        kind: MemoryKind<M::MemoryKind>,
+        mutbl: Mutability,
+    ) -> MPlaceTy<'tcx, M::PointerTag> {
+        let ptr = self.memory.allocate_bytes(str.as_bytes(), Align::ONE, kind, mutbl);
+        let meta = Scalar::from_machine_usize(u64::try_from(str.len()).unwrap(), self);
+        let mplace =
+            MemPlace { ptr: ptr.into(), align: Align::ONE, meta: MemPlaceMeta::Meta(meta) };
+
+        let ty = self.tcx.mk_ref(
+            self.tcx.lifetimes.re_static,
+            ty::TypeAndMut { ty: self.tcx.types.str_, mutbl },
+        );
+        let layout = self.layout_of(ty).unwrap();
+        MPlaceTy { mplace, layout }
+    }
+
+    /// Writes the discriminant of the given variant.
+    pub fn write_discriminant(
+        &mut self,
+        variant_index: VariantIdx,
+        dest: &PlaceTy<'tcx, M::PointerTag>,
+    ) -> InterpResult<'tcx> {
+        // Layout computation excludes uninhabited variants from consideration;
+        // therefore, there is no way to represent those variants in the given layout.
+        if dest.layout.for_variant(self, variant_index).abi.is_uninhabited() {
+            throw_ub!(Unreachable);
+        }
+
+        match dest.layout.variants {
+            Variants::Single { index } => {
+                assert_eq!(index, variant_index);
+            }
+            Variants::Multiple {
+                tag_encoding: TagEncoding::Direct,
+                tag: ref tag_layout,
+                tag_field,
+                ..
+            } => {
+                // No need to validate the discriminant here because the
+                // `TyAndLayout::for_variant()` call earlier already checks the variant is valid.
+
+                let discr_val =
+                    dest.layout.ty.discriminant_for_variant(*self.tcx, variant_index).unwrap().val;
+
+                // Raw discriminants for enums are `isize` or bigger during
+                // their computation, but the in-memory tag is the smallest possible
+                // representation.
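+                // For example, a `#[repr(u8)]` enum computes its discriminants at `isize`
+                // width or wider, but only the low 8 bits end up in the stored tag.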
+                let size = tag_layout.value.size(self);
+                let tag_val = size.truncate(discr_val);
+
+                let tag_dest = self.place_field(dest, tag_field)?;
+                self.write_scalar(Scalar::from_uint(tag_val, size), &tag_dest)?;
+            }
+            Variants::Multiple {
+                tag_encoding:
+                    TagEncoding::Niche { dataful_variant, ref niche_variants, niche_start },
+                tag: ref tag_layout,
+                tag_field,
+                ..
+            } => {
+                // No need to validate the discriminant here because the
+                // `TyAndLayout::for_variant()` call earlier already checks the variant is valid.
+
+                if variant_index != dataful_variant {
+                    let variants_start = niche_variants.start().as_u32();
+                    let variant_index_relative = variant_index
+                        .as_u32()
+                        .checked_sub(variants_start)
+                        .expect("overflow computing relative variant idx");
+                    // We need to use machine arithmetic when taking into account `niche_start`:
+                    // tag_val = variant_index_relative + niche_start_val
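+                    // (Illustration: for `Option<NonZeroU8>`, `None` is the only niche variant,
+                    // so `variant_index_relative = 0` and, with `niche_start = 0`, its tag is
+                    // the all-zero bit pattern.)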
+                    let tag_layout = self.layout_of(tag_layout.value.to_int_ty(*self.tcx))?;
+                    let niche_start_val = ImmTy::from_uint(niche_start, tag_layout);
+                    let variant_index_relative_val =
+                        ImmTy::from_uint(variant_index_relative, tag_layout);
+                    let tag_val = self.binary_op(
+                        mir::BinOp::Add,
+                        &variant_index_relative_val,
+                        &niche_start_val,
+                    )?;
+                    // Write result.
+                    let niche_dest = self.place_field(dest, tag_field)?;
+                    self.write_immediate(*tag_val, &niche_dest)?;
+                }
+            }
+        }
+
+        Ok(())
+    }
+
+    pub fn raw_const_to_mplace(
+        &self,
+        raw: ConstAlloc<'tcx>,
+    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
+        // This must be an allocation in `tcx`
+        let _ = self.tcx.global_alloc(raw.alloc_id);
+        let ptr = self.global_base_pointer(Pointer::from(raw.alloc_id))?;
+        let layout = self.layout_of(raw.ty)?;
+        Ok(MPlaceTy::from_aligned_ptr(ptr.into(), layout))
+    }
+
+    /// Turn a place with a `dyn Trait` type into a place with the actual dynamic type.
+    /// Also return some more information so drop doesn't have to run the same code twice.
+    pub(super) fn unpack_dyn_trait(
+        &self,
+        mplace: &MPlaceTy<'tcx, M::PointerTag>,
+    ) -> InterpResult<'tcx, (ty::Instance<'tcx>, MPlaceTy<'tcx, M::PointerTag>)> {
+        let vtable = self.scalar_to_ptr(mplace.vtable()); // also sanity checks the type
+        let (instance, ty) = self.read_drop_type_from_vtable(vtable)?;
+        let layout = self.layout_of(ty)?;
+
+        // More sanity checks
+        if cfg!(debug_assertions) {
+            let (size, align) = self.read_size_and_align_from_vtable(vtable)?;
+            assert_eq!(size, layout.size);
+            // only ABI alignment is preserved
+            assert_eq!(align, layout.align.abi);
+        }
+
+        let mplace = MPlaceTy { mplace: MemPlace { meta: MemPlaceMeta::None, ..**mplace }, layout };
+        Ok((instance, mplace))
+    }
+}
diff --git a/compiler/rustc_const_eval/src/interpret/step.rs b/compiler/rustc_const_eval/src/interpret/step.rs
new file mode 100644
index 00000000000..09bd07660a3
--- /dev/null
+++ b/compiler/rustc_const_eval/src/interpret/step.rs
@@ -0,0 +1,316 @@
+//! This module contains the `InterpCx` methods for executing a single step of the interpreter.
+//!
+//! The main entry point is the `step` method.
+
+use rustc_middle::mir;
+use rustc_middle::mir::interpret::{InterpResult, Scalar};
+use rustc_middle::ty::layout::LayoutOf;
+
+use super::{InterpCx, Machine};
+
+/// Classify whether an operator is "left-homogeneous", i.e., the LHS has the
+/// same type as the result.
+#[inline]
+fn binop_left_homogeneous(op: mir::BinOp) -> bool {
+    use rustc_middle::mir::BinOp::*;
+    match op {
+        Add | Sub | Mul | Div | Rem | BitXor | BitAnd | BitOr | Offset | Shl | Shr => true,
+        Eq | Ne | Lt | Le | Gt | Ge => false,
+    }
+}
+/// Classify whether an operator is "right-homogeneous", i.e., the RHS has the
+/// same type as the LHS.
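+/// For example, `Shl`/`Shr` permit an RHS with a different integer type than the LHS,
+/// and `Offset` pairs a pointer LHS with an integer offset, so these are not
+/// right-homogeneous.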
+#[inline]
+fn binop_right_homogeneous(op: mir::BinOp) -> bool {
+    use rustc_middle::mir::BinOp::*;
+    match op {
+        Add | Sub | Mul | Div | Rem | BitXor | BitAnd | BitOr | Eq | Ne | Lt | Le | Gt | Ge => true,
+        Offset | Shl | Shr => false,
+    }
+}
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+    pub fn run(&mut self) -> InterpResult<'tcx> {
+        while self.step()? {}
+        Ok(())
+    }
+
+    /// Returns `true` as long as there are more things to do.
+    ///
+    /// This is used by [priroda](https://github.com/oli-obk/priroda).
+    ///
+    /// This is marked `#[inline(always)]` to work around adversarial codegen when `opt-level = 3`.
+    #[inline(always)]
+    pub fn step(&mut self) -> InterpResult<'tcx, bool> {
+        if self.stack().is_empty() {
+            return Ok(false);
+        }
+
+        let loc = match self.frame().loc {
+            Ok(loc) => loc,
+            Err(_) => {
+                // We are unwinding and this fn has no cleanup code.
+                // Just go on unwinding.
+                trace!("unwinding: skipping frame");
+                self.pop_stack_frame(/* unwinding */ true)?;
+                return Ok(true);
+            }
+        };
+        let basic_block = &self.body().basic_blocks()[loc.block];
+
+        let old_frames = self.frame_idx();
+
+        if let Some(stmt) = basic_block.statements.get(loc.statement_index) {
+            assert_eq!(old_frames, self.frame_idx());
+            self.statement(stmt)?;
+            return Ok(true);
+        }
+
+        M::before_terminator(self)?;
+
+        let terminator = basic_block.terminator();
+        assert_eq!(old_frames, self.frame_idx());
+        self.terminator(terminator)?;
+        Ok(true)
+    }
+
+    /// Runs the interpretation logic for the given `mir::Statement` at the current frame and
+    /// statement counter. This also moves the statement counter forward.
+    pub fn statement(&mut self, stmt: &mir::Statement<'tcx>) -> InterpResult<'tcx> {
+        info!("{:?}", stmt);
+
+        use rustc_middle::mir::StatementKind::*;
+
+        // Some statements (e.g., box) push new stack frames.
+        // We have to record the stack frame number *before* executing the statement.
+        let frame_idx = self.frame_idx();
+
+        match &stmt.kind {
+            Assign(box (place, rvalue)) => self.eval_rvalue_into_place(rvalue, *place)?,
+
+            SetDiscriminant { place, variant_index } => {
+                let dest = self.eval_place(**place)?;
+                self.write_discriminant(*variant_index, &dest)?;
+            }
+
+            // Mark locals as alive
+            StorageLive(local) => {
+                self.storage_live(*local)?;
+            }
+
+            // Mark locals as dead
+            StorageDead(local) => {
+                self.storage_dead(*local)?;
+            }
+
+            // No dynamic semantics attached to `FakeRead`; the MIR
+            // interpreter is solely intended for borrowck'ed code.
+            FakeRead(..) => {}
+
+            // Stacked Borrows.
+            Retag(kind, place) => {
+                let dest = self.eval_place(**place)?;
+                M::retag(self, *kind, &dest)?;
+            }
+
+            // Call CopyNonOverlapping
+            CopyNonOverlapping(box rustc_middle::mir::CopyNonOverlapping { src, dst, count }) => {
+                let src = self.eval_operand(src, None)?;
+                let dst = self.eval_operand(dst, None)?;
+                let count = self.eval_operand(count, None)?;
+                self.copy_intrinsic(&src, &dst, &count, /* nonoverlapping */ true)?;
+            }
+
+            // Statements we do not track.
+            AscribeUserType(..) => {}
+
+            // Currently, Miri discards Coverage statements. Coverage statements are only injected
+            // via an optional compile time MIR pass and have no side effects. Since Coverage
+            // statements don't exist at the source level, it is safe for Miri to ignore them, even
+            // for undefined behavior (UB) checks.
+            //
+            // A coverage counter inside a const expression (for example, a counter injected in a
+            // const function) is discarded when the const is evaluated at compile time. Whether
+            // this should change, and/or how to implement a const eval counter, is a subject of the
+            // following issue:
+            //
+            // FIXME(#73156): Handle source code coverage in const eval
+            Coverage(..) => {}
+
+            // Defined to do nothing. These are added by optimization passes, to avoid changing the
+            // size of MIR constantly.
+            Nop => {}
+
+            LlvmInlineAsm { .. } => throw_unsup_format!("inline assembly is not supported"),
+        }
+
+        self.stack_mut()[frame_idx].loc.as_mut().unwrap().statement_index += 1;
+        Ok(())
+    }
+
+    /// Evaluate an assignment statement.
+    ///
+    /// There is no separate `eval_rvalue` function. Instead, the code for handling each rvalue
+    /// type writes its results directly into the memory specified by the place.
+    pub fn eval_rvalue_into_place(
+        &mut self,
+        rvalue: &mir::Rvalue<'tcx>,
+        place: mir::Place<'tcx>,
+    ) -> InterpResult<'tcx> {
+        let dest = self.eval_place(place)?;
+
+        use rustc_middle::mir::Rvalue::*;
+        match *rvalue {
+            ThreadLocalRef(did) => {
+                let ptr = M::thread_local_static_base_pointer(self, did)?;
+                self.write_pointer(ptr, &dest)?;
+            }
+
+            Use(ref operand) => {
+                // Avoid recomputing the layout
+                let op = self.eval_operand(operand, Some(dest.layout))?;
+                self.copy_op(&op, &dest)?;
+            }
+
+            BinaryOp(bin_op, box (ref left, ref right)) => {
+                let layout = binop_left_homogeneous(bin_op).then_some(dest.layout);
+                let left = self.read_immediate(&self.eval_operand(left, layout)?)?;
+                let layout = binop_right_homogeneous(bin_op).then_some(left.layout);
+                let right = self.read_immediate(&self.eval_operand(right, layout)?)?;
+                self.binop_ignore_overflow(bin_op, &left, &right, &dest)?;
+            }
+
+            CheckedBinaryOp(bin_op, box (ref left, ref right)) => {
+                // Due to the extra boolean in the result, we can never reuse the `dest.layout`.
+                let left = self.read_immediate(&self.eval_operand(left, None)?)?;
+                let layout = binop_right_homogeneous(bin_op).then_some(left.layout);
+                let right = self.read_immediate(&self.eval_operand(right, layout)?)?;
+                self.binop_with_overflow(bin_op, &left, &right, &dest)?;
+            }
+
+            UnaryOp(un_op, ref operand) => {
+                // The operand always has the same type as the result.
+                let val = self.read_immediate(&self.eval_operand(operand, Some(dest.layout))?)?;
+                let val = self.unary_op(un_op, &val)?;
+                assert_eq!(val.layout, dest.layout, "layout mismatch for result of {:?}", un_op);
+                self.write_immediate(*val, &dest)?;
+            }
+
+            Aggregate(ref kind, ref operands) => {
+                let (dest, active_field_index) = match **kind {
+                    mir::AggregateKind::Adt(adt_def, variant_index, _, _, active_field_index) => {
+                        self.write_discriminant(variant_index, &dest)?;
+                        if adt_def.is_enum() {
+                            (self.place_downcast(&dest, variant_index)?, active_field_index)
+                        } else {
+                            (dest, active_field_index)
+                        }
+                    }
+                    _ => (dest, None),
+                };
+
+                for (i, operand) in operands.iter().enumerate() {
+                    let op = self.eval_operand(operand, None)?;
+                    // Ignore zero-sized fields.
+                    if !op.layout.is_zst() {
+                        let field_index = active_field_index.unwrap_or(i);
+                        let field_dest = self.place_field(&dest, field_index)?;
+                        self.copy_op(&op, &field_dest)?;
+                    }
+                }
+            }
+
+            Repeat(ref operand, _) => {
+                let src = self.eval_operand(operand, None)?;
+                assert!(!src.layout.is_unsized());
+                let dest = self.force_allocation(&dest)?;
+                let length = dest.len(self)?;
+
+                if length == 0 {
+                    // Nothing to copy... but let's still make sure that `dest` as a place is valid.
+                    self.get_alloc_mut(&dest)?;
+                } else {
+                    // Write the src to the first element.
+                    let first = self.mplace_field(&dest, 0)?;
+                    self.copy_op(&src, &first.into())?;
+
+                    // This is performance-sensitive code for big static/const arrays! So we
+                    // avoid writing each operand individually and instead just make many copies
+                    // of the first element.
+                    let elem_size = first.layout.size;
+                    let first_ptr = first.ptr;
+                    let rest_ptr = first_ptr.offset(elem_size, self)?;
+                    self.memory.copy_repeatedly(
+                        first_ptr,
+                        first.align,
+                        rest_ptr,
+                        first.align,
+                        elem_size,
+                        length - 1,
+                        /*nonoverlapping:*/ true,
+                    )?;
+                }
+            }
+
+            Len(place) => {
+                // FIXME(CTFE): don't allow computing the length of arrays in const eval
+                let src = self.eval_place(place)?;
+                let mplace = self.force_allocation(&src)?;
+                let len = mplace.len(self)?;
+                self.write_scalar(Scalar::from_machine_usize(len, self), &dest)?;
+            }
+
+            AddressOf(_, place) | Ref(_, _, place) => {
+                let src = self.eval_place(place)?;
+                let place = self.force_allocation(&src)?;
+                self.write_immediate(place.to_ref(self), &dest)?;
+            }
+
+            NullaryOp(mir::NullOp::Box, _) => {
+                M::box_alloc(self, &dest)?;
+            }
+
+            NullaryOp(mir::NullOp::SizeOf, ty) => {
+                let ty = self.subst_from_current_frame_and_normalize_erasing_regions(ty);
+                let layout = self.layout_of(ty)?;
+                if layout.is_unsized() {
+                    // FIXME: This should be a span_bug (#80742)
+                    self.tcx.sess.delay_span_bug(
+                        self.frame().current_span(),
+                        &format!("SizeOf nullary MIR operator called for unsized type {}", ty),
+                    );
+                    throw_inval!(SizeOfUnsizedType(ty));
+                }
+                self.write_scalar(Scalar::from_machine_usize(layout.size.bytes(), self), &dest)?;
+            }
+
+            Cast(cast_kind, ref operand, cast_ty) => {
+                let src = self.eval_operand(operand, None)?;
+                let cast_ty = self.subst_from_current_frame_and_normalize_erasing_regions(cast_ty);
+                self.cast(&src, cast_kind, cast_ty, &dest)?;
+            }
+
+            Discriminant(place) => {
+                let op = self.eval_place_to_op(place, None)?;
+                let discr_val = self.read_discriminant(&op)?.0;
+                self.write_scalar(discr_val, &dest)?;
+            }
+        }
+
+        trace!("{:?}", self.dump_place(*dest));
+
+        Ok(())
+    }
+
+    fn terminator(&mut self, terminator: &mir::Terminator<'tcx>) -> InterpResult<'tcx> {
+        info!("{:?}", terminator.kind);
+
+        self.eval_terminator(terminator)?;
+        if !self.stack().is_empty() {
+            if let Ok(loc) = self.frame().loc {
+                info!("// executing {:?}", loc.block);
+            }
+        }
+        Ok(())
+    }
+}
diff --git a/compiler/rustc_const_eval/src/interpret/terminator.rs b/compiler/rustc_const_eval/src/interpret/terminator.rs
new file mode 100644
index 00000000000..1c8e5e9e23c
--- /dev/null
+++ b/compiler/rustc_const_eval/src/interpret/terminator.rs
@@ -0,0 +1,517 @@
+use std::borrow::Cow;
+use std::convert::TryFrom;
+
+use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
+use rustc_middle::ty::layout::{self, LayoutOf as _, TyAndLayout};
+use rustc_middle::ty::Instance;
+use rustc_middle::{
+    mir,
+    ty::{self, Ty},
+};
+use rustc_target::abi;
+use rustc_target::spec::abi::Abi;
+
+use super::{
+    FnVal, ImmTy, InterpCx, InterpResult, MPlaceTy, Machine, OpTy, PlaceTy, Scalar,
+    StackPopCleanup, StackPopUnwind,
+};
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+    fn fn_can_unwind(&self, attrs: CodegenFnAttrFlags, abi: Abi) -> bool {
+        layout::fn_can_unwind(*self.tcx, attrs, abi)
+    }
+
+    pub(super) fn eval_terminator(
+        &mut self,
+        terminator: &mir::Terminator<'tcx>,
+    ) -> InterpResult<'tcx> {
+        use rustc_middle::mir::TerminatorKind::*;
+        match terminator.kind {
+            Return => {
+                self.pop_stack_frame(/* unwinding */ false)?
+            }
+
+            Goto { target } => self.go_to_block(target),
+
+            SwitchInt { ref discr, ref targets, switch_ty } => {
+                let discr = self.read_immediate(&self.eval_operand(discr, None)?)?;
+                trace!("SwitchInt({:?})", *discr);
+                assert_eq!(discr.layout.ty, switch_ty);
+
+                // Branch to the `otherwise` case by default, if no match is found.
+                assert!(!targets.iter().is_empty());
+                let mut target_block = targets.otherwise();
+
+                for (const_int, target) in targets.iter() {
+                    // Compare using binary_op, to also support pointer values
+                    let res = self
+                        .overflowing_binary_op(
+                            mir::BinOp::Eq,
+                            &discr,
+                            &ImmTy::from_uint(const_int, discr.layout),
+                        )?
+                        .0;
+                    if res.to_bool()? {
+                        target_block = target;
+                        break;
+                    }
+                }
+
+                self.go_to_block(target_block);
+            }
+
+            Call { ref func, ref args, destination, ref cleanup, from_hir_call: _, fn_span: _ } => {
+                let old_stack = self.frame_idx();
+                let old_loc = self.frame().loc;
+                let func = self.eval_operand(func, None)?;
+                let (fn_val, abi, caller_can_unwind) = match *func.layout.ty.kind() {
+                    ty::FnPtr(sig) => {
+                        let caller_abi = sig.abi();
+                        let fn_ptr = self.read_pointer(&func)?;
+                        let fn_val = self.memory.get_fn(fn_ptr)?;
+                        (
+                            fn_val,
+                            caller_abi,
+                            self.fn_can_unwind(CodegenFnAttrFlags::empty(), caller_abi),
+                        )
+                    }
+                    ty::FnDef(def_id, substs) => {
+                        let sig = func.layout.ty.fn_sig(*self.tcx);
+                        (
+                            FnVal::Instance(
+                                self.resolve(ty::WithOptConstParam::unknown(def_id), substs)?,
+                            ),
+                            sig.abi(),
+                            self.fn_can_unwind(self.tcx.codegen_fn_attrs(def_id).flags, sig.abi()),
+                        )
+                    }
+                    _ => span_bug!(
+                        terminator.source_info.span,
+                        "invalid callee of type {:?}",
+                        func.layout.ty
+                    ),
+                };
+                let args = self.eval_operands(args)?;
+                let dest_place;
+                let ret = match destination {
+                    Some((dest, ret)) => {
+                        dest_place = self.eval_place(dest)?;
+                        Some((&dest_place, ret))
+                    }
+                    None => None,
+                };
+                self.eval_fn_call(
+                    fn_val,
+                    abi,
+                    &args[..],
+                    ret,
+                    match (cleanup, caller_can_unwind) {
+                        (Some(cleanup), true) => StackPopUnwind::Cleanup(*cleanup),
+                        (None, true) => StackPopUnwind::Skip,
+                        (_, false) => StackPopUnwind::NotAllowed,
+                    },
+                )?;
+                // Sanity-check that `eval_fn_call` either pushed a new frame or
+                // did a jump to another block.
+                if self.frame_idx() == old_stack && self.frame().loc == old_loc {
+                    span_bug!(terminator.source_info.span, "evaluating this call made no progress");
+                }
+            }
+
+            Drop { place, target, unwind } => {
+                let place = self.eval_place(place)?;
+                let ty = place.layout.ty;
+                trace!("TerminatorKind::drop: {:?}, type {}", place, ty);
+
+                let instance = Instance::resolve_drop_in_place(*self.tcx, ty);
+                self.drop_in_place(&place, instance, target, unwind)?;
+            }
+
+            Assert { ref cond, expected, ref msg, target, cleanup } => {
+                let cond_val =
+                    self.read_immediate(&self.eval_operand(cond, None)?)?.to_scalar()?.to_bool()?;
+                if expected == cond_val {
+                    self.go_to_block(target);
+                } else {
+                    M::assert_panic(self, msg, cleanup)?;
+                }
+            }
+
+            Abort => {
+                M::abort(self, "the program aborted execution".to_owned())?;
+            }
+
+            // When we encounter Resume, we've finished unwinding
+            // cleanup for the current stack frame. We pop it in order
+            // to continue unwinding the next frame
+            Resume => {
+                trace!("unwinding: resuming from cleanup");
+                // By definition, a Resume terminator means
+                // that we're unwinding
+                self.pop_stack_frame(/* unwinding */ true)?;
+                return Ok(());
+            }
+
+            // It is UB to ever encounter this.
+            Unreachable => throw_ub!(Unreachable),
+
+            // These should never occur for MIR we actually run.
+            DropAndReplace { .. }
+            | FalseEdge { .. }
+            | FalseUnwind { .. }
+            | Yield { .. }
+            | GeneratorDrop => span_bug!(
+                terminator.source_info.span,
+                "{:#?} should have been eliminated by MIR pass",
+                terminator.kind
+            ),
+
+            // Inline assembly can't be interpreted.
+            InlineAsm { .. } => throw_unsup_format!("inline assembly is not supported"),
+        }
+
+        Ok(())
+    }
+
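+    /// Checks whether the caller's argument layout is ABI-compatible with what the callee
+    /// expects. Under the Rust ABI, two differently-typed arguments are accepted as long as
+    /// their `Scalar`/`ScalarPair` value kinds match (e.g. both are pointers).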
+    fn check_argument_compat(
+        rust_abi: bool,
+        caller: TyAndLayout<'tcx>,
+        callee: TyAndLayout<'tcx>,
+    ) -> bool {
+        if caller.ty == callee.ty {
+            // No question
+            return true;
+        }
+        if !rust_abi {
+            // Don't risk anything
+            return false;
+        }
+        // Compare layout
+        match (&caller.abi, &callee.abi) {
+            // Different valid ranges are okay (once we enforce validity,
+            // that will take care to make it UB to leave the range, just
+            // like for transmute).
+            (abi::Abi::Scalar(ref caller), abi::Abi::Scalar(ref callee)) => {
+                caller.value == callee.value
+            }
+            (
+                abi::Abi::ScalarPair(ref caller1, ref caller2),
+                abi::Abi::ScalarPair(ref callee1, ref callee2),
+            ) => caller1.value == callee1.value && caller2.value == callee2.value,
+            // Be conservative
+            _ => false,
+        }
+    }
+
+    /// Pass a single argument, checking the types for compatibility.
+    fn pass_argument(
+        &mut self,
+        rust_abi: bool,
+        caller_arg: &mut impl Iterator<Item = OpTy<'tcx, M::PointerTag>>,
+        callee_arg: &PlaceTy<'tcx, M::PointerTag>,
+    ) -> InterpResult<'tcx> {
+        if rust_abi && callee_arg.layout.is_zst() {
+            // Nothing to do.
+            trace!("Skipping callee ZST");
+            return Ok(());
+        }
+        let caller_arg = caller_arg.next().ok_or_else(|| {
+            err_ub_format!("calling a function with fewer arguments than it requires")
+        })?;
+        if rust_abi {
+            assert!(!caller_arg.layout.is_zst(), "ZSTs must have been already filtered out");
+        }
+        // Now, check
+        if !Self::check_argument_compat(rust_abi, caller_arg.layout, callee_arg.layout) {
+            throw_ub_format!(
+                "calling a function with argument of type {:?} passing data of type {:?}",
+                callee_arg.layout.ty,
+                caller_arg.layout.ty
+            )
+        }
+        // We allow some transmutes here
+        self.copy_op_transmute(&caller_arg, callee_arg)
+    }
+
+    /// Call this function -- pushing the stack frame and initializing the arguments.
+    fn eval_fn_call(
+        &mut self,
+        fn_val: FnVal<'tcx, M::ExtraFnVal>,
+        caller_abi: Abi,
+        args: &[OpTy<'tcx, M::PointerTag>],
+        ret: Option<(&PlaceTy<'tcx, M::PointerTag>, mir::BasicBlock)>,
+        mut unwind: StackPopUnwind,
+    ) -> InterpResult<'tcx> {
+        trace!("eval_fn_call: {:#?}", fn_val);
+
+        let instance = match fn_val {
+            FnVal::Instance(instance) => instance,
+            FnVal::Other(extra) => {
+                return M::call_extra_fn(self, extra, caller_abi, args, ret, unwind);
+            }
+        };
+
+        let get_abi = |this: &Self, instance_ty: Ty<'tcx>| match instance_ty.kind() {
+            ty::FnDef(..) => instance_ty.fn_sig(*this.tcx).abi(),
+            ty::Closure(..) => Abi::RustCall,
+            ty::Generator(..) => Abi::Rust,
+            _ => span_bug!(this.cur_span(), "unexpected callee ty: {:?}", instance_ty),
+        };
+
+        // ABI check
+        let check_abi = |callee_abi: Abi| -> InterpResult<'tcx> {
+            let normalize_abi = |abi| match abi {
+                Abi::Rust | Abi::RustCall | Abi::RustIntrinsic | Abi::PlatformIntrinsic =>
+                // These are all the same ABI, really.
+                {
+                    Abi::Rust
+                }
+                abi => abi,
+            };
+            if normalize_abi(caller_abi) != normalize_abi(callee_abi) {
+                throw_ub_format!(
+                    "calling a function with ABI {} using caller ABI {}",
+                    callee_abi.name(),
+                    caller_abi.name()
+                )
+            }
+            Ok(())
+        };
+
+        match instance.def {
+            ty::InstanceDef::Intrinsic(..) => {
+                if M::enforce_abi(self) {
+                    check_abi(get_abi(self, instance.ty(*self.tcx, self.param_env)))?;
+                }
+                assert!(caller_abi == Abi::RustIntrinsic || caller_abi == Abi::PlatformIntrinsic);
+                M::call_intrinsic(self, instance, args, ret, unwind)
+            }
+            ty::InstanceDef::VtableShim(..)
+            | ty::InstanceDef::ReifyShim(..)
+            | ty::InstanceDef::ClosureOnceShim { .. }
+            | ty::InstanceDef::FnPtrShim(..)
+            | ty::InstanceDef::DropGlue(..)
+            | ty::InstanceDef::CloneShim(..)
+            | ty::InstanceDef::Item(_) => {
+                // We need MIR for this fn
+                let body =
+                    match M::find_mir_or_eval_fn(self, instance, caller_abi, args, ret, unwind)? {
+                        Some(body) => body,
+                        None => return Ok(()),
+                    };
+
+                // Check against the ABI of the MIR body we are calling (not the ABI of `instance`;
+                // these can differ when `find_mir_or_eval_fn` does something clever like resolve
+                // exported symbol names).
+                let callee_def_id = body.source.def_id();
+                let callee_abi = get_abi(self, self.tcx.type_of(callee_def_id));
+
+                if M::enforce_abi(self) {
+                    check_abi(callee_abi)?;
+                }
+
+                if !matches!(unwind, StackPopUnwind::NotAllowed)
+                    && !self
+                        .fn_can_unwind(self.tcx.codegen_fn_attrs(callee_def_id).flags, callee_abi)
+                {
+                    // The callee cannot unwind.
+                    unwind = StackPopUnwind::NotAllowed;
+                }
+
+                self.push_stack_frame(
+                    instance,
+                    body,
+                    ret.map(|p| p.0),
+                    StackPopCleanup::Goto { ret: ret.map(|p| p.1), unwind },
+                )?;
+
+                // If an error is raised here, pop the frame again to get an accurate backtrace.
+                // To this end, we wrap it all in a `try` block.
+                let res: InterpResult<'tcx> = try {
+                    trace!(
+                        "caller ABI: {:?}, args: {:#?}",
+                        caller_abi,
+                        args.iter()
+                            .map(|arg| (arg.layout.ty, format!("{:?}", **arg)))
+                            .collect::<Vec<_>>()
+                    );
+                    trace!(
+                        "spread_arg: {:?}, locals: {:#?}",
+                        body.spread_arg,
+                        body.args_iter()
+                            .map(|local| (
+                                local,
+                                self.layout_of_local(self.frame(), local, None).unwrap().ty
+                            ))
+                            .collect::<Vec<_>>()
+                    );
+
+                    // Figure out how to pass which arguments.
+                    // The Rust ABI is special: ZSTs get skipped.
+                    let rust_abi = matches!(caller_abi, Abi::Rust | Abi::RustCall);
+                    // We have two iterators: where the arguments come from,
+                    // and where they go to.
+
+                    // For where they come from: If the ABI is RustCall, we untuple the
+                    // last incoming argument.  These two iterators do not have the same type,
+                    // so to keep the code paths uniform we accept an allocation
+                    // (for RustCall ABI only).
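+                    // (For example, a closure invoked through `Fn::call(&f, (a, b))` receives
+                    // its arguments as a single tuple, which is split apart again here.)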
+                    let caller_args: Cow<'_, [OpTy<'tcx, M::PointerTag>]> =
+                        if caller_abi == Abi::RustCall && !args.is_empty() {
+                            // Untuple
+                            let (untuple_arg, args) = args.split_last().unwrap();
+                            trace!("eval_fn_call: Will pass last argument by untupling");
+                            Cow::from(
+                                args.iter()
+                                    .map(|&a| Ok(a))
+                                    .chain(
+                                        (0..untuple_arg.layout.fields.count())
+                                            .map(|i| self.operand_field(untuple_arg, i)),
+                                    )
+                                    .collect::<InterpResult<'_, Vec<OpTy<'tcx, M::PointerTag>>>>(
+                                    )?,
+                            )
+                        } else {
+                            // Plain arg passing
+                            Cow::from(args)
+                        };
+                    // Skip ZSTs
+                    let mut caller_iter =
+                        caller_args.iter().filter(|op| !rust_abi || !op.layout.is_zst()).copied();
+
+                    // Now we have to spread them out across the callee's locals,
+                    // taking into account the `spread_arg`.  If we could write
+                    // this as a single iterator (that handles `spread_arg`), then
+                    // `pass_argument` would be the loop body. It takes care to
+                    // not advance `caller_iter` for ZSTs.
+                    for local in body.args_iter() {
+                        let dest = self.eval_place(mir::Place::from(local))?;
+                        if Some(local) == body.spread_arg {
+                            // Must be a tuple
+                            for i in 0..dest.layout.fields.count() {
+                                let dest = self.place_field(&dest, i)?;
+                                self.pass_argument(rust_abi, &mut caller_iter, &dest)?;
+                            }
+                        } else {
+                            // Normal argument
+                            self.pass_argument(rust_abi, &mut caller_iter, &dest)?;
+                        }
+                    }
+                    // Now we should have no more caller args
+                    if caller_iter.next().is_some() {
+                        throw_ub_format!("calling a function with more arguments than it expected")
+                    }
+                    // Don't forget to check the return type!
+                    if let Some((caller_ret, _)) = ret {
+                        let callee_ret = self.eval_place(mir::Place::return_place())?;
+                        if !Self::check_argument_compat(
+                            rust_abi,
+                            caller_ret.layout,
+                            callee_ret.layout,
+                        ) {
+                            throw_ub_format!(
+                                "calling a function with return type {:?} passing \
+                                     return place of type {:?}",
+                                callee_ret.layout.ty,
+                                caller_ret.layout.ty
+                            )
+                        }
+                    } else {
+                        let local = mir::RETURN_PLACE;
+                        let callee_layout = self.layout_of_local(self.frame(), local, None)?;
+                        if !callee_layout.abi.is_uninhabited() {
+                            throw_ub_format!("calling a returning function without a return place")
+                        }
+                    }
+                };
+                match res {
+                    Err(err) => {
+                        self.stack_mut().pop();
+                        Err(err)
+                    }
+                    Ok(()) => Ok(()),
+                }
+            }
+            // cannot use the shim here, because that will only result in infinite recursion
+            ty::InstanceDef::Virtual(_, idx) => {
+                let mut args = args.to_vec();
+                // We have to implement all "object safe receivers".  Currently we
+                // support built-in pointers (`&`, `&mut`, `Box`) as well as unsized `self`.  We do
+                // not yet support custom self types.
+                // Also see `compiler/rustc_codegen_llvm/src/abi.rs` and `compiler/rustc_codegen_ssa/src/mir/block.rs`.
+                let receiver_place = match args[0].layout.ty.builtin_deref(true) {
+                    Some(_) => {
+                        // Built-in pointer.
+                        self.deref_operand(&args[0])?
+                    }
+                    None => {
+                        // Unsized self.
+                        args[0].assert_mem_place()
+                    }
+                };
+                // Find and consult vtable
+                let vtable = self.scalar_to_ptr(receiver_place.vtable());
+                let fn_val = self.get_vtable_slot(vtable, u64::try_from(idx).unwrap())?;
+
+                // `*mut receiver_place.layout.ty` is almost the layout that we
+                // want for args[0]: We have to project to field 0 because we want
+                // a thin pointer.
+                assert!(receiver_place.layout.is_unsized());
+                let receiver_ptr_ty = self.tcx.mk_mut_ptr(receiver_place.layout.ty);
+                let this_receiver_ptr = self.layout_of(receiver_ptr_ty)?.field(self, 0);
+                // Adjust receiver argument.
+                args[0] = OpTy::from(ImmTy::from_immediate(
+                    Scalar::from_maybe_pointer(receiver_place.ptr, self).into(),
+                    this_receiver_ptr,
+                ));
+                trace!("Patched self operand to {:#?}", args[0]);
+                // recurse with concrete function
+                self.eval_fn_call(fn_val, caller_abi, &args, ret, unwind)
+            }
+        }
+    }
+
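+    /// Runs the drop glue for `place` by calling the given `drop_in_place` `instance` with a
+    /// pointer to `place`, then continues at `target` (or unwinds to `unwind`).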
+    fn drop_in_place(
+        &mut self,
+        place: &PlaceTy<'tcx, M::PointerTag>,
+        instance: ty::Instance<'tcx>,
+        target: mir::BasicBlock,
+        unwind: Option<mir::BasicBlock>,
+    ) -> InterpResult<'tcx> {
+        trace!("drop_in_place: {:?},\n  {:?}, {:?}", *place, place.layout.ty, instance);
+        // We take the address of the object.  This may well be unaligned, which is fine
+        // for us here.  However, unaligned accesses will probably make the actual drop
+        // implementation fail -- a problem shared by rustc.
+        let place = self.force_allocation(place)?;
+
+        let (instance, place) = match place.layout.ty.kind() {
+            ty::Dynamic(..) => {
+                // Dropping a trait object.
+                self.unpack_dyn_trait(&place)?
+            }
+            _ => (instance, place),
+        };
+
+        let arg = ImmTy::from_immediate(
+            place.to_ref(self),
+            self.layout_of(self.tcx.mk_mut_ptr(place.layout.ty))?,
+        );
+
+        let ty = self.tcx.mk_unit(); // return type is ()
+        let dest = MPlaceTy::dangling(self.layout_of(ty)?);
+
+        self.eval_fn_call(
+            FnVal::Instance(instance),
+            Abi::Rust,
+            &[arg.into()],
+            Some((&dest.into(), target)),
+            match unwind {
+                Some(cleanup) => StackPopUnwind::Cleanup(cleanup),
+                None => StackPopUnwind::Skip,
+            },
+        )
+    }
+}
diff --git a/compiler/rustc_const_eval/src/interpret/traits.rs b/compiler/rustc_const_eval/src/interpret/traits.rs
new file mode 100644
index 00000000000..a6ba00ec695
--- /dev/null
+++ b/compiler/rustc_const_eval/src/interpret/traits.rs
@@ -0,0 +1,142 @@
+use std::convert::TryFrom;
+
+use rustc_middle::mir::interpret::{InterpResult, Pointer, PointerArithmetic};
+use rustc_middle::ty::{
+    self, Ty, COMMON_VTABLE_ENTRIES, COMMON_VTABLE_ENTRIES_ALIGN,
+    COMMON_VTABLE_ENTRIES_DROPINPLACE, COMMON_VTABLE_ENTRIES_SIZE,
+};
+use rustc_target::abi::{Align, Size};
+
+use super::util::ensure_monomorphic_enough;
+use super::{FnVal, InterpCx, Machine};
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+    /// Creates a dynamic vtable for the given type and vtable origin. This is used only for
+    /// trait objects.
+    ///
+    /// The `trait_ref` encodes the erased self type. Hence, if we are
+    /// making an object `Foo<Trait>` from a value of type `Foo<T>`, then
+    /// `trait_ref` would map `T: Trait`.
+    pub fn get_vtable(
+        &mut self,
+        ty: Ty<'tcx>,
+        poly_trait_ref: Option<ty::PolyExistentialTraitRef<'tcx>>,
+    ) -> InterpResult<'tcx, Pointer<Option<M::PointerTag>>> {
+        trace!("get_vtable(trait_ref={:?})", poly_trait_ref);
+
+        let (ty, poly_trait_ref) = self.tcx.erase_regions((ty, poly_trait_ref));
+
+        // All vtables must be monomorphic, bail out otherwise.
+        ensure_monomorphic_enough(*self.tcx, ty)?;
+        ensure_monomorphic_enough(*self.tcx, poly_trait_ref)?;
+
+        let vtable_allocation = self.tcx.vtable_allocation(ty, poly_trait_ref);
+
+        let vtable_ptr = self.memory.global_base_pointer(Pointer::from(vtable_allocation))?;
+
+        Ok(vtable_ptr.into())
+    }
+
+    /// Resolves the function at the specified slot in the provided
+    /// vtable. Currently an index of '3' (`COMMON_VTABLE_ENTRIES.len()`)
+    /// corresponds to the first method declared in the trait of the provided vtable.
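+    /// (The vtable layout is `[drop_in_place, size, align, method 0, method 1, ...]`;
+    /// see `COMMON_VTABLE_ENTRIES`.)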
+    pub fn get_vtable_slot(
+        &self,
+        vtable: Pointer<Option<M::PointerTag>>,
+        idx: u64,
+    ) -> InterpResult<'tcx, FnVal<'tcx, M::ExtraFnVal>> {
+        let ptr_size = self.pointer_size();
+        let vtable_slot = vtable.offset(ptr_size * idx, self)?;
+        let vtable_slot = self
+            .memory
+            .get(vtable_slot, ptr_size, self.tcx.data_layout.pointer_align.abi)?
+            .expect("cannot be a ZST");
+        let fn_ptr = self.scalar_to_ptr(vtable_slot.read_ptr_sized(Size::ZERO)?.check_init()?);
+        self.memory.get_fn(fn_ptr)
+    }
+
+    /// Returns the drop fn instance as well as the actual dynamic type.
+    pub fn read_drop_type_from_vtable(
+        &self,
+        vtable: Pointer<Option<M::PointerTag>>,
+    ) -> InterpResult<'tcx, (ty::Instance<'tcx>, Ty<'tcx>)> {
+        let pointer_size = self.pointer_size();
+        // We don't care about the pointee type; we just want a pointer.
+        let vtable = self
+            .memory
+            .get(
+                vtable,
+                pointer_size * u64::try_from(COMMON_VTABLE_ENTRIES.len()).unwrap(),
+                self.tcx.data_layout.pointer_align.abi,
+            )?
+            .expect("cannot be a ZST");
+        let drop_fn = vtable
+            .read_ptr_sized(
+                pointer_size * u64::try_from(COMMON_VTABLE_ENTRIES_DROPINPLACE).unwrap(),
+            )?
+            .check_init()?;
+        // We *need* an instance here, no other kind of function value, to be able
+        // to determine the type.
+        let drop_instance = self.memory.get_fn(self.scalar_to_ptr(drop_fn))?.as_instance()?;
+        trace!("Found drop fn: {:?}", drop_instance);
+        let fn_sig = drop_instance.ty(*self.tcx, self.param_env).fn_sig(*self.tcx);
+        let fn_sig = self.tcx.normalize_erasing_late_bound_regions(self.param_env, fn_sig);
+        // The drop function takes `*mut T` where `T` is the type being dropped, so get that.
+        let args = fn_sig.inputs();
+        if args.len() != 1 {
+            throw_ub!(InvalidVtableDropFn(fn_sig));
+        }
+        let ty =
+            args[0].builtin_deref(true).ok_or_else(|| err_ub!(InvalidVtableDropFn(fn_sig)))?.ty;
+        Ok((drop_instance, ty))
+    }
+
+    pub fn read_size_and_align_from_vtable(
+        &self,
+        vtable: Pointer<Option<M::PointerTag>>,
+    ) -> InterpResult<'tcx, (Size, Align)> {
+        let pointer_size = self.pointer_size();
+        // We check for `size = 3 * ptr_size`, which covers the drop fn (unused here),
+        // the size, and the align (which we read below).
+        let vtable = self
+            .memory
+            .get(
+                vtable,
+                pointer_size * u64::try_from(COMMON_VTABLE_ENTRIES.len()).unwrap(),
+                self.tcx.data_layout.pointer_align.abi,
+            )?
+            .expect("cannot be a ZST");
+        let size = vtable
+            .read_ptr_sized(pointer_size * u64::try_from(COMMON_VTABLE_ENTRIES_SIZE).unwrap())?
+            .check_init()?;
+        let size = size.to_machine_usize(self)?;
+        let align = vtable
+            .read_ptr_sized(pointer_size * u64::try_from(COMMON_VTABLE_ENTRIES_ALIGN).unwrap())?
+            .check_init()?;
+        let align = align.to_machine_usize(self)?;
+        let align = Align::from_bytes(align).map_err(|e| err_ub!(InvalidVtableAlignment(e)))?;
+
+        if size >= self.tcx.data_layout.obj_size_bound() {
+            throw_ub!(InvalidVtableSize);
+        }
+        Ok((Size::from_bytes(size), align))
+    }
+
+    pub fn read_new_vtable_after_trait_upcasting_from_vtable(
+        &self,
+        vtable: Pointer<Option<M::PointerTag>>,
+        idx: u64,
+    ) -> InterpResult<'tcx, Pointer<Option<M::PointerTag>>> {
+        let pointer_size = self.pointer_size();
+
+        let vtable_slot = vtable.offset(pointer_size * idx, self)?;
+        let new_vtable = self
+            .memory
+            .get(vtable_slot, pointer_size, self.tcx.data_layout.pointer_align.abi)?
+            .expect("cannot be a ZST");
+
+        let new_vtable = self.scalar_to_ptr(new_vtable.read_ptr_sized(Size::ZERO)?.check_init()?);
+
+        Ok(new_vtable)
+    }
+}
diff --git a/compiler/rustc_const_eval/src/interpret/util.rs b/compiler/rustc_const_eval/src/interpret/util.rs
new file mode 100644
index 00000000000..eb0fdebb665
--- /dev/null
+++ b/compiler/rustc_const_eval/src/interpret/util.rs
@@ -0,0 +1,84 @@
+use rustc_middle::mir::interpret::InterpResult;
+use rustc_middle::ty::{self, Ty, TyCtxt, TypeFoldable, TypeVisitor};
+use std::convert::TryInto;
+use std::ops::ControlFlow;
+
+/// Checks whether a used generic parameter still requires substitution; if so, this errors with `TooGeneric`.
+crate fn ensure_monomorphic_enough<'tcx, T>(tcx: TyCtxt<'tcx>, ty: T) -> InterpResult<'tcx>
+where
+    T: TypeFoldable<'tcx>,
+{
+    debug!("ensure_monomorphic_enough: ty={:?}", ty);
+    if !ty.potentially_needs_subst() {
+        return Ok(());
+    }
+
+    struct FoundParam;
+    struct UsedParamsNeedSubstVisitor<'tcx> {
+        tcx: TyCtxt<'tcx>,
+    }
+
+    impl<'tcx> TypeVisitor<'tcx> for UsedParamsNeedSubstVisitor<'tcx> {
+        type BreakTy = FoundParam;
+
+        fn tcx_for_anon_const_substs(&self) -> Option<TyCtxt<'tcx>> {
+            Some(self.tcx)
+        }
+
+        fn visit_ty(&mut self, ty: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
+            if !ty.potentially_needs_subst() {
+                return ControlFlow::CONTINUE;
+            }
+
+            match *ty.kind() {
+                ty::Param(_) => ControlFlow::Break(FoundParam),
+                ty::Closure(def_id, substs)
+                | ty::Generator(def_id, substs, ..)
+                | ty::FnDef(def_id, substs) => {
+                    let unused_params = self.tcx.unused_generic_params(def_id);
+                    for (index, subst) in substs.into_iter().enumerate() {
+                        let index = index
+                            .try_into()
+                            .expect("more generic parameters than can fit into a `u32`");
+                        let is_used = unused_params.contains(index).map_or(true, |unused| !unused);
+                        // Only recurse when generic parameters in fns, closures and generators
+                        // are used and require substitution.
+                        match (is_used, subst.definitely_needs_subst(self.tcx)) {
+                            // Just in case there are closures or generators within this subst,
+                            // recurse.
+                            (true, true) => return subst.super_visit_with(self),
+                            // Confirm that polymorphization replaced the parameter with
+                            // `ty::Param`/`ty::ConstKind::Param`.
+                            (false, true) if cfg!(debug_assertions) => match subst.unpack() {
+                                ty::subst::GenericArgKind::Type(ty) => {
+                                    assert!(matches!(ty.kind(), ty::Param(_)))
+                                }
+                                ty::subst::GenericArgKind::Const(ct) => {
+                                    assert!(matches!(ct.val, ty::ConstKind::Param(_)))
+                                }
+                                ty::subst::GenericArgKind::Lifetime(..) => (),
+                            },
+                            _ => {}
+                        }
+                    }
+                    ControlFlow::CONTINUE
+                }
+                _ => ty.super_visit_with(self),
+            }
+        }
+
+        fn visit_const(&mut self, c: &'tcx ty::Const<'tcx>) -> ControlFlow<Self::BreakTy> {
+            match c.val {
+                ty::ConstKind::Param(..) => ControlFlow::Break(FoundParam),
+                _ => c.super_visit_with(self),
+            }
+        }
+    }
+
+    let mut vis = UsedParamsNeedSubstVisitor { tcx };
+    if matches!(ty.visit_with(&mut vis), ControlFlow::Break(FoundParam)) {
+        throw_inval!(TooGeneric);
+    } else {
+        Ok(())
+    }
+}
diff --git a/compiler/rustc_const_eval/src/interpret/validity.rs b/compiler/rustc_const_eval/src/interpret/validity.rs
new file mode 100644
index 00000000000..03e0a8e7901
--- /dev/null
+++ b/compiler/rustc_const_eval/src/interpret/validity.rs
@@ -0,0 +1,965 @@
+//! Check the validity invariant of a given value, and tell the user
+//! where in the value it got violated.
+//! In const context, this goes even further and tries to approximate const safety.
+//! That's useful because it means other passes (e.g. promotion) can rely on `const`s
+//! to be const-safe.
+
+use std::convert::TryFrom;
+use std::fmt::Write;
+use std::num::NonZeroUsize;
+
+use rustc_data_structures::fx::FxHashSet;
+use rustc_hir as hir;
+use rustc_middle::mir::interpret::InterpError;
+use rustc_middle::ty;
+use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
+use rustc_span::symbol::{sym, Symbol};
+use rustc_target::abi::{Abi, Scalar as ScalarAbi, Size, VariantIdx, Variants, WrappingRange};
+
+use std::hash::Hash;
+
+use super::{
+    alloc_range, CheckInAllocMsg, GlobalAlloc, InterpCx, InterpResult, MPlaceTy, Machine,
+    MemPlaceMeta, OpTy, ScalarMaybeUninit, ValueVisitor,
+};
+
+macro_rules! throw_validation_failure {
+    ($where:expr, { $( $what_fmt:expr ),+ } $( expected { $( $expected_fmt:expr ),+ } )?) => {{
+        let mut msg = String::new();
+        msg.push_str("encountered ");
+        write!(&mut msg, $($what_fmt),+).unwrap();
+        $(
+            msg.push_str(", but expected ");
+            write!(&mut msg, $($expected_fmt),+).unwrap();
+        )?
+        let path = rustc_middle::ty::print::with_no_trimmed_paths(|| {
+            let where_ = &$where;
+            if !where_.is_empty() {
+                let mut path = String::new();
+                write_path(&mut path, where_);
+                Some(path)
+            } else {
+                None
+            }
+        });
+        throw_ub!(ValidationFailure { path, msg })
+    }};
+}
+
+/// If $e throws an error matching the pattern, throw a validation failure.
+/// Other errors are passed back to the caller, unchanged -- and if they reach the root of
+/// the visitor, we make sure only validation errors and `InvalidProgram` errors are left.
+/// This lets you use the patterns as a kind of validation list, asserting which errors
+/// can possibly happen:
+///
+/// ```
+/// let v = try_validation!(some_fn(), some_path, {
+///     Foo | Bar | Baz => { "some failure" },
+/// });
+/// ```
+///
+/// An additional expected parameter can also be added to the failure message:
+///
+/// ```
+/// let v = try_validation!(some_fn(), some_path, {
+///     Foo | Bar | Baz => { "some failure" } expected { "something that wasn't a failure" },
+/// });
+/// ```
+///
+/// An additional nicety is that both parameters actually take format args, so you can just write
+/// the format string directly:
+///
+/// ```
+/// let v = try_validation!(some_fn(), some_path, {
+///     Foo | Bar | Baz => { "{:?}", some_failure } expected { "{}", expected_value },
+/// });
+/// ```
+///
+macro_rules! try_validation {
+    ($e:expr, $where:expr,
+    $( $( $p:pat )|+ => { $( $what_fmt:expr ),+ } $( expected { $( $expected_fmt:expr ),+ } )? ),+ $(,)?
+    ) => {{
+        match $e {
+            Ok(x) => x,
+            // We catch the error and turn it into a validation failure. We are okay with
+            // allocation here as this can only slow down builds that fail anyway.
+            Err(e) => match e.kind() {
+                $(
+                    $($p)|+ =>
+                       throw_validation_failure!(
+                            $where,
+                            { $( $what_fmt ),+ } $( expected { $( $expected_fmt ),+ } )?
+                        )
+                ),+,
+                #[allow(unreachable_patterns)]
+                _ => Err::<!, _>(e)?,
+            }
+        }
+    }};
+}
+
+/// We want to show a nice path to the invalid field for diagnostics,
+/// but avoid string operations in the happy case where no error happens.
+/// So we track a `Vec<PathElem>` where `PathElem` contains all the data we
+/// need to later print something for the user.
+#[derive(Copy, Clone, Debug)]
+pub enum PathElem {
+    Field(Symbol),
+    Variant(Symbol),
+    GeneratorState(VariantIdx),
+    CapturedVar(Symbol),
+    ArrayElem(usize),
+    TupleElem(usize),
+    Deref,
+    EnumTag,
+    GeneratorTag,
+    DynDowncast,
+}
+
+/// Extra things to check for during validation of CTFE results.
+pub enum CtfeValidationMode {
+    /// Regular validation, nothing special happening.
+    Regular,
+    /// Validation of a `const`.
+    /// `inner` says if this is an inner, indirect allocation (as opposed to the top-level const
+    /// allocation). Being an inner allocation makes a difference because the top-level allocation
+    /// of a `const` is copied for each use, but the inner allocations are implicitly shared.
+    /// `allow_static_ptrs` says if pointers to statics are permitted (which is the case for promoteds in statics).
+    Const { inner: bool, allow_static_ptrs: bool },
+}
+
+/// State for tracking recursive validation of references
+pub struct RefTracking<T, PATH = ()> {
+    pub seen: FxHashSet<T>,
+    pub todo: Vec<(T, PATH)>,
+}
+
+impl<T: Copy + Eq + Hash + std::fmt::Debug, PATH: Default> RefTracking<T, PATH> {
+    pub fn empty() -> Self {
+        RefTracking { seen: FxHashSet::default(), todo: vec![] }
+    }
+    pub fn new(op: T) -> Self {
+        let mut ref_tracking_for_consts =
+            RefTracking { seen: FxHashSet::default(), todo: vec![(op, PATH::default())] };
+        ref_tracking_for_consts.seen.insert(op);
+        ref_tracking_for_consts
+    }
+
+    pub fn track(&mut self, op: T, path: impl FnOnce() -> PATH) {
+        if self.seen.insert(op) {
+            trace!("Recursing below ptr {:#?}", op);
+            let path = path();
+            // Remember to come back to this later.
+            self.todo.push((op, path));
+        }
+    }
+}
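`RefTracking` is essentially a de-duplicating worklist: `track` only enqueues values that have not been seen before, and an outside driving loop in the caller drains `todo` until it is empty (compare the `ref_tracking` note on `const_validate_operand` further down). A rough standalone sketch of that loop, assuming a made-up `Worklist` type and a trivial "discovers one reference" step in place of real validation:

use std::collections::HashSet;

// Simplified stand-in for `RefTracking<T, PATH>` with `PATH = ()`.
struct Worklist<T> {
    seen: HashSet<T>,
    todo: Vec<T>,
}

impl<T: Copy + Eq + std::hash::Hash> Worklist<T> {
    fn new(root: T) -> Self {
        let mut seen = HashSet::new();
        seen.insert(root);
        Worklist { seen, todo: vec![root] }
    }

    // Like `RefTracking::track`: only enqueue values we have not visited yet.
    fn track(&mut self, value: T) {
        if self.seen.insert(value) {
            self.todo.push(value);
        }
    }
}

fn main() {
    // Pretend each u32 "value" references value + 1, up to 3 (a hypothetical graph).
    let mut wl = Worklist::new(0u32);
    while let Some(v) = wl.todo.pop() {
        // A real "check(v)" would run here; checking discovers outgoing references:
        if v < 3 {
            wl.track(v + 1);
        }
    }
    assert_eq!(wl.seen.len(), 4); // 0, 1, 2, 3 each visited exactly once
}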
+
+/// Format a path
+fn write_path(out: &mut String, path: &[PathElem]) {
+    use self::PathElem::*;
+
+    for elem in path.iter() {
+        match elem {
+            Field(name) => write!(out, ".{}", name),
+            EnumTag => write!(out, ".<enum-tag>"),
+            Variant(name) => write!(out, ".<enum-variant({})>", name),
+            GeneratorTag => write!(out, ".<generator-tag>"),
+            GeneratorState(idx) => write!(out, ".<generator-state({})>", idx.index()),
+            CapturedVar(name) => write!(out, ".<captured-var({})>", name),
+            TupleElem(idx) => write!(out, ".{}", idx),
+            ArrayElem(idx) => write!(out, "[{}]", idx),
+            // `.<deref>` does not match Rust syntax, but it is more readable for long paths -- and
+            // some of the other items here also are not Rust syntax.  Actually we can't
+            // even use the usual syntax because we are just showing the projections,
+            // not the root.
+            Deref => write!(out, ".<deref>"),
+            DynDowncast => write!(out, ".<dyn-downcast>"),
+        }
+        .unwrap()
+    }
+}
+
+// Formats the range so that a sentence like "expected something {}" reads as
+// "expected something <in the given range>".
+fn wrapping_range_format(r: WrappingRange, max_hi: u128) -> String {
+    let WrappingRange { start: lo, end: hi } = r;
+    assert!(hi <= max_hi);
+    if lo > hi {
+        format!("less or equal to {}, or greater or equal to {}", hi, lo)
+    } else if lo == hi {
+        format!("equal to {}", lo)
+    } else if lo == 0 {
+        assert!(hi < max_hi, "should not be printing if the range covers everything");
+        format!("less or equal to {}", hi)
+    } else if hi == max_hi {
+        assert!(lo > 0, "should not be printing if the range covers everything");
+        format!("greater or equal to {}", lo)
+    } else {
+        format!("in the range {:?}", r)
+    }
+}
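The `WrappingRange` being formatted here is the same wrap-around range that `visit_scalar` later tests membership in (via `WrappingRange::contains`): when `start <= end` it is an ordinary inclusive range, otherwise it wraps past the maximum value and excludes only the values strictly between `end` and `start`. A small standalone sketch of that containment rule, with an invented `wrapping_contains` helper:

// Inclusive range over the bit patterns of a scalar; if start > end it wraps
// around the maximum value (e.g. start = 1, end = max means "anything but 0").
fn wrapping_contains(start: u128, end: u128, value: u128) -> bool {
    if start <= end {
        start <= value && value <= end
    } else {
        value >= start || value <= end
    }
}

fn main() {
    // Niche of a `NonZeroU8`-like layout: valid range 1..=255 on an 8-bit scalar.
    assert!(!wrapping_contains(1, 255, 0));
    assert!(wrapping_contains(1, 255, 17));
    // Wrap-around range 254..=1 excludes exactly 2..=253.
    assert!(wrapping_contains(254, 1, 255));
    assert!(!wrapping_contains(254, 1, 100));
}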
+
+struct ValidityVisitor<'rt, 'mir, 'tcx, M: Machine<'mir, 'tcx>> {
+    /// The `path` may be pushed to, but the part that is present when a function
+    /// starts must not be changed!  `visit_fields` and `visit_array` rely on
+    /// this stack discipline.
+    path: Vec<PathElem>,
+    ref_tracking: Option<&'rt mut RefTracking<MPlaceTy<'tcx, M::PointerTag>, Vec<PathElem>>>,
+    /// `None` indicates this is not validating for CTFE (but for runtime).
+    ctfe_mode: Option<CtfeValidationMode>,
+    ecx: &'rt InterpCx<'mir, 'tcx, M>,
+}
+
+impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, 'tcx, M> {
+    fn aggregate_field_path_elem(&mut self, layout: TyAndLayout<'tcx>, field: usize) -> PathElem {
+        // First, check if we are projecting to a variant.
+        match layout.variants {
+            Variants::Multiple { tag_field, .. } => {
+                if tag_field == field {
+                    return match layout.ty.kind() {
+                        ty::Adt(def, ..) if def.is_enum() => PathElem::EnumTag,
+                        ty::Generator(..) => PathElem::GeneratorTag,
+                        _ => bug!("non-variant type {:?}", layout.ty),
+                    };
+                }
+            }
+            Variants::Single { .. } => {}
+        }
+
+        // Now we know we are projecting to a field, so figure out which one.
+        match layout.ty.kind() {
+            // generators and closures.
+            ty::Closure(def_id, _) | ty::Generator(def_id, _, _) => {
+                let mut name = None;
+                // FIXME this should be more descriptive i.e. CapturePlace instead of CapturedVar
+                // https://github.com/rust-lang/project-rfc-2229/issues/46
+                if let Some(local_def_id) = def_id.as_local() {
+                    let tables = self.ecx.tcx.typeck(local_def_id);
+                    if let Some(captured_place) =
+                        tables.closure_min_captures_flattened(*def_id).nth(field)
+                    {
+                        // Sometimes the index is beyond the number of upvars (seen
+                        // for a generator).
+                        let var_hir_id = captured_place.get_root_variable();
+                        let node = self.ecx.tcx.hir().get(var_hir_id);
+                        if let hir::Node::Binding(pat) = node {
+                            if let hir::PatKind::Binding(_, _, ident, _) = pat.kind {
+                                name = Some(ident.name);
+                            }
+                        }
+                    }
+                }
+
+                PathElem::CapturedVar(name.unwrap_or_else(|| {
+                    // Fall back to showing the field index.
+                    sym::integer(field)
+                }))
+            }
+
+            // tuples
+            ty::Tuple(_) => PathElem::TupleElem(field),
+
+            // enums
+            ty::Adt(def, ..) if def.is_enum() => {
+                // we might be projecting *to* a variant, or to a field *in* a variant.
+                match layout.variants {
+                    Variants::Single { index } => {
+                        // Inside a variant
+                        PathElem::Field(def.variants[index].fields[field].ident.name)
+                    }
+                    Variants::Multiple { .. } => bug!("we handled variants above"),
+                }
+            }
+
+            // other ADTs
+            ty::Adt(def, _) => PathElem::Field(def.non_enum_variant().fields[field].ident.name),
+
+            // arrays/slices
+            ty::Array(..) | ty::Slice(..) => PathElem::ArrayElem(field),
+
+            // dyn traits
+            ty::Dynamic(..) => PathElem::DynDowncast,
+
+            // nothing else has an aggregate layout
+            _ => bug!("aggregate_field_path_elem: got non-aggregate type {:?}", layout.ty),
+        }
+    }
+
+    fn with_elem<R>(
+        &mut self,
+        elem: PathElem,
+        f: impl FnOnce(&mut Self) -> InterpResult<'tcx, R>,
+    ) -> InterpResult<'tcx, R> {
+        // Remember the old state
+        let path_len = self.path.len();
+        // Record new element
+        self.path.push(elem);
+        // Perform operation
+        let r = f(self)?;
+        // Undo changes
+        self.path.truncate(path_len);
+        // Done
+        Ok(r)
+    }
+
+    fn check_wide_ptr_meta(
+        &mut self,
+        meta: MemPlaceMeta<M::PointerTag>,
+        pointee: TyAndLayout<'tcx>,
+    ) -> InterpResult<'tcx> {
+        let tail = self.ecx.tcx.struct_tail_erasing_lifetimes(pointee.ty, self.ecx.param_env);
+        match tail.kind() {
+            ty::Dynamic(..) => {
+                let vtable = self.ecx.scalar_to_ptr(meta.unwrap_meta());
+                // Direct call to `check_ptr_access_align` checks alignment even on CTFE machines.
+                try_validation!(
+                    self.ecx.memory.check_ptr_access_align(
+                        vtable,
+                        3 * self.ecx.tcx.data_layout.pointer_size, // drop, size, align
+                        self.ecx.tcx.data_layout.pointer_align.abi,
+                        CheckInAllocMsg::InboundsTest, // will be replaced by a validity message anyway
+                    ),
+                    self.path,
+                    err_ub!(DanglingIntPointer(..)) |
+                    err_ub!(PointerUseAfterFree(..)) =>
+                        { "dangling vtable pointer in wide pointer" },
+                    err_ub!(AlignmentCheckFailed { .. }) =>
+                        { "unaligned vtable pointer in wide pointer" },
+                    err_ub!(PointerOutOfBounds { .. }) =>
+                        { "too small vtable" },
+                );
+                try_validation!(
+                    self.ecx.read_drop_type_from_vtable(vtable),
+                    self.path,
+                    err_ub!(DanglingIntPointer(..)) |
+                    err_ub!(InvalidFunctionPointer(..)) =>
+                        { "invalid drop function pointer in vtable (not pointing to a function)" },
+                    err_ub!(InvalidVtableDropFn(..)) =>
+                        { "invalid drop function pointer in vtable (function has incompatible signature)" },
+                );
+                try_validation!(
+                    self.ecx.read_size_and_align_from_vtable(vtable),
+                    self.path,
+                    err_ub!(InvalidVtableSize) =>
+                        { "invalid vtable: size is bigger than largest supported object" },
+                    err_ub!(InvalidVtableAlignment(msg)) =>
+                        { "invalid vtable: alignment {}", msg },
+                    err_unsup!(ReadPointerAsBytes) => { "invalid size or align in vtable" },
+                );
+                // FIXME: More checks for the vtable.
+            }
+            ty::Slice(..) | ty::Str => {
+                let _len = try_validation!(
+                    meta.unwrap_meta().to_machine_usize(self.ecx),
+                    self.path,
+                    err_unsup!(ReadPointerAsBytes) => { "non-integer slice length in wide pointer" },
+                );
+                // We do not check that `len * elem_size <= isize::MAX`:
+                // that is only required for references, and there it falls out of the
+                // "dereferenceable" check performed by Stacked Borrows.
+            }
+            ty::Foreign(..) => {
+                // Unsized, but not wide.
+            }
+            _ => bug!("Unexpected unsized type tail: {:?}", tail),
+        }
+
+        Ok(())
+    }
+
+    /// Check a reference or `Box`.
+    fn check_safe_pointer(
+        &mut self,
+        value: &OpTy<'tcx, M::PointerTag>,
+        kind: &str,
+    ) -> InterpResult<'tcx> {
+        let value = try_validation!(
+            self.ecx.read_immediate(value),
+            self.path,
+            err_unsup!(ReadPointerAsBytes) => { "part of a pointer" } expected { "a proper pointer or integer value" },
+        );
+        // Handle wide pointers.
+        // Check metadata early, for better diagnostics
+        let place = try_validation!(
+            self.ecx.ref_to_mplace(&value),
+            self.path,
+            err_ub!(InvalidUninitBytes(None)) => { "uninitialized {}", kind },
+        );
+        if place.layout.is_unsized() {
+            self.check_wide_ptr_meta(place.meta, place.layout)?;
+        }
+        // Make sure this is dereferenceable and all.
+        let size_and_align = try_validation!(
+            self.ecx.size_and_align_of_mplace(&place),
+            self.path,
+            err_ub!(InvalidMeta(msg)) => { "invalid {} metadata: {}", kind, msg },
+        );
+        let (size, align) = size_and_align
+            // for the purpose of validity, consider foreign types to have
+            // alignment and size determined by the layout (size will be 0,
+            // alignment should take attributes into account).
+            .unwrap_or_else(|| (place.layout.size, place.layout.align.abi));
+        // Direct call to `check_ptr_access_align` checks alignment even on CTFE machines.
+        try_validation!(
+            self.ecx.memory.check_ptr_access_align(
+                place.ptr,
+                size,
+                align,
+                CheckInAllocMsg::InboundsTest, // will be replaced by a validity message anyway
+            ),
+            self.path,
+            err_ub!(AlignmentCheckFailed { required, has }) =>
+                {
+                    "an unaligned {} (required {} byte alignment but found {})",
+                    kind,
+                    required.bytes(),
+                    has.bytes()
+                },
+            err_ub!(DanglingIntPointer(0, _)) =>
+                { "a null {}", kind },
+            err_ub!(DanglingIntPointer(i, _)) =>
+                { "a dangling {} (address 0x{:x} is unallocated)", kind, i },
+            err_ub!(PointerOutOfBounds { .. }) =>
+                { "a dangling {} (going beyond the bounds of its allocation)", kind },
+            // This cannot happen during const-eval (because interning already detects
+            // dangling pointers), but it can happen in Miri.
+            err_ub!(PointerUseAfterFree(..)) =>
+                { "a dangling {} (use-after-free)", kind },
+        );
+        // Recursive checking
+        if let Some(ref mut ref_tracking) = self.ref_tracking {
+            // Proceed recursively even for ZST, no reason to skip them!
+            // `!` is a ZST and we want to validate it.
+            // Skip validation entirely for some external statics
+            if let Ok((alloc_id, _offset, _ptr)) = self.ecx.memory.ptr_try_get_alloc(place.ptr) {
+                // not a ZST
+                let alloc_kind = self.ecx.tcx.get_global_alloc(alloc_id);
+                if let Some(GlobalAlloc::Static(did)) = alloc_kind {
+                    assert!(!self.ecx.tcx.is_thread_local_static(did));
+                    assert!(self.ecx.tcx.is_static(did));
+                    if matches!(
+                        self.ctfe_mode,
+                        Some(CtfeValidationMode::Const { allow_static_ptrs: false, .. })
+                    ) {
+                        // See const_eval::machine::MemoryExtra::can_access_statics for why
+                        // this check is so important.
+                        // This check is reachable when the const just referenced the static,
+                        // but never read it (so we never entered `before_access_global`).
+                        throw_validation_failure!(self.path,
+                            { "a {} pointing to a static variable", kind }
+                        );
+                    }
+                    // We skip checking other statics. These statics must be sound by
+                    // themselves, and the only way to get broken statics here is by using
+                    // unsafe code.
+                    // The reason we don't check other statics is twofold. First, in all
+                    // sound cases, the static was already validated on its own; second, we
+                    // would trigger cycle errors if we tried to compute the value of the other
+                    // static and that static refers back to us.
+                    // We might miss const-invalid data,
+                    // but things are still sound otherwise (in particular re: consts
+                    // referring to statics).
+                    return Ok(());
+                }
+            }
+            let path = &self.path;
+            ref_tracking.track(place, || {
+                // We need to clone the path anyway, make sure it gets created
+                // with enough space for the additional `Deref`.
+                let mut new_path = Vec::with_capacity(path.len() + 1);
+                new_path.clone_from(path);
+                new_path.push(PathElem::Deref);
+                new_path
+            });
+        }
+        Ok(())
+    }
+
+    fn read_scalar(
+        &self,
+        op: &OpTy<'tcx, M::PointerTag>,
+    ) -> InterpResult<'tcx, ScalarMaybeUninit<M::PointerTag>> {
+        Ok(try_validation!(
+            self.ecx.read_scalar(op),
+            self.path,
+            err_unsup!(ReadPointerAsBytes) => { "(potentially part of) a pointer" } expected { "plain (non-pointer) bytes" },
+        ))
+    }
+
+    /// Check if this is a value of primitive type, and if so, check the validity of the value
+    /// at that type.  Return `true` if the type is indeed primitive.
+    fn try_visit_primitive(
+        &mut self,
+        value: &OpTy<'tcx, M::PointerTag>,
+    ) -> InterpResult<'tcx, bool> {
+        // Go over all the primitive types
+        let ty = value.layout.ty;
+        match ty.kind() {
+            ty::Bool => {
+                let value = self.read_scalar(value)?;
+                try_validation!(
+                    value.to_bool(),
+                    self.path,
+                    err_ub!(InvalidBool(..)) | err_ub!(InvalidUninitBytes(None)) =>
+                        { "{}", value } expected { "a boolean" },
+                );
+                Ok(true)
+            }
+            ty::Char => {
+                let value = self.read_scalar(value)?;
+                try_validation!(
+                    value.to_char(),
+                    self.path,
+                    err_ub!(InvalidChar(..)) | err_ub!(InvalidUninitBytes(None)) =>
+                        { "{}", value } expected { "a valid unicode scalar value (in `0..=0x10FFFF` but not in `0xD800..=0xDFFF`)" },
+                );
+                Ok(true)
+            }
+            ty::Float(_) | ty::Int(_) | ty::Uint(_) => {
+                let value = self.read_scalar(value)?;
+                // NOTE: Keep this in sync with the array optimization for int/float
+                // types below!
+                if self.ctfe_mode.is_some() {
+                    // Integers/floats in CTFE: Must be scalar bits, pointers are dangerous
+                    let is_bits = value.check_init().map_or(false, |v| v.try_to_int().is_ok());
+                    if !is_bits {
+                        throw_validation_failure!(self.path,
+                            { "{}", value } expected { "initialized plain (non-pointer) bytes" }
+                        )
+                    }
+                } else {
+                    // At run-time, for now, we accept *anything* for these types, including
+                    // uninit. We should fix that, but let's start low.
+                }
+                Ok(true)
+            }
+            ty::RawPtr(..) => {
+                // We are conservative with uninit for integers, but try to
+                // actually enforce the strict rules for raw pointers (mostly because
+                // that lets us re-use `ref_to_mplace`).
+                let place = try_validation!(
+                    self.ecx.read_immediate(value).and_then(|ref i| self.ecx.ref_to_mplace(i)),
+                    self.path,
+                    err_ub!(InvalidUninitBytes(None)) => { "uninitialized raw pointer" },
+                    err_unsup!(ReadPointerAsBytes) => { "part of a pointer" } expected { "a proper pointer or integer value" },
+                );
+                if place.layout.is_unsized() {
+                    self.check_wide_ptr_meta(place.meta, place.layout)?;
+                }
+                Ok(true)
+            }
+            ty::Ref(_, ty, mutbl) => {
+                if matches!(self.ctfe_mode, Some(CtfeValidationMode::Const { .. }))
+                    && *mutbl == hir::Mutability::Mut
+                {
+                    // A mutable reference inside a const? That does not seem right (except if it is
+                    // a ZST).
+                    let layout = self.ecx.layout_of(ty)?;
+                    if !layout.is_zst() {
+                        throw_validation_failure!(self.path, { "mutable reference in a `const`" });
+                    }
+                }
+                self.check_safe_pointer(value, "reference")?;
+                Ok(true)
+            }
+            ty::Adt(def, ..) if def.is_box() => {
+                self.check_safe_pointer(value, "box")?;
+                Ok(true)
+            }
+            ty::FnPtr(_sig) => {
+                let value = try_validation!(
+                    self.ecx.read_immediate(value),
+                    self.path,
+                    err_unsup!(ReadPointerAsBytes) => { "part of a pointer" } expected { "a proper pointer or integer value" },
+                );
+                // Make sure we print a `ScalarMaybeUninit` (and not an `ImmTy`) in the error
+                // message below.
+                let value = value.to_scalar_or_uninit();
+                let _fn = try_validation!(
+                    value.check_init().and_then(|ptr| self.ecx.memory.get_fn(self.ecx.scalar_to_ptr(ptr))),
+                    self.path,
+                    err_ub!(DanglingIntPointer(..)) |
+                    err_ub!(InvalidFunctionPointer(..)) |
+                    err_ub!(InvalidUninitBytes(None)) =>
+                        { "{}", value } expected { "a function pointer" },
+                );
+                // FIXME: Check if the signature matches
+                Ok(true)
+            }
+            ty::Never => throw_validation_failure!(self.path, { "a value of the never type `!`" }),
+            ty::Foreign(..) | ty::FnDef(..) => {
+                // Nothing to check.
+                Ok(true)
+            }
+            // The above should be all the primitive types. The rest is compound, we
+            // check them by visiting their fields/variants.
+            ty::Adt(..)
+            | ty::Tuple(..)
+            | ty::Array(..)
+            | ty::Slice(..)
+            | ty::Str
+            | ty::Dynamic(..)
+            | ty::Closure(..)
+            | ty::Generator(..) => Ok(false),
+            // Some types only occur during typechecking, they have no layout.
+            // We should not see them here and we could not check them anyway.
+            ty::Error(_)
+            | ty::Infer(..)
+            | ty::Placeholder(..)
+            | ty::Bound(..)
+            | ty::Param(..)
+            | ty::Opaque(..)
+            | ty::Projection(..)
+            | ty::GeneratorWitness(..) => bug!("Encountered invalid type {:?}", ty),
+        }
+    }
+
+    fn visit_scalar(
+        &mut self,
+        op: &OpTy<'tcx, M::PointerTag>,
+        scalar_layout: &ScalarAbi,
+    ) -> InterpResult<'tcx> {
+        let value = self.read_scalar(op)?;
+        let valid_range = scalar_layout.valid_range.clone();
+        let WrappingRange { start: lo, end: hi } = valid_range;
+        // Determine the allowed range
+        // `max_hi` is as big as the size fits
+        let max_hi = u128::MAX >> (128 - op.layout.size.bits());
+        assert!(hi <= max_hi);
+        // We could also write `(hi + 1) % (max_hi + 1) == lo` but `max_hi + 1` overflows for `u128`
+        if (lo == 0 && hi == max_hi) || (hi + 1 == lo) {
+            // Nothing to check
+            return Ok(());
+        }
+        // At least one value is excluded. Get the bits.
+        let value = try_validation!(
+            value.check_init(),
+            self.path,
+            err_ub!(InvalidUninitBytes(None)) => { "{}", value }
+                expected { "something {}", wrapping_range_format(valid_range, max_hi) },
+        );
+        let bits = match value.try_to_int() {
+            Err(_) => {
+                // So this is a pointer then, and casting to an int failed.
+                // Can only happen during CTFE.
+                let ptr = self.ecx.scalar_to_ptr(value);
+                if lo == 1 && hi == max_hi {
+                    // Only null is the niche.  So make sure the ptr is NOT null.
+                    if self.ecx.memory.ptr_may_be_null(ptr) {
+                        throw_validation_failure!(self.path,
+                            { "a potentially null pointer" }
+                            expected {
+                                "something that cannot possibly fail to be {}",
+                                wrapping_range_format(valid_range, max_hi)
+                            }
+                        )
+                    }
+                    return Ok(());
+                } else {
+                    // Conservatively, we reject, because the pointer *could* have a bad
+                    // value.
+                    throw_validation_failure!(self.path,
+                        { "a pointer" }
+                        expected {
+                            "something that cannot possibly fail to be {}",
+                            wrapping_range_format(valid_range, max_hi)
+                        }
+                    )
+                }
+            }
+            Ok(int) => int.assert_bits(op.layout.size),
+        };
+        // Now compare. This is slightly subtle because this is a special "wrap-around" range.
+        if valid_range.contains(bits) {
+            Ok(())
+        } else {
+            throw_validation_failure!(self.path,
+                { "{}", bits }
+                expected { "something {}", wrapping_range_format(valid_range, max_hi) }
+            )
+        }
+    }
+}
+
+impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
+    for ValidityVisitor<'rt, 'mir, 'tcx, M>
+{
+    type V = OpTy<'tcx, M::PointerTag>;
+
+    #[inline(always)]
+    fn ecx(&self) -> &InterpCx<'mir, 'tcx, M> {
+        &self.ecx
+    }
+
+    fn read_discriminant(
+        &mut self,
+        op: &OpTy<'tcx, M::PointerTag>,
+    ) -> InterpResult<'tcx, VariantIdx> {
+        self.with_elem(PathElem::EnumTag, move |this| {
+            Ok(try_validation!(
+                this.ecx.read_discriminant(op),
+                this.path,
+                err_ub!(InvalidTag(val)) =>
+                    { "{}", val } expected { "a valid enum tag" },
+                err_ub!(InvalidUninitBytes(None)) =>
+                    { "uninitialized bytes" } expected { "a valid enum tag" },
+                err_unsup!(ReadPointerAsBytes) =>
+                    { "a pointer" } expected { "a valid enum tag" },
+            )
+            .1)
+        })
+    }
+
+    #[inline]
+    fn visit_field(
+        &mut self,
+        old_op: &OpTy<'tcx, M::PointerTag>,
+        field: usize,
+        new_op: &OpTy<'tcx, M::PointerTag>,
+    ) -> InterpResult<'tcx> {
+        let elem = self.aggregate_field_path_elem(old_op.layout, field);
+        self.with_elem(elem, move |this| this.visit_value(new_op))
+    }
+
+    #[inline]
+    fn visit_variant(
+        &mut self,
+        old_op: &OpTy<'tcx, M::PointerTag>,
+        variant_id: VariantIdx,
+        new_op: &OpTy<'tcx, M::PointerTag>,
+    ) -> InterpResult<'tcx> {
+        let name = match old_op.layout.ty.kind() {
+            ty::Adt(adt, _) => PathElem::Variant(adt.variants[variant_id].ident.name),
+            // Generators also have variants
+            ty::Generator(..) => PathElem::GeneratorState(variant_id),
+            _ => bug!("Unexpected type with variant: {:?}", old_op.layout.ty),
+        };
+        self.with_elem(name, move |this| this.visit_value(new_op))
+    }
+
+    #[inline(always)]
+    fn visit_union(
+        &mut self,
+        _op: &OpTy<'tcx, M::PointerTag>,
+        _fields: NonZeroUsize,
+    ) -> InterpResult<'tcx> {
+        Ok(())
+    }
+
+    #[inline]
+    fn visit_value(&mut self, op: &OpTy<'tcx, M::PointerTag>) -> InterpResult<'tcx> {
+        trace!("visit_value: {:?}, {:?}", *op, op.layout);
+
+        // Check primitive types -- the leaves of our recursive descent.
+        if self.try_visit_primitive(op)? {
+            return Ok(());
+        }
+        // Sanity check: `builtin_deref` does not know any pointers that are not primitive.
+        assert!(op.layout.ty.builtin_deref(true).is_none());
+
+        // Special check preventing `UnsafeCell` in the inner part of constants
+        if let Some(def) = op.layout.ty.ty_adt_def() {
+            if matches!(self.ctfe_mode, Some(CtfeValidationMode::Const { inner: true, .. }))
+                && Some(def.did) == self.ecx.tcx.lang_items().unsafe_cell_type()
+            {
+                throw_validation_failure!(self.path, { "`UnsafeCell` in a `const`" });
+            }
+        }
+
+        // Recursively walk the value at its type.
+        self.walk_value(op)?;
+
+        // *After* all of this, check the ABI.  We need to check the ABI to handle
+        // types like `NonNull` where the `Scalar` info is more restrictive than what
+        // the fields say (`rustc_layout_scalar_valid_range_start`).
+        // But in most cases, this will just propagate what the fields say,
+        // and then we want the error to point at the field -- so, first recurse,
+        // then check ABI.
+        //
+        // FIXME: We could avoid some redundant checks here. For newtypes wrapping
+        // scalars, we do the same check on every "level" (e.g., first we check
+        // MyNewtype and then the scalar in there).
+        match op.layout.abi {
+            Abi::Uninhabited => {
+                throw_validation_failure!(self.path,
+                    { "a value of uninhabited type {:?}", op.layout.ty }
+                );
+            }
+            Abi::Scalar(ref scalar_layout) => {
+                self.visit_scalar(op, scalar_layout)?;
+            }
+            Abi::ScalarPair { .. } | Abi::Vector { .. } => {
+                // These have fields that we already visited above, so we already checked
+                // all their scalar-level restrictions.
+                // There is also no equivalent to `rustc_layout_scalar_valid_range_start`
+                // that would make skipping them here an issue.
+            }
+            Abi::Aggregate { .. } => {
+                // Nothing to do.
+            }
+        }
+
+        Ok(())
+    }
+
+    fn visit_aggregate(
+        &mut self,
+        op: &OpTy<'tcx, M::PointerTag>,
+        fields: impl Iterator<Item = InterpResult<'tcx, Self::V>>,
+    ) -> InterpResult<'tcx> {
+        match op.layout.ty.kind() {
+            ty::Str => {
+                let mplace = op.assert_mem_place(); // strings are never immediate
+                let len = mplace.len(self.ecx)?;
+                try_validation!(
+                    self.ecx.memory.read_bytes(mplace.ptr, Size::from_bytes(len)),
+                    self.path,
+                    err_ub!(InvalidUninitBytes(..)) => { "uninitialized data in `str`" },
+                    err_unsup!(ReadPointerAsBytes) => { "a pointer in `str`" },
+                );
+            }
+            ty::Array(tys, ..) | ty::Slice(tys)
+                // This optimization applies for types that can hold arbitrary bytes (such as
+                // integer and floating point types) or for structs or tuples with no fields.
+                // FIXME(wesleywiser) This logic could be extended further to arbitrary structs
+                // or tuples made up of integer/floating point types or inhabited ZSTs with no
+                // padding.
+                if matches!(tys.kind(), ty::Int(..) | ty::Uint(..) | ty::Float(..))
+                =>
+            {
+                // Optimized handling for arrays of integer/float type.
+
+                // Arrays cannot be immediate, slices are never immediate.
+                let mplace = op.assert_mem_place();
+                // This is the length of the array/slice.
+                let len = mplace.len(self.ecx)?;
+                // This is the element type size.
+                let layout = self.ecx.layout_of(tys)?;
+                // This is the size in bytes of the whole array. (This checks for overflow.)
+                let size = layout.size * len;
+
+                // Optimization: we just check the entire range at once.
+                // NOTE: Keep this in sync with the handling of integer and float
+                // types above, in `visit_primitive`.
+                // In run-time mode, we accept pointers in here.  This is actually more
+                // permissive than a per-element check would be, e.g., we accept
+                // a &[u8] that contains a pointer even though bytewise checking would
+                // reject it.  However, that's good: We don't inherently want
+                // to reject those pointers, we just do not have the machinery to
+                // talk about parts of a pointer.
+                // We also accept uninit, for consistency with the slow path.
+                let alloc = match self.ecx.memory.get(mplace.ptr, size, mplace.align)? {
+                    Some(a) => a,
+                    None => {
+                        // Size 0, nothing more to check.
+                        return Ok(());
+                    }
+                };
+
+                match alloc.check_bytes(
+                    alloc_range(Size::ZERO, size),
+                    /*allow_uninit_and_ptr*/ self.ctfe_mode.is_none(),
+                ) {
+                    // In the happy case, we needn't check anything else.
+                    Ok(()) => {}
+                    // Some error happened, try to provide a more detailed description.
+                    Err(err) => {
+                        // For some errors we might be able to provide extra information.
+                        // (This custom logic does not fit the `try_validation!` macro.)
+                        match err.kind() {
+                            err_ub!(InvalidUninitBytes(Some((_alloc_id, access)))) => {
+                                // Some byte was uninitialized, determine which
+                                // element that byte belongs to so we can
+                                // provide an index.
+                                let i = usize::try_from(
+                                    access.uninit_offset.bytes() / layout.size.bytes(),
+                                )
+                                .unwrap();
+                                self.path.push(PathElem::ArrayElem(i));
+
+                                throw_validation_failure!(self.path, { "uninitialized bytes" })
+                            }
+                            err_unsup!(ReadPointerAsBytes) => {
+                                throw_validation_failure!(self.path, { "a pointer" } expected { "plain (non-pointer) bytes" })
+                            }
+
+                            // Propagate upwards (that will also check for unexpected errors).
+                            _ => return Err(err),
+                        }
+                    }
+                }
+            }
+            // Fast path for arrays and slices of ZSTs. We only need to check a single ZST element
+            // of an array and not all of them, because there's only a single value of a specific
+            // ZST type, so either validation fails for all elements or none.
+            ty::Array(tys, ..) | ty::Slice(tys) if self.ecx.layout_of(tys)?.is_zst() => {
+                // Validate just the first element (if any).
+                self.walk_aggregate(op, fields.take(1))?
+            }
+            _ => {
+                self.walk_aggregate(op, fields)? // default handler
+            }
+        }
+        Ok(())
+    }
+}
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+    fn validate_operand_internal(
+        &self,
+        op: &OpTy<'tcx, M::PointerTag>,
+        path: Vec<PathElem>,
+        ref_tracking: Option<&mut RefTracking<MPlaceTy<'tcx, M::PointerTag>, Vec<PathElem>>>,
+        ctfe_mode: Option<CtfeValidationMode>,
+    ) -> InterpResult<'tcx> {
+        trace!("validate_operand_internal: {:?}, {:?}", *op, op.layout.ty);
+
+        // Construct a visitor
+        let mut visitor = ValidityVisitor { path, ref_tracking, ctfe_mode, ecx: self };
+
+        // Run it.
+        match visitor.visit_value(&op) {
+            Ok(()) => Ok(()),
+            // Pass through validation failures.
+            Err(err) if matches!(err.kind(), err_ub!(ValidationFailure { .. })) => Err(err),
+            // Also pass through InvalidProgram, those just indicate that we could not
+            // validate and each caller will know best what to do with them.
+            Err(err) if matches!(err.kind(), InterpError::InvalidProgram(_)) => Err(err),
+            // Avoid other errors as those do not show *where* in the value the issue lies.
+            Err(err) => {
+                err.print_backtrace();
+                bug!("Unexpected error during validation: {}", err);
+            }
+        }
+    }
+
+    /// This function checks the data at `op` to be const-valid.
+    /// `op` is assumed to cover valid memory if it is an indirect operand.
+    /// It will error if the bits at the destination do not match the ones described by the layout.
+    ///
+    /// `ref_tracking` is used to record references that we encounter so that they
+    /// can be checked recursively by an outside driving loop.
+    ///
+    /// `ctfe_mode` controls whether this must satisfy the rules for constants:
+    /// - no pointers to statics.
+    /// - no `UnsafeCell` or non-ZST `&mut`.
+    #[inline(always)]
+    pub fn const_validate_operand(
+        &self,
+        op: &OpTy<'tcx, M::PointerTag>,
+        path: Vec<PathElem>,
+        ref_tracking: &mut RefTracking<MPlaceTy<'tcx, M::PointerTag>, Vec<PathElem>>,
+        ctfe_mode: CtfeValidationMode,
+    ) -> InterpResult<'tcx> {
+        self.validate_operand_internal(op, path, Some(ref_tracking), Some(ctfe_mode))
+    }
+
+    /// This function checks the data at `op` to be runtime-valid.
+    /// `op` is assumed to cover valid memory if it is an indirect operand.
+    /// It will error if the bits at the destination do not match the ones described by the layout.
+    #[inline(always)]
+    pub fn validate_operand(&self, op: &OpTy<'tcx, M::PointerTag>) -> InterpResult<'tcx> {
+        self.validate_operand_internal(op, vec![], None, None)
+    }
+}
diff --git a/compiler/rustc_const_eval/src/interpret/visitor.rs b/compiler/rustc_const_eval/src/interpret/visitor.rs
new file mode 100644
index 00000000000..679d30227f1
--- /dev/null
+++ b/compiler/rustc_const_eval/src/interpret/visitor.rs
@@ -0,0 +1,278 @@
+//! Visitor for a run-time value with a given layout: Traverse enums, structs and other compound
+//! types until we arrive at the leaves, with custom handling for primitive types.
+
+use rustc_middle::mir::interpret::InterpResult;
+use rustc_middle::ty;
+use rustc_middle::ty::layout::TyAndLayout;
+use rustc_target::abi::{FieldsShape, VariantIdx, Variants};
+
+use std::num::NonZeroUsize;
+
+use super::{InterpCx, MPlaceTy, Machine, OpTy};
+
+// A thing that we can project into, and that has a layout.
+// This wouldn't have to depend on `Machine` but with the current type inference,
+// that's just more convenient to work with (avoids repeating all the `Machine` bounds).
+pub trait Value<'mir, 'tcx, M: Machine<'mir, 'tcx>>: Copy {
+    /// Gets this value's layout.
+    fn layout(&self) -> TyAndLayout<'tcx>;
+
+    /// Makes this into an `OpTy`.
+    fn to_op(&self, ecx: &InterpCx<'mir, 'tcx, M>)
+    -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>>;
+
+    /// Creates this from an `MPlaceTy`.
+    fn from_mem_place(mplace: MPlaceTy<'tcx, M::PointerTag>) -> Self;
+
+    /// Projects to the given enum variant.
+    fn project_downcast(
+        &self,
+        ecx: &InterpCx<'mir, 'tcx, M>,
+        variant: VariantIdx,
+    ) -> InterpResult<'tcx, Self>;
+
+    /// Projects to the n-th field.
+    fn project_field(
+        &self,
+        ecx: &InterpCx<'mir, 'tcx, M>,
+        field: usize,
+    ) -> InterpResult<'tcx, Self>;
+}
+
+// Operands and memory-places are both values.
+// Places in general are not, because `place_field` has to do `force_allocation`.
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> Value<'mir, 'tcx, M> for OpTy<'tcx, M::PointerTag> {
+    #[inline(always)]
+    fn layout(&self) -> TyAndLayout<'tcx> {
+        self.layout
+    }
+
+    #[inline(always)]
+    fn to_op(
+        &self,
+        _ecx: &InterpCx<'mir, 'tcx, M>,
+    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
+        Ok(*self)
+    }
+
+    #[inline(always)]
+    fn from_mem_place(mplace: MPlaceTy<'tcx, M::PointerTag>) -> Self {
+        mplace.into()
+    }
+
+    #[inline(always)]
+    fn project_downcast(
+        &self,
+        ecx: &InterpCx<'mir, 'tcx, M>,
+        variant: VariantIdx,
+    ) -> InterpResult<'tcx, Self> {
+        ecx.operand_downcast(self, variant)
+    }
+
+    #[inline(always)]
+    fn project_field(
+        &self,
+        ecx: &InterpCx<'mir, 'tcx, M>,
+        field: usize,
+    ) -> InterpResult<'tcx, Self> {
+        ecx.operand_field(self, field)
+    }
+}
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> Value<'mir, 'tcx, M>
+    for MPlaceTy<'tcx, M::PointerTag>
+{
+    #[inline(always)]
+    fn layout(&self) -> TyAndLayout<'tcx> {
+        self.layout
+    }
+
+    #[inline(always)]
+    fn to_op(
+        &self,
+        _ecx: &InterpCx<'mir, 'tcx, M>,
+    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
+        Ok((*self).into())
+    }
+
+    #[inline(always)]
+    fn from_mem_place(mplace: MPlaceTy<'tcx, M::PointerTag>) -> Self {
+        mplace
+    }
+
+    #[inline(always)]
+    fn project_downcast(
+        &self,
+        ecx: &InterpCx<'mir, 'tcx, M>,
+        variant: VariantIdx,
+    ) -> InterpResult<'tcx, Self> {
+        ecx.mplace_downcast(self, variant)
+    }
+
+    #[inline(always)]
+    fn project_field(
+        &self,
+        ecx: &InterpCx<'mir, 'tcx, M>,
+        field: usize,
+    ) -> InterpResult<'tcx, Self> {
+        ecx.mplace_field(self, field)
+    }
+}
+
+macro_rules! make_value_visitor {
+    ($visitor_trait_name:ident, $($mutability:ident)?) => {
+        // How to traverse a value and what to do when we are at the leaves.
+        pub trait $visitor_trait_name<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>>: Sized {
+            type V: Value<'mir, 'tcx, M>;
+
+            /// The visitor must have an `InterpCx` in it.
+            fn ecx(&$($mutability)? self)
+                -> &$($mutability)? InterpCx<'mir, 'tcx, M>;
+
+            /// `read_discriminant` can be hooked for better error messages.
+            #[inline(always)]
+            fn read_discriminant(
+                &mut self,
+                op: &OpTy<'tcx, M::PointerTag>,
+            ) -> InterpResult<'tcx, VariantIdx> {
+                Ok(self.ecx().read_discriminant(op)?.1)
+            }
+
+            // Recursive actions, ready to be overloaded.
+            /// Visits the given value, dispatching as appropriate to more specialized visitors.
+            #[inline(always)]
+            fn visit_value(&mut self, v: &Self::V) -> InterpResult<'tcx>
+            {
+                self.walk_value(v)
+            }
+            /// Visits the given value as a union. No automatic recursion can happen here.
+            #[inline(always)]
+            fn visit_union(&mut self, _v: &Self::V, _fields: NonZeroUsize) -> InterpResult<'tcx>
+            {
+                Ok(())
+            }
+            /// Visits this value as an aggregate; the iterator yields all the fields
+            /// (each still wrapped in an `InterpResult`, so error handling is up to you).
+            /// Recurses into the fields.
+            #[inline(always)]
+            fn visit_aggregate(
+                &mut self,
+                v: &Self::V,
+                fields: impl Iterator<Item=InterpResult<'tcx, Self::V>>,
+            ) -> InterpResult<'tcx> {
+                self.walk_aggregate(v, fields)
+            }
+
+            /// Called each time we recurse down to a field of a "product-like" aggregate
+            /// (structs, tuples, arrays and the like, but not enums), passing in old (outer)
+            /// and new (inner) value.
+            /// This gives the visitor the chance to track the stack of nested fields that
+            /// we are descending through.
+            #[inline(always)]
+            fn visit_field(
+                &mut self,
+                _old_val: &Self::V,
+                _field: usize,
+                new_val: &Self::V,
+            ) -> InterpResult<'tcx> {
+                self.visit_value(new_val)
+            }
+            /// Called when recursing into an enum variant.
+            /// This gives the visitor the chance to track the stack of nested fields that
+            /// we are descending through.
+            #[inline(always)]
+            fn visit_variant(
+                &mut self,
+                _old_val: &Self::V,
+                _variant: VariantIdx,
+                new_val: &Self::V,
+            ) -> InterpResult<'tcx> {
+                self.visit_value(new_val)
+            }
+
+            // Default recursors. Not meant to be overloaded.
+            fn walk_aggregate(
+                &mut self,
+                v: &Self::V,
+                fields: impl Iterator<Item=InterpResult<'tcx, Self::V>>,
+            ) -> InterpResult<'tcx> {
+                // Now iterate over it.
+                for (idx, field_val) in fields.enumerate() {
+                    self.visit_field(v, idx, &field_val?)?;
+                }
+                Ok(())
+            }
+            fn walk_value(&mut self, v: &Self::V) -> InterpResult<'tcx>
+            {
+                trace!("walk_value: type: {}", v.layout().ty);
+
+                // Special treatment for special types, where the (static) layout is not sufficient.
+                match *v.layout().ty.kind() {
+                    // If it is a trait object, switch to the real type that was used to create it.
+                    ty::Dynamic(..) => {
+                        // immediate trait objects are not a thing
+                        let op = v.to_op(self.ecx())?;
+                        let dest = op.assert_mem_place();
+                        let inner = self.ecx().unpack_dyn_trait(&dest)?.1;
+                        trace!("walk_value: dyn object layout: {:#?}", inner.layout);
+                        // recurse with the inner type
+                        return self.visit_field(&v, 0, &Value::from_mem_place(inner));
+                    },
+                    // Slices do not need special handling here: they have `Array` field
+                    // placement with length 0, so we enter the `Array` case below which
+                    // indirectly uses the metadata to determine the actual length.
+                    _ => {},
+                };
+
+                // Visit the fields of this value.
+                match v.layout().fields {
+                    FieldsShape::Primitive => {},
+                    FieldsShape::Union(fields) => {
+                        self.visit_union(v, fields)?;
+                    },
+                    FieldsShape::Arbitrary { ref offsets, .. } => {
+                        // FIXME: We collect in a vec because otherwise there are lifetime
+                        // errors: Projecting to a field needs access to `ecx`.
+                        let fields: Vec<InterpResult<'tcx, Self::V>> =
+                            (0..offsets.len()).map(|i| {
+                                v.project_field(self.ecx(), i)
+                            })
+                            .collect();
+                        self.visit_aggregate(v, fields.into_iter())?;
+                    },
+                    FieldsShape::Array { .. } => {
+                        // Let's get an mplace first.
+                        let op = v.to_op(self.ecx())?;
+                        let mplace = op.assert_mem_place();
+                        // Now we can go over all the fields.
+                        // This uses the *run-time length*, i.e., if we are a slice,
+                        // the dynamic info from the metadata is used.
+                        let iter = self.ecx().mplace_array_fields(&mplace)?
+                            .map(|f| f.and_then(|f| {
+                                Ok(Value::from_mem_place(f))
+                            }));
+                        self.visit_aggregate(v, iter)?;
+                    }
+                }
+
+                match v.layout().variants {
+                    // If this is a multi-variant layout, find the right variant and proceed
+                    // with *its* fields.
+                    Variants::Multiple { .. } => {
+                        let op = v.to_op(self.ecx())?;
+                        let idx = self.read_discriminant(&op)?;
+                        let inner = v.project_downcast(self.ecx(), idx)?;
+                        trace!("walk_value: variant layout: {:#?}", inner.layout());
+                        // recurse with the inner type
+                        self.visit_variant(v, idx, &inner)
+                    }
+                    // For single-variant layouts, we already did anything there is to do.
+                    Variants::Single { .. } => Ok(())
+                }
+            }
+        }
+    }
+}
+
+make_value_visitor!(ValueVisitor,);
+make_value_visitor!(MutValueVisitor, mut);
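The two `make_value_visitor!` invocations above expand to a shared-reference (`ValueVisitor`) and a mutable-reference (`MutValueVisitor`) version of the same visit/walk scheme: every `visit_*` hook defaults to the corresponding `walk_*` recursor, so an implementor only overrides the hooks it cares about (as `ValidityVisitor` does). A minimal standalone sketch of that pattern, with an invented `Node` type standing in for `OpTy`/`MPlaceTy`:

// Toy value tree standing in for a layout-driven value.
enum Node {
    Leaf(i64),
    Aggregate(Vec<Node>),
}

trait NodeVisitor: Sized {
    // Hooks with default implementations that forward to the recursors,
    // mirroring `visit_value` -> `walk_value` in the generated trait.
    fn visit_node(&mut self, node: &Node) {
        self.walk_node(node)
    }
    fn visit_leaf(&mut self, _value: i64) {}

    // Default recursor; not meant to be overridden.
    fn walk_node(&mut self, node: &Node) {
        match node {
            Node::Leaf(v) => self.visit_leaf(*v),
            Node::Aggregate(fields) => {
                for field in fields {
                    self.visit_node(field);
                }
            }
        }
    }
}

// An implementor only overrides the leaf hook and inherits the traversal.
struct SumLeaves(i64);
impl NodeVisitor for SumLeaves {
    fn visit_leaf(&mut self, value: i64) {
        self.0 += value;
    }
}

fn main() {
    let tree = Node::Aggregate(vec![
        Node::Leaf(1),
        Node::Aggregate(vec![Node::Leaf(2), Node::Leaf(3)]),
    ]);
    let mut sum = SumLeaves(0);
    sum.visit_node(&tree);
    assert_eq!(sum.0, 6);
}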
diff --git a/compiler/rustc_const_eval/src/lib.rs b/compiler/rustc_const_eval/src/lib.rs
new file mode 100644
index 00000000000..c24c9a9b5ee
--- /dev/null
+++ b/compiler/rustc_const_eval/src/lib.rs
@@ -0,0 +1,56 @@
+/*!
+
+Rust MIR: a lowered representation of Rust.
+
+*/
+
+#![feature(assert_matches)]
+#![cfg_attr(bootstrap, feature(bindings_after_at))]
+#![feature(bool_to_option)]
+#![feature(box_patterns)]
+#![feature(control_flow_enum)]
+#![feature(crate_visibility_modifier)]
+#![feature(decl_macro)]
+#![feature(exact_size_is_empty)]
+#![feature(in_band_lifetimes)]
+#![feature(iter_zip)]
+#![feature(map_try_insert)]
+#![feature(min_specialization)]
+#![feature(slice_ptr_get)]
+#![feature(option_get_or_insert_default)]
+#![feature(never_type)]
+#![feature(trait_alias)]
+#![feature(trusted_len)]
+#![feature(trusted_step)]
+#![feature(try_blocks)]
+
+#[macro_use]
+extern crate tracing;
+#[macro_use]
+extern crate rustc_middle;
+
+pub mod const_eval;
+pub mod interpret;
+pub mod transform;
+pub mod util;
+
+use rustc_middle::ty::query::Providers;
+
+pub fn provide(providers: &mut Providers) {
+    const_eval::provide(providers);
+    providers.eval_to_const_value_raw = const_eval::eval_to_const_value_raw_provider;
+    providers.eval_to_allocation_raw = const_eval::eval_to_allocation_raw_provider;
+    providers.const_caller_location = const_eval::const_caller_location;
+    providers.destructure_const = |tcx, param_env_and_value| {
+        let (param_env, value) = param_env_and_value.into_parts();
+        const_eval::destructure_const(tcx, param_env, value)
+    };
+    providers.const_to_valtree = |tcx, param_env_and_value| {
+        let (param_env, raw) = param_env_and_value.into_parts();
+        const_eval::const_to_valtree(tcx, param_env, raw)
+    };
+    providers.deref_const = |tcx, param_env_and_value| {
+        let (param_env, value) = param_env_and_value.into_parts();
+        const_eval::deref_const(tcx, param_env, value)
+    };
+}
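`provide` only installs function pointers into the `Providers` table; the query system later invokes them on demand and memoizes the results. A much-reduced, purely illustrative sketch of that table-of-providers shape (the `Providers` struct and its fields below are invented, not the real rustc query table):

// Hypothetical, much-reduced provider table: each query is a plain fn pointer
// that a downstream crate can overwrite in its own `provide`.
struct Providers {
    eval_to_const_value: fn(u32) -> u64,
    layout_of: fn(u32) -> usize,
}

impl Default for Providers {
    fn default() -> Self {
        // Placeholder providers that panic until someone registers real ones.
        Providers {
            eval_to_const_value: |_| panic!("no provider registered"),
            layout_of: |_| panic!("no provider registered"),
        }
    }
}

// Analogue of this crate's `provide`: overwrite the entries it implements.
fn provide(providers: &mut Providers) {
    providers.eval_to_const_value = |x| u64::from(x) * 2;
    providers.layout_of = |x| x as usize + 8;
}

fn main() {
    let mut providers = Providers::default();
    provide(&mut providers);
    assert_eq!((providers.eval_to_const_value)(21), 42);
    assert_eq!((providers.layout_of)(8), 16);
}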
diff --git a/compiler/rustc_const_eval/src/transform/check_consts/check.rs b/compiler/rustc_const_eval/src/transform/check_consts/check.rs
new file mode 100644
index 00000000000..d02b4286c17
--- /dev/null
+++ b/compiler/rustc_const_eval/src/transform/check_consts/check.rs
@@ -0,0 +1,1110 @@
+//! The `Visitor` responsible for actually checking a `mir::Body` for invalid operations.
+
+use rustc_errors::{Applicability, Diagnostic, ErrorReported};
+use rustc_hir::def_id::DefId;
+use rustc_hir::{self as hir, HirId, LangItem};
+use rustc_index::bit_set::BitSet;
+use rustc_infer::infer::TyCtxtInferExt;
+use rustc_infer::traits::{ImplSource, Obligation, ObligationCause};
+use rustc_middle::mir::visit::{MutatingUseContext, NonMutatingUseContext, PlaceContext, Visitor};
+use rustc_middle::mir::*;
+use rustc_middle::ty::cast::CastTy;
+use rustc_middle::ty::subst::{GenericArgKind, InternalSubsts};
+use rustc_middle::ty::{self, adjustment::PointerCast, Instance, InstanceDef, Ty, TyCtxt};
+use rustc_middle::ty::{Binder, TraitPredicate, TraitRef};
+use rustc_mir_dataflow::impls::MaybeMutBorrowedLocals;
+use rustc_mir_dataflow::{self, Analysis};
+use rustc_span::{sym, Span, Symbol};
+use rustc_trait_selection::traits::error_reporting::InferCtxtExt;
+use rustc_trait_selection::traits::{self, SelectionContext, TraitEngine};
+
+use std::mem;
+use std::ops::Deref;
+
+use super::ops::{self, NonConstOp, Status};
+use super::qualifs::{self, CustomEq, HasMutInterior, NeedsDrop};
+use super::resolver::FlowSensitiveAnalysis;
+use super::{is_lang_panic_fn, ConstCx, Qualif};
+use crate::const_eval::is_unstable_const_fn;
+
+// We are using `MaybeMutBorrowedLocals` as a proxy for whether an item may have been mutated
+// through a pointer prior to the given point. This is okay even though `MaybeMutBorrowedLocals`
+// kills locals upon `StorageDead` because a local will never be used after a `StorageDead`.
+type IndirectlyMutableResults<'mir, 'tcx> =
+    rustc_mir_dataflow::ResultsCursor<'mir, 'tcx, MaybeMutBorrowedLocals<'mir, 'tcx>>;
+
+type QualifResults<'mir, 'tcx, Q> =
+    rustc_mir_dataflow::ResultsCursor<'mir, 'tcx, FlowSensitiveAnalysis<'mir, 'mir, 'tcx, Q>>;
+
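+/// Lazily-constructed dataflow results used during const checking; each cursor is built on first
+/// use and then reused for subsequent queries on the same body.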
+#[derive(Default)]
+pub struct Qualifs<'mir, 'tcx> {
+    has_mut_interior: Option<QualifResults<'mir, 'tcx, HasMutInterior>>,
+    needs_drop: Option<QualifResults<'mir, 'tcx, NeedsDrop>>,
+    indirectly_mutable: Option<IndirectlyMutableResults<'mir, 'tcx>>,
+}
+
+impl Qualifs<'mir, 'tcx> {
+    pub fn indirectly_mutable(
+        &mut self,
+        ccx: &'mir ConstCx<'mir, 'tcx>,
+        local: Local,
+        location: Location,
+    ) -> bool {
+        let indirectly_mutable = self.indirectly_mutable.get_or_insert_with(|| {
+            let ConstCx { tcx, body, param_env, .. } = *ccx;
+
+            // We can use `unsound_ignore_borrow_on_drop` here because custom drop impls are not
+            // allowed in a const.
+            //
+            // FIXME(ecstaticmorse): Someday we want to allow custom drop impls. How do we do this
+            // without breaking stable code?
+            MaybeMutBorrowedLocals::mut_borrows_only(tcx, &body, param_env)
+                .unsound_ignore_borrow_on_drop()
+                .into_engine(tcx, &body)
+                .pass_name("const_qualification")
+                .iterate_to_fixpoint()
+                .into_results_cursor(&body)
+        });
+
+        indirectly_mutable.seek_before_primary_effect(location);
+        indirectly_mutable.get().contains(local)
+    }
+
+    /// Returns `true` if `local` is `NeedsDrop` at the given `Location`.
+    ///
+    /// Only updates the cursor if absolutely necessary.
+    pub fn needs_drop(
+        &mut self,
+        ccx: &'mir ConstCx<'mir, 'tcx>,
+        local: Local,
+        location: Location,
+    ) -> bool {
+        let ty = ccx.body.local_decls[local].ty;
+        if !NeedsDrop::in_any_value_of_ty(ccx, ty) {
+            return false;
+        }
+
+        let needs_drop = self.needs_drop.get_or_insert_with(|| {
+            let ConstCx { tcx, body, .. } = *ccx;
+
+            FlowSensitiveAnalysis::new(NeedsDrop, ccx)
+                .into_engine(tcx, &body)
+                .iterate_to_fixpoint()
+                .into_results_cursor(&body)
+        });
+
+        needs_drop.seek_before_primary_effect(location);
+        needs_drop.get().contains(local) || self.indirectly_mutable(ccx, local, location)
+    }
+
+    /// Returns `true` if `local` is `HasMutInterior` at the given `Location`.
+    ///
+    /// Only updates the cursor if absolutely necessary.
+    pub fn has_mut_interior(
+        &mut self,
+        ccx: &'mir ConstCx<'mir, 'tcx>,
+        local: Local,
+        location: Location,
+    ) -> bool {
+        let ty = ccx.body.local_decls[local].ty;
+        if !HasMutInterior::in_any_value_of_ty(ccx, ty) {
+            return false;
+        }
+
+        let has_mut_interior = self.has_mut_interior.get_or_insert_with(|| {
+            let ConstCx { tcx, body, .. } = *ccx;
+
+            FlowSensitiveAnalysis::new(HasMutInterior, ccx)
+                .into_engine(tcx, &body)
+                .iterate_to_fixpoint()
+                .into_results_cursor(&body)
+        });
+
+        has_mut_interior.seek_before_primary_effect(location);
+        has_mut_interior.get().contains(local) || self.indirectly_mutable(ccx, local, location)
+    }
+
+    fn in_return_place(
+        &mut self,
+        ccx: &'mir ConstCx<'mir, 'tcx>,
+        error_occured: Option<ErrorReported>,
+    ) -> ConstQualifs {
+        // Find the `Return` terminator if one exists.
+        //
+        // If no `Return` terminator exists, this MIR is divergent. Just return the conservative
+        // qualifs for the return type.
+        let return_block = ccx
+            .body
+            .basic_blocks()
+            .iter_enumerated()
+            .find(|(_, block)| match block.terminator().kind {
+                TerminatorKind::Return => true,
+                _ => false,
+            })
+            .map(|(bb, _)| bb);
+
+        let return_block = match return_block {
+            None => return qualifs::in_any_value_of_ty(ccx, ccx.body.return_ty(), error_occured),
+            Some(bb) => bb,
+        };
+
+        let return_loc = ccx.body.terminator_loc(return_block);
+
+        let custom_eq = match ccx.const_kind() {
+            // We don't care whether a `const fn` returns a value that is not structurally
+            // matchable. Function calls are opaque and always use type-based qualification, so
+            // this value should never be used.
+            hir::ConstContext::ConstFn => true,
+
+            // If we know that all values of the return type are structurally matchable, there's no
+            // need to run dataflow.
+            _ if !CustomEq::in_any_value_of_ty(ccx, ccx.body.return_ty()) => false,
+
+            hir::ConstContext::Const | hir::ConstContext::Static(_) => {
+                let mut cursor = FlowSensitiveAnalysis::new(CustomEq, ccx)
+                    .into_engine(ccx.tcx, &ccx.body)
+                    .iterate_to_fixpoint()
+                    .into_results_cursor(&ccx.body);
+
+                cursor.seek_after_primary_effect(return_loc);
+                cursor.contains(RETURN_PLACE)
+            }
+        };
+
+        ConstQualifs {
+            needs_drop: self.needs_drop(ccx, RETURN_PLACE, return_loc),
+            has_mut_interior: self.has_mut_interior(ccx, RETURN_PLACE, return_loc),
+            custom_eq,
+            error_occured,
+        }
+    }
+}
+
+pub struct Checker<'mir, 'tcx> {
+    ccx: &'mir ConstCx<'mir, 'tcx>,
+    qualifs: Qualifs<'mir, 'tcx>,
+
+    /// The span of the current statement.
+    span: Span,
+
+    /// For each local, whether a `StorageDead` statement for it appears somewhere in the body
+    /// (computed lazily).
+    local_has_storage_dead: Option<BitSet<Local>>,
+
+    error_emitted: Option<ErrorReported>,
+    secondary_errors: Vec<Diagnostic>,
+}
+
+impl Deref for Checker<'mir, 'tcx> {
+    type Target = ConstCx<'mir, 'tcx>;
+
+    fn deref(&self) -> &Self::Target {
+        &self.ccx
+    }
+}
+
+impl Checker<'mir, 'tcx> {
+    pub fn new(ccx: &'mir ConstCx<'mir, 'tcx>) -> Self {
+        Checker {
+            span: ccx.body.span,
+            ccx,
+            qualifs: Default::default(),
+            local_has_storage_dead: None,
+            error_emitted: None,
+            secondary_errors: Vec::new(),
+        }
+    }
+
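+    /// Runs all const checks over the body: local and return types, item predicates, and the MIR
+    /// visitor itself, then emits buffered secondary errors if no primary error occurred.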
+    pub fn check_body(&mut self) {
+        let ConstCx { tcx, body, .. } = *self.ccx;
+        let def_id = self.ccx.def_id();
+
+        // `async` functions cannot be `const fn`. This is checked during AST lowering, so there's
+        // no need to emit duplicate errors here.
+        if is_async_fn(self.ccx) || body.generator.is_some() {
+            tcx.sess.delay_span_bug(body.span, "`async` functions cannot be `const fn`");
+            return;
+        }
+
+        // The local type and predicate checks are not free and only relevant for `const fn`s.
+        if self.const_kind() == hir::ConstContext::ConstFn {
+            // Prevent const trait methods from being annotated as `stable`.
+            // FIXME: Do this as part of stability checking.
+            if self.is_const_stable_const_fn() {
+                let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
+                if crate::const_eval::is_parent_const_impl_raw(tcx, hir_id) {
+                    self.ccx
+                        .tcx
+                        .sess
+                        .struct_span_err(self.span, "trait methods cannot be stable const fn")
+                        .emit();
+                }
+            }
+
+            self.check_item_predicates();
+
+            for (idx, local) in body.local_decls.iter_enumerated() {
+                // Handle the return place below.
+                if idx == RETURN_PLACE || local.internal {
+                    continue;
+                }
+
+                self.span = local.source_info.span;
+                self.check_local_or_return_ty(local.ty, idx);
+            }
+
+            // impl trait is gone in MIR, so check the return type of a const fn by its signature
+            // instead of the type of the return place.
+            self.span = body.local_decls[RETURN_PLACE].source_info.span;
+            let return_ty = tcx.fn_sig(def_id).output();
+            self.check_local_or_return_ty(return_ty.skip_binder(), RETURN_PLACE);
+        }
+
+        self.visit_body(&body);
+
+        // Ensure that the end result is `Sync` in a non-thread local `static`.
+        let should_check_for_sync = self.const_kind()
+            == hir::ConstContext::Static(hir::Mutability::Not)
+            && !tcx.is_thread_local_static(def_id.to_def_id());
+
+        if should_check_for_sync {
+            let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
+            check_return_ty_is_sync(tcx, &body, hir_id);
+        }
+
+        // If we got through const-checking without emitting any "primary" errors, emit any
+        // "secondary" errors if they occurred.
+        let secondary_errors = mem::take(&mut self.secondary_errors);
+        if self.error_emitted.is_none() {
+            for error in secondary_errors {
+                self.tcx.sess.diagnostic().emit_diagnostic(&error);
+            }
+        } else {
+            assert!(self.tcx.sess.has_errors());
+        }
+    }
+
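+    /// Returns `true` if `local` has a `StorageDead` statement anywhere in the body
+    /// (the set of such locals is computed once and cached).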
+    fn local_has_storage_dead(&mut self, local: Local) -> bool {
+        let ccx = self.ccx;
+        self.local_has_storage_dead
+            .get_or_insert_with(|| {
+                struct StorageDeads {
+                    locals: BitSet<Local>,
+                }
+                impl Visitor<'tcx> for StorageDeads {
+                    fn visit_statement(&mut self, stmt: &Statement<'tcx>, _: Location) {
+                        if let StatementKind::StorageDead(l) = stmt.kind {
+                            self.locals.insert(l);
+                        }
+                    }
+                }
+                let mut v = StorageDeads { locals: BitSet::new_empty(ccx.body.local_decls.len()) };
+                v.visit_body(ccx.body);
+                v.locals
+            })
+            .contains(local)
+    }
+
+    pub fn qualifs_in_return_place(&mut self) -> ConstQualifs {
+        self.qualifs.in_return_place(self.ccx, self.error_emitted)
+    }
+
+    /// Emits an error if an expression cannot be evaluated in the current context.
+    pub fn check_op(&mut self, op: impl NonConstOp) {
+        self.check_op_spanned(op, self.span);
+    }
+
+    /// Emits an error at the given `span` if an expression cannot be evaluated in the current
+    /// context.
+    pub fn check_op_spanned<O: NonConstOp>(&mut self, op: O, span: Span) {
+        let gate = match op.status_in_item(self.ccx) {
+            Status::Allowed => return,
+
+            Status::Unstable(gate) if self.tcx.features().enabled(gate) => {
+                let unstable_in_stable = self.ccx.is_const_stable_const_fn()
+                    && !super::rustc_allow_const_fn_unstable(
+                        self.tcx,
+                        self.def_id().to_def_id(),
+                        gate,
+                    );
+                if unstable_in_stable {
+                    emit_unstable_in_stable_error(self.ccx, span, gate);
+                }
+
+                return;
+            }
+
+            Status::Unstable(gate) => Some(gate),
+            Status::Forbidden => None,
+        };
+
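+        // With `-Zunleash-the-miri-inside-of-you`, the operation is recorded and then permitted
+        // instead of being rejected.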
+        if self.tcx.sess.opts.debugging_opts.unleash_the_miri_inside_of_you {
+            self.tcx.sess.miri_unleashed_feature(span, gate);
+            return;
+        }
+
+        let mut err = op.build_error(self.ccx, span);
+        assert!(err.is_error());
+
+        match op.importance() {
+            ops::DiagnosticImportance::Primary => {
+                self.error_emitted = Some(ErrorReported);
+                err.emit();
+            }
+
+            ops::DiagnosticImportance::Secondary => err.buffer(&mut self.secondary_errors),
+        }
+    }
+
+    fn check_static(&mut self, def_id: DefId, span: Span) {
+        if self.tcx.is_thread_local_static(def_id) {
+            self.tcx.sess.delay_span_bug(span, "tls access is checked in `Rvalue::ThreadLocalRef`");
+        }
+        self.check_op_spanned(ops::StaticAccess, span)
+    }
+
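+    /// Walks the type of a local (or of the return place) and checks the corresponding
+    /// `NonConstOp` for each flagged component: `&mut` references, `impl Trait`, function
+    /// pointers, and trait object bounds other than `Sized`.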
+    fn check_local_or_return_ty(&mut self, ty: Ty<'tcx>, local: Local) {
+        let kind = self.body.local_kind(local);
+
+        for ty in ty.walk(self.tcx) {
+            let ty = match ty.unpack() {
+                GenericArgKind::Type(ty) => ty,
+
+                // No constraints on lifetimes or constants, except potentially
+                // constants' types, but `walk` will get to them as well.
+                GenericArgKind::Lifetime(_) | GenericArgKind::Const(_) => continue,
+            };
+
+            match *ty.kind() {
+                ty::Ref(_, _, hir::Mutability::Mut) => self.check_op(ops::ty::MutRef(kind)),
+                ty::Opaque(..) => self.check_op(ops::ty::ImplTrait),
+                ty::FnPtr(..) => self.check_op(ops::ty::FnPtr(kind)),
+
+                ty::Dynamic(preds, _) => {
+                    for pred in preds.iter() {
+                        match pred.skip_binder() {
+                            ty::ExistentialPredicate::AutoTrait(_)
+                            | ty::ExistentialPredicate::Projection(_) => {
+                                self.check_op(ops::ty::TraitBound(kind))
+                            }
+                            ty::ExistentialPredicate::Trait(trait_ref) => {
+                                if Some(trait_ref.def_id) != self.tcx.lang_items().sized_trait() {
+                                    self.check_op(ops::ty::TraitBound(kind))
+                                }
+                            }
+                        }
+                    }
+                }
+                _ => {}
+            }
+        }
+    }
+
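+    /// Walks the `where`-clauses of the current item and all of its parents, checking the
+    /// `TraitBound` operation for any trait bound (other than `Sized`) on a type parameter.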
+    fn check_item_predicates(&mut self) {
+        let ConstCx { tcx, .. } = *self.ccx;
+
+        let mut current = self.def_id().to_def_id();
+        loop {
+            let predicates = tcx.predicates_of(current);
+            for (predicate, _) in predicates.predicates {
+                match predicate.kind().skip_binder() {
+                    ty::PredicateKind::RegionOutlives(_)
+                    | ty::PredicateKind::TypeOutlives(_)
+                    | ty::PredicateKind::WellFormed(_)
+                    | ty::PredicateKind::Projection(_)
+                    | ty::PredicateKind::ConstEvaluatable(..)
+                    | ty::PredicateKind::ConstEquate(..)
+                    | ty::PredicateKind::TypeWellFormedFromEnv(..) => continue,
+                    ty::PredicateKind::ObjectSafe(_) => {
+                        bug!("object safe predicate on function: {:#?}", predicate)
+                    }
+                    ty::PredicateKind::ClosureKind(..) => {
+                        bug!("closure kind predicate on function: {:#?}", predicate)
+                    }
+                    ty::PredicateKind::Subtype(_) | ty::PredicateKind::Coerce(_) => {
+                        bug!("subtype/coerce predicate on function: {:#?}", predicate)
+                    }
+                    ty::PredicateKind::Trait(pred) => {
+                        if Some(pred.def_id()) == tcx.lang_items().sized_trait() {
+                            continue;
+                        }
+                        match pred.self_ty().kind() {
+                            ty::Param(p) => {
+                                let generics = tcx.generics_of(current);
+                                let def = generics.type_param(p, tcx);
+                                let span = tcx.def_span(def.def_id);
+
+                                // These are part of the function signature, so treat them like
+                                // arguments when determining importance.
+                                let kind = LocalKind::Arg;
+
+                                self.check_op_spanned(ops::ty::TraitBound(kind), span);
+                            }
+                            // other kinds of bounds are either tautologies
+                            // or cause errors in other passes
+                            _ => continue,
+                        }
+                    }
+                }
+            }
+            match predicates.parent {
+                Some(parent) => current = parent,
+                None => break,
+            }
+        }
+    }
+
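+    /// Classifies a mutable borrow (or raw mutable address-of) of `local` as transient or as
+    /// potentially escaping into the final value, and checks the corresponding operation.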
+    fn check_mut_borrow(&mut self, local: Local, kind: hir::BorrowKind) {
+        match self.const_kind() {
+            // In a const fn all borrows are transient or point to the places given via
+            // references in the arguments (so we already checked them with
+            // TransientMutBorrow/MutBorrow as appropriate).
+            // The borrow checker guarantees that no new non-transient borrows are created.
+            // NOTE: Once we have heap allocations during CTFE we need to figure out
+            // how to prevent a `const fn` from creating long-lived allocations that point
+            // to mutable memory.
+            hir::ConstContext::ConstFn => self.check_op(ops::TransientMutBorrow(kind)),
+            _ => {
+                // Locals with StorageDead do not live beyond the evaluation and can
+                // thus safely be borrowed without being able to be leaked to the final
+                // value of the constant.
+                if self.local_has_storage_dead(local) {
+                    self.check_op(ops::TransientMutBorrow(kind));
+                } else {
+                    self.check_op(ops::MutBorrow(kind));
+                }
+            }
+        }
+    }
+}
+
+impl Visitor<'tcx> for Checker<'mir, 'tcx> {
+    fn visit_basic_block_data(&mut self, bb: BasicBlock, block: &BasicBlockData<'tcx>) {
+        trace!("visit_basic_block_data: bb={:?} is_cleanup={:?}", bb, block.is_cleanup);
+
+        // We don't const-check basic blocks on the cleanup path since we never unwind during
+        // const-eval: a panic causes an immediate compile error. In other words, cleanup blocks
+        // are unreachable during const-eval.
+        //
+        // We can't be more conservative (e.g., by const-checking cleanup blocks anyways) because
+        // locals that would never be dropped during normal execution are sometimes dropped during
+        // unwinding, which means backwards-incompatible live-drop errors.
+        if block.is_cleanup {
+            return;
+        }
+
+        self.super_basic_block_data(bb, block);
+    }
+
+    fn visit_rvalue(&mut self, rvalue: &Rvalue<'tcx>, location: Location) {
+        trace!("visit_rvalue: rvalue={:?} location={:?}", rvalue, location);
+
+        // Special-case reborrows to be more like a copy of a reference.
+        match *rvalue {
+            Rvalue::Ref(_, kind, place) => {
+                if let Some(reborrowed_place_ref) = place_as_reborrow(self.tcx, self.body, place) {
+                    let ctx = match kind {
+                        BorrowKind::Shared => {
+                            PlaceContext::NonMutatingUse(NonMutatingUseContext::SharedBorrow)
+                        }
+                        BorrowKind::Shallow => {
+                            PlaceContext::NonMutatingUse(NonMutatingUseContext::ShallowBorrow)
+                        }
+                        BorrowKind::Unique => {
+                            PlaceContext::NonMutatingUse(NonMutatingUseContext::UniqueBorrow)
+                        }
+                        BorrowKind::Mut { .. } => {
+                            PlaceContext::MutatingUse(MutatingUseContext::Borrow)
+                        }
+                    };
+                    self.visit_local(&reborrowed_place_ref.local, ctx, location);
+                    self.visit_projection(reborrowed_place_ref, ctx, location);
+                    return;
+                }
+            }
+            Rvalue::AddressOf(mutbl, place) => {
+                if let Some(reborrowed_place_ref) = place_as_reborrow(self.tcx, self.body, place) {
+                    let ctx = match mutbl {
+                        Mutability::Not => {
+                            PlaceContext::NonMutatingUse(NonMutatingUseContext::AddressOf)
+                        }
+                        Mutability::Mut => PlaceContext::MutatingUse(MutatingUseContext::AddressOf),
+                    };
+                    self.visit_local(&reborrowed_place_ref.local, ctx, location);
+                    self.visit_projection(reborrowed_place_ref, ctx, location);
+                    return;
+                }
+            }
+            _ => {}
+        }
+
+        self.super_rvalue(rvalue, location);
+
+        match *rvalue {
+            Rvalue::ThreadLocalRef(_) => self.check_op(ops::ThreadLocalAccess),
+
+            Rvalue::Use(_)
+            | Rvalue::Repeat(..)
+            | Rvalue::Discriminant(..)
+            | Rvalue::Len(_)
+            | Rvalue::Aggregate(..) => {}
+
+            Rvalue::Ref(_, kind @ BorrowKind::Mut { .. }, ref place)
+            | Rvalue::Ref(_, kind @ BorrowKind::Unique, ref place) => {
+                let ty = place.ty(self.body, self.tcx).ty;
+                let is_allowed = match ty.kind() {
+                    // Inside a `static mut`, `&mut [...]` is allowed.
+                    ty::Array(..) | ty::Slice(_)
+                        if self.const_kind() == hir::ConstContext::Static(hir::Mutability::Mut) =>
+                    {
+                        true
+                    }
+
+                    // FIXME(ecstaticmorse): We could allow `&mut []` inside a const context given
+                    // that this is merely a ZST and it is already eligible for promotion.
+                    // This may require an RFC?
+                    /*
+                    ty::Array(_, len) if len.try_eval_usize(cx.tcx, cx.param_env) == Some(0)
+                        => true,
+                    */
+                    _ => false,
+                };
+
+                if !is_allowed {
+                    if let BorrowKind::Mut { .. } = kind {
+                        self.check_mut_borrow(place.local, hir::BorrowKind::Ref)
+                    } else {
+                        self.check_op(ops::CellBorrow);
+                    }
+                }
+            }
+
+            Rvalue::AddressOf(Mutability::Mut, ref place) => {
+                self.check_mut_borrow(place.local, hir::BorrowKind::Raw)
+            }
+
+            Rvalue::Ref(_, BorrowKind::Shared | BorrowKind::Shallow, ref place)
+            | Rvalue::AddressOf(Mutability::Not, ref place) => {
+                let borrowed_place_has_mut_interior = qualifs::in_place::<HasMutInterior, _>(
+                    &self.ccx,
+                    &mut |local| self.qualifs.has_mut_interior(self.ccx, local, location),
+                    place.as_ref(),
+                );
+
+                if borrowed_place_has_mut_interior {
+                    match self.const_kind() {
+                        // In a const fn all borrows are transient or point to the places given via
+                        // references in the arguments (so we already checked them with
+                        // TransientCellBorrow/CellBorrow as appropriate).
+                        // The borrow checker guarantees that no new non-transient borrows are created.
+                        // NOTE: Once we have heap allocations during CTFE we need to figure out
+                        // how to prevent a `const fn` from creating long-lived allocations that point
+                        // to (interior) mutable memory.
+                        hir::ConstContext::ConstFn => self.check_op(ops::TransientCellBorrow),
+                        _ => {
+                            // Locals with StorageDead are definitely not part of the final constant value, and
+                            // it is thus inherently safe to permit such locals to have their
+                            // address taken as we can't end up with a reference to them in the
+                            // final value.
+                            // Note: This is only sound if every local that has a `StorageDead` has a
+                            // `StorageDead` in every control flow path leading to a `return` terminator.
+                            if self.local_has_storage_dead(place.local) {
+                                self.check_op(ops::TransientCellBorrow);
+                            } else {
+                                self.check_op(ops::CellBorrow);
+                            }
+                        }
+                    }
+                }
+            }
+
+            Rvalue::Cast(
+                CastKind::Pointer(PointerCast::MutToConstPointer | PointerCast::ArrayToPointer),
+                _,
+                _,
+            ) => {}
+
+            Rvalue::Cast(
+                CastKind::Pointer(
+                    PointerCast::UnsafeFnPointer
+                    | PointerCast::ClosureFnPointer(_)
+                    | PointerCast::ReifyFnPointer,
+                ),
+                _,
+                _,
+            ) => self.check_op(ops::FnPtrCast),
+
+            Rvalue::Cast(CastKind::Pointer(PointerCast::Unsize), _, _) => {
+                // Nothing to check here (`check_local_or_return_ty` ensures no trait objects occur
+                // in the type of any local, which also rules out unsizing casts to trait objects).
+            }
+
+            Rvalue::Cast(CastKind::Misc, ref operand, cast_ty) => {
+                let operand_ty = operand.ty(self.body, self.tcx);
+                let cast_in = CastTy::from_ty(operand_ty).expect("bad input type for cast");
+                let cast_out = CastTy::from_ty(cast_ty).expect("bad output type for cast");
+
+                if let (CastTy::Ptr(_) | CastTy::FnPtr, CastTy::Int(_)) = (cast_in, cast_out) {
+                    self.check_op(ops::RawPtrToIntCast);
+                }
+            }
+
+            Rvalue::NullaryOp(NullOp::SizeOf, _) => {}
+            Rvalue::NullaryOp(NullOp::Box, _) => self.check_op(ops::HeapAllocation),
+
+            Rvalue::UnaryOp(_, ref operand) => {
+                let ty = operand.ty(self.body, self.tcx);
+                if is_int_bool_or_char(ty) {
+                    // Int, bool, and char operations are fine.
+                } else if ty.is_floating_point() {
+                    self.check_op(ops::FloatingPointOp);
+                } else {
+                    span_bug!(self.span, "non-primitive type in `Rvalue::UnaryOp`: {:?}", ty);
+                }
+            }
+
+            Rvalue::BinaryOp(op, box (ref lhs, ref rhs))
+            | Rvalue::CheckedBinaryOp(op, box (ref lhs, ref rhs)) => {
+                let lhs_ty = lhs.ty(self.body, self.tcx);
+                let rhs_ty = rhs.ty(self.body, self.tcx);
+
+                if is_int_bool_or_char(lhs_ty) && is_int_bool_or_char(rhs_ty) {
+                    // Int, bool, and char operations are fine.
+                } else if lhs_ty.is_fn_ptr() || lhs_ty.is_unsafe_ptr() {
+                    assert_eq!(lhs_ty, rhs_ty);
+                    assert!(
+                        op == BinOp::Eq
+                            || op == BinOp::Ne
+                            || op == BinOp::Le
+                            || op == BinOp::Lt
+                            || op == BinOp::Ge
+                            || op == BinOp::Gt
+                            || op == BinOp::Offset
+                    );
+
+                    self.check_op(ops::RawPtrComparison);
+                } else if lhs_ty.is_floating_point() || rhs_ty.is_floating_point() {
+                    self.check_op(ops::FloatingPointOp);
+                } else {
+                    span_bug!(
+                        self.span,
+                        "non-primitive type in `Rvalue::BinaryOp`: {:?} ⚬ {:?}",
+                        lhs_ty,
+                        rhs_ty
+                    );
+                }
+            }
+        }
+    }
+
+    fn visit_operand(&mut self, op: &Operand<'tcx>, location: Location) {
+        self.super_operand(op, location);
+        if let Operand::Constant(c) = op {
+            if let Some(def_id) = c.check_static_ptr(self.tcx) {
+                self.check_static(def_id, self.span);
+            }
+        }
+    }
+    fn visit_projection_elem(
+        &mut self,
+        place_local: Local,
+        proj_base: &[PlaceElem<'tcx>],
+        elem: PlaceElem<'tcx>,
+        context: PlaceContext,
+        location: Location,
+    ) {
+        trace!(
+            "visit_projection_elem: place_local={:?} proj_base={:?} elem={:?} \
+            context={:?} location={:?}",
+            place_local,
+            proj_base,
+            elem,
+            context,
+            location,
+        );
+
+        self.super_projection_elem(place_local, proj_base, elem, context, location);
+
+        match elem {
+            ProjectionElem::Deref => {
+                let base_ty = Place::ty_from(place_local, proj_base, self.body, self.tcx).ty;
+                if let ty::RawPtr(_) = base_ty.kind() {
+                    if proj_base.is_empty() {
+                        let decl = &self.body.local_decls[place_local];
+                        if let Some(box LocalInfo::StaticRef { def_id, .. }) = decl.local_info {
+                            let span = decl.source_info.span;
+                            self.check_static(def_id, span);
+                            return;
+                        }
+                    }
+                    self.check_op(ops::RawPtrDeref);
+                }
+
+                if context.is_mutating_use() {
+                    self.check_op(ops::MutDeref);
+                }
+            }
+
+            ProjectionElem::ConstantIndex { .. }
+            | ProjectionElem::Downcast(..)
+            | ProjectionElem::Subslice { .. }
+            | ProjectionElem::Field(..)
+            | ProjectionElem::Index(_) => {}
+        }
+    }
+
+    fn visit_source_info(&mut self, source_info: &SourceInfo) {
+        trace!("visit_source_info: source_info={:?}", source_info);
+        self.span = source_info.span;
+    }
+
+    fn visit_statement(&mut self, statement: &Statement<'tcx>, location: Location) {
+        trace!("visit_statement: statement={:?} location={:?}", statement, location);
+
+        self.super_statement(statement, location);
+
+        match statement.kind {
+            StatementKind::LlvmInlineAsm { .. } => {
+                self.check_op(ops::InlineAsm);
+            }
+
+            StatementKind::Assign(..)
+            | StatementKind::SetDiscriminant { .. }
+            | StatementKind::FakeRead(..)
+            | StatementKind::StorageLive(_)
+            | StatementKind::StorageDead(_)
+            | StatementKind::Retag { .. }
+            | StatementKind::AscribeUserType(..)
+            | StatementKind::Coverage(..)
+            | StatementKind::CopyNonOverlapping(..)
+            | StatementKind::Nop => {}
+        }
+    }
+
+    #[instrument(level = "debug", skip(self))]
+    fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, location: Location) {
+        use rustc_target::spec::abi::Abi::RustIntrinsic;
+
+        self.super_terminator(terminator, location);
+
+        match &terminator.kind {
+            TerminatorKind::Call { func, args, .. } => {
+                let ConstCx { tcx, body, param_env, .. } = *self.ccx;
+                let caller = self.def_id().to_def_id();
+
+                let fn_ty = func.ty(body, tcx);
+
+                let (mut callee, mut substs) = match *fn_ty.kind() {
+                    ty::FnDef(def_id, substs) => (def_id, substs),
+
+                    ty::FnPtr(_) => {
+                        self.check_op(ops::FnCallIndirect);
+                        return;
+                    }
+                    _ => {
+                        span_bug!(terminator.source_info.span, "invalid callee of type {:?}", fn_ty)
+                    }
+                };
+
+                let mut nonconst_call_permission = false;
+
+                // Attempting to call a trait method?
+                if let Some(trait_id) = tcx.trait_of_item(callee) {
+                    trace!("attempting to call a trait method");
+                    if !self.tcx.features().const_trait_impl {
+                        self.check_op(ops::FnCallNonConst);
+                        return;
+                    }
+
+                    let trait_ref = TraitRef::from_method(tcx, trait_id, substs);
+                    let obligation = Obligation::new(
+                        ObligationCause::dummy(),
+                        param_env,
+                        Binder::dummy(TraitPredicate {
+                            trait_ref,
+                            constness: ty::BoundConstness::ConstIfConst,
+                        }),
+                    );
+
+                    let implsrc = tcx.infer_ctxt().enter(|infcx| {
+                        let mut selcx =
+                            SelectionContext::with_constness(&infcx, hir::Constness::Const);
+                        selcx.select(&obligation)
+                    });
+
+                    match implsrc {
+                        Ok(Some(ImplSource::Param(_, ty::BoundConstness::ConstIfConst))) => {
+                            debug!(
+                                "const_trait_impl: provided {:?} via where-clause in {:?}",
+                                trait_ref, param_env
+                            );
+                            return;
+                        }
+                        Ok(Some(ImplSource::UserDefined(data))) => {
+                            let callee_name = tcx.item_name(callee);
+                            if let Some(&did) = tcx
+                                .associated_item_def_ids(data.impl_def_id)
+                                .iter()
+                                .find(|did| tcx.item_name(**did) == callee_name)
+                            {
+                                // using internal substs is ok here, since this is only
+                                // used for the `resolve` call below
+                                substs = InternalSubsts::identity_for_item(tcx, did);
+                                callee = did;
+                            }
+                        }
+                        _ if !tcx.is_const_fn_raw(callee) => {
+                            // At this point, it is only legal when the caller is marked with
+                            // #[default_method_body_is_const], and the callee is in the same
+                            // trait.
+                            let callee_trait = tcx.trait_of_item(callee);
+                            if callee_trait.is_some() {
+                                if tcx.has_attr(caller, sym::default_method_body_is_const) {
+                                    if tcx.trait_of_item(caller) == callee_trait {
+                                        nonconst_call_permission = true;
+                                    }
+                                }
+                            }
+
+                            if !nonconst_call_permission {
+                                self.check_op(ops::FnCallNonConst);
+                                return;
+                            }
+                        }
+                        _ => {}
+                    }
+
+                    // Resolve a trait method call to its concrete implementation, which may be in a
+                    // `const` trait impl.
+                    let instance = Instance::resolve(tcx, param_env, callee, substs);
+                    debug!("Resolving ({:?}) -> {:?}", callee, instance);
+                    if let Ok(Some(func)) = instance {
+                        if let InstanceDef::Item(def) = func.def {
+                            callee = def.did;
+                        }
+                    }
+                }
+
+                // At this point, we are calling a function, `callee`, whose `DefId` is known...
+                if is_lang_panic_fn(tcx, callee) {
+                    self.check_op(ops::Panic);
+
+                    // const-eval of the `begin_panic` fn assumes the argument is `&str`
+                    if Some(callee) == tcx.lang_items().begin_panic_fn() {
+                        match args[0].ty(&self.ccx.body.local_decls, tcx).kind() {
+                            ty::Ref(_, ty, _) if ty.is_str() => (),
+                            _ => self.check_op(ops::PanicNonStr),
+                        }
+                    }
+
+                    return;
+                }
+
+                // `async` blocks get lowered to `std::future::from_generator(/* a closure */)`.
+                let is_async_block = Some(callee) == tcx.lang_items().from_generator_fn();
+                if is_async_block {
+                    let kind = hir::GeneratorKind::Async(hir::AsyncGeneratorKind::Block);
+                    self.check_op(ops::Generator(kind));
+                    return;
+                }
+
+                let is_intrinsic = tcx.fn_sig(callee).abi() == RustIntrinsic;
+
+                if !tcx.is_const_fn_raw(callee) {
+                    if tcx.trait_of_item(callee).is_some() {
+                        if tcx.has_attr(callee, sym::default_method_body_is_const) {
+                            // To get here we must have already found a const impl for the
+                            // trait, but the callee can still be non-const because the impl
+                            // relies on the trait's default method bodies.
+                            nonconst_call_permission = true;
+                        }
+                    }
+
+                    if !nonconst_call_permission {
+                        self.check_op(ops::FnCallNonConst);
+                        return;
+                    }
+                }
+
+                // If the `const fn` we are trying to call is not const-stable, ensure that we have
+                // the proper feature gate enabled.
+                if let Some(gate) = is_unstable_const_fn(tcx, callee) {
+                    trace!(?gate, "calling unstable const fn");
+                    if self.span.allows_unstable(gate) {
+                        return;
+                    }
+
+                    // Calling an unstable function *always* requires that the corresponding gate
+                    // be enabled, even if the function has `#[rustc_allow_const_fn_unstable(the_gate)]`.
+                    if !tcx.features().declared_lib_features.iter().any(|&(sym, _)| sym == gate) {
+                        self.check_op(ops::FnCallUnstable(callee, Some(gate)));
+                        return;
+                    }
+
+                    // If this crate is not using stability attributes, or the caller is not claiming to be a
+                    // stable `const fn`, that is all that is required.
+                    if !self.ccx.is_const_stable_const_fn() {
+                        trace!("crate not using stability attributes or caller not stably const");
+                        return;
+                    }
+
+                    // Otherwise, we are something const-stable calling a const-unstable fn.
+
+                    if super::rustc_allow_const_fn_unstable(tcx, caller, gate) {
+                        trace!("rustc_allow_const_fn_unstable gate active");
+                        return;
+                    }
+
+                    self.check_op(ops::FnCallUnstable(callee, Some(gate)));
+                    return;
+                }
+
+                // FIXME(ecstaticmorse): For compatibility, we consider `unstable` callees that
+                // have no `rustc_const_stable` attributes to be const-unstable as well. This
+                // should be fixed later.
+                let callee_is_unstable_unmarked = tcx.lookup_const_stability(callee).is_none()
+                    && tcx.lookup_stability(callee).map_or(false, |s| s.level.is_unstable());
+                if callee_is_unstable_unmarked {
+                    trace!("callee_is_unstable_unmarked");
+                    // We do not use `const` modifiers for intrinsic "functions", as intrinsics are
+                    // `extern` functions, and these have no way to get marked `const`. So instead we
+                    // use `rustc_const_(un)stable` attributes to mean that the intrinsic is `const`
+                    if self.ccx.is_const_stable_const_fn() || is_intrinsic {
+                        self.check_op(ops::FnCallUnstable(callee, None));
+                        return;
+                    }
+                }
+                trace!("permitting call");
+            }
+
+            // Forbid all `Drop` terminators unless the place being dropped is a projection-free
+            // local that is known not to be `NeedsDrop`.
+            TerminatorKind::Drop { place: dropped_place, .. }
+            | TerminatorKind::DropAndReplace { place: dropped_place, .. } => {
+                // If we are checking live drops after drop-elaboration, don't emit duplicate
+                // errors here.
+                if super::post_drop_elaboration::checking_enabled(self.ccx) {
+                    return;
+                }
+
+                let mut err_span = self.span;
+
+                // Check to see if the type of this place can ever have a drop impl. If not, this
+                // `Drop` terminator is frivolous.
+                let ty_needs_drop =
+                    dropped_place.ty(self.body, self.tcx).ty.needs_drop(self.tcx, self.param_env);
+
+                if !ty_needs_drop {
+                    return;
+                }
+
+                let needs_drop = if let Some(local) = dropped_place.as_local() {
+                    // Use the span where the local was declared as the span of the drop error.
+                    err_span = self.body.local_decls[local].source_info.span;
+                    self.qualifs.needs_drop(self.ccx, local, location)
+                } else {
+                    true
+                };
+
+                if needs_drop {
+                    self.check_op_spanned(
+                        ops::LiveDrop { dropped_at: Some(terminator.source_info.span) },
+                        err_span,
+                    );
+                }
+            }
+
+            TerminatorKind::InlineAsm { .. } => self.check_op(ops::InlineAsm),
+
+            TerminatorKind::GeneratorDrop | TerminatorKind::Yield { .. } => {
+                self.check_op(ops::Generator(hir::GeneratorKind::Gen))
+            }
+
+            TerminatorKind::Abort => {
+                // Cleanup blocks are skipped for const checking (see `visit_basic_block_data`).
+                span_bug!(self.span, "`Abort` terminator outside of cleanup block")
+            }
+
+            TerminatorKind::Assert { .. }
+            | TerminatorKind::FalseEdge { .. }
+            | TerminatorKind::FalseUnwind { .. }
+            | TerminatorKind::Goto { .. }
+            | TerminatorKind::Resume
+            | TerminatorKind::Return
+            | TerminatorKind::SwitchInt { .. }
+            | TerminatorKind::Unreachable => {}
+        }
+    }
+}
+
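+/// Requires the return type of the body (the final value of the `static`) to be `Sync`,
+/// reporting trait-fulfillment errors otherwise.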
+fn check_return_ty_is_sync(tcx: TyCtxt<'tcx>, body: &Body<'tcx>, hir_id: HirId) {
+    let ty = body.return_ty();
+    tcx.infer_ctxt().enter(|infcx| {
+        let cause = traits::ObligationCause::new(body.span, hir_id, traits::SharedStatic);
+        let mut fulfillment_cx = traits::FulfillmentContext::new();
+        let sync_def_id = tcx.require_lang_item(LangItem::Sync, Some(body.span));
+        fulfillment_cx.register_bound(&infcx, ty::ParamEnv::empty(), ty, sync_def_id, cause);
+        if let Err(err) = fulfillment_cx.select_all_or_error(&infcx) {
+            infcx.report_fulfillment_errors(&err, None, false);
+        }
+    });
+}
+
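+/// If `place` is a reborrow of a reference (a `Deref` projection whose base has reference type),
+/// returns the base place. Returns `None` for derefs of raw pointers and of references to statics.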
+fn place_as_reborrow(
+    tcx: TyCtxt<'tcx>,
+    body: &Body<'tcx>,
+    place: Place<'tcx>,
+) -> Option<PlaceRef<'tcx>> {
+    match place.as_ref().last_projection() {
+        Some((place_base, ProjectionElem::Deref)) => {
+            // A borrow of a `static` also looks like `&(*_1)` in the MIR, but `_1` is a `const`
+            // that points to the allocation for the static. Don't treat these as reborrows.
+            if body.local_decls[place_base.local].is_ref_to_static() {
+                None
+            } else {
+                // Ensure the type being derefed is a reference and not a raw pointer.
+                // This is sufficient to prevent an access to a `static mut` from being marked as a
+                // reborrow, even if the check above were to disappear.
+                let inner_ty = place_base.ty(body, tcx).ty;
+
+                if let ty::Ref(..) = inner_ty.kind() {
+                    return Some(place_base);
+                } else {
+                    return None;
+                }
+            }
+        }
+        _ => None,
+    }
+}
+
+fn is_int_bool_or_char(ty: Ty<'_>) -> bool {
+    ty.is_bool() || ty.is_integral() || ty.is_char()
+}
+
+fn is_async_fn(ccx: &ConstCx<'_, '_>) -> bool {
+    ccx.fn_sig().map_or(false, |sig| sig.header.asyncness == hir::IsAsync::Async)
+}
+
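+/// Emits the error for a const-stable function that uses the unstable feature `gate` without a
+/// corresponding `#[rustc_allow_const_fn_unstable]` attribute, suggesting both possible fixes.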
+fn emit_unstable_in_stable_error(ccx: &ConstCx<'_, '_>, span: Span, gate: Symbol) {
+    let attr_span = ccx.fn_sig().map_or(ccx.body.span, |sig| sig.span.shrink_to_lo());
+
+    ccx.tcx
+        .sess
+        .struct_span_err(
+            span,
+            &format!("const-stable function cannot use `#[feature({})]`", gate.as_str()),
+        )
+        .span_suggestion(
+            attr_span,
+            "if it is not part of the public API, make this function unstably const",
+            concat!(r#"#[rustc_const_unstable(feature = "...", issue = "...")]"#, '\n').to_owned(),
+            Applicability::HasPlaceholders,
+        )
+        .span_suggestion(
+            attr_span,
+            "otherwise `#[rustc_allow_const_fn_unstable]` can be used to bypass stability checks",
+            format!("#[rustc_allow_const_fn_unstable({})]\n", gate),
+            Applicability::MaybeIncorrect,
+        )
+        .emit();
+}
diff --git a/compiler/rustc_const_eval/src/transform/check_consts/mod.rs b/compiler/rustc_const_eval/src/transform/check_consts/mod.rs
new file mode 100644
index 00000000000..a5cb0f4e14b
--- /dev/null
+++ b/compiler/rustc_const_eval/src/transform/check_consts/mod.rs
@@ -0,0 +1,135 @@
+//! Check the bodies of `const`s, `static`s and `const fn`s for illegal operations.
+//!
+//! This module will eventually replace the parts of `qualify_consts.rs` that check whether a local
+//! has interior mutability or needs to be dropped, as well as the visitor that emits errors when
+//! it finds operations that are invalid in a certain context.
+
+use rustc_attr as attr;
+use rustc_hir as hir;
+use rustc_hir::def_id::{DefId, LocalDefId};
+use rustc_middle::mir;
+use rustc_middle::ty::{self, TyCtxt};
+use rustc_span::{sym, Symbol};
+
+pub use self::qualifs::Qualif;
+
+pub mod check;
+mod ops;
+pub mod post_drop_elaboration;
+pub mod qualifs;
+mod resolver;
+
+/// Information about the item currently being const-checked, as well as a reference to the global
+/// context.
+pub struct ConstCx<'mir, 'tcx> {
+    pub body: &'mir mir::Body<'tcx>,
+    pub tcx: TyCtxt<'tcx>,
+    pub param_env: ty::ParamEnv<'tcx>,
+    pub const_kind: Option<hir::ConstContext>,
+}
+
+impl ConstCx<'mir, 'tcx> {
+    pub fn new(tcx: TyCtxt<'tcx>, body: &'mir mir::Body<'tcx>) -> Self {
+        let def_id = body.source.def_id().expect_local();
+        let param_env = tcx.param_env(def_id);
+        Self::new_with_param_env(tcx, body, param_env)
+    }
+
+    pub fn new_with_param_env(
+        tcx: TyCtxt<'tcx>,
+        body: &'mir mir::Body<'tcx>,
+        param_env: ty::ParamEnv<'tcx>,
+    ) -> Self {
+        let const_kind = tcx.hir().body_const_context(body.source.def_id().expect_local());
+        ConstCx { body, tcx, param_env, const_kind }
+    }
+
+    pub fn def_id(&self) -> LocalDefId {
+        self.body.source.def_id().expect_local()
+    }
+
+    /// Returns the kind of const context this `Item` represents (`const`, `static`, etc.).
+    ///
+    /// Panics if this `Item` is not const.
+    pub fn const_kind(&self) -> hir::ConstContext {
+        self.const_kind.expect("`const_kind` must not be called on a non-const fn")
+    }
+
+    pub fn is_const_stable_const_fn(&self) -> bool {
+        self.const_kind == Some(hir::ConstContext::ConstFn)
+            && self.tcx.features().staged_api
+            && is_const_stable_const_fn(self.tcx, self.def_id().to_def_id())
+    }
+
+    /// Returns the function signature of the item being const-checked if it is a `fn` or `const fn`.
+    pub fn fn_sig(&self) -> Option<&'tcx hir::FnSig<'tcx>> {
+        // Get this from the HIR map instead of a query to avoid cycle errors.
+        //
+        // FIXME: Is this still an issue?
+        let hir_map = self.tcx.hir();
+        let hir_id = hir_map.local_def_id_to_hir_id(self.def_id());
+        hir_map.fn_sig_by_hir_id(hir_id)
+    }
+}
+
+/// Returns `true` if this `DefId` points to one of the official `panic` lang items.
+pub fn is_lang_panic_fn(tcx: TyCtxt<'tcx>, def_id: DefId) -> bool {
+    // We can allow calls to these functions because `hook_panic_fn` in
+    // `const_eval/machine.rs` ensures the calls are handled specially.
+    // Keep in sync with what that function handles!
+    Some(def_id) == tcx.lang_items().panic_fn()
+        || Some(def_id) == tcx.lang_items().panic_str()
+        || Some(def_id) == tcx.lang_items().begin_panic_fn()
+        || Some(def_id) == tcx.lang_items().panic_fmt()
+        || Some(def_id) == tcx.lang_items().begin_panic_fmt()
+}
+
+pub fn rustc_allow_const_fn_unstable(
+    tcx: TyCtxt<'tcx>,
+    def_id: DefId,
+    feature_gate: Symbol,
+) -> bool {
+    let attrs = tcx.get_attrs(def_id);
+    attr::rustc_allow_const_fn_unstable(&tcx.sess, attrs).any(|name| name == feature_gate)
+}
+
+// Returns `true` if the given `const fn` is "const-stable".
+//
+// Panics if the given `DefId` does not refer to a `const fn`.
+//
+// Const-stability is only relevant for `const fn` within a `staged_api` crate. Only "const-stable"
+// functions can be called in a const-context by users of the stable compiler. "const-stable"
+// functions are subject to more stringent restrictions than "const-unstable" functions: They
+// cannot use unstable features and can only call other "const-stable" functions.
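+//
+// For illustration (attribute usage as it appears in the standard library; the feature names here
+// are made up and not part of this crate):
+//
+//     #[stable(feature = "example", since = "1.0.0")]
+//     #[rustc_const_stable(feature = "const_example", since = "1.0.0")]
+//     pub const fn stable_const_fn() {}       // "const-stable"
+//
+//     #[rustc_const_unstable(feature = "const_example", issue = "none")]
+//     pub const fn unstable_const_fn() {}     // "const-unstable"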
+pub fn is_const_stable_const_fn(tcx: TyCtxt<'tcx>, def_id: DefId) -> bool {
+    use attr::{ConstStability, Stability, StabilityLevel};
+
+    // A default body marked const is not const-stable because const
+    // trait fns currently cannot be const-stable. We shouldn't
+    // restrict default bodies to only call const-stable functions.
+    if tcx.has_attr(def_id, sym::default_method_body_is_const) {
+        return false;
+    }
+
+    // Const-stability is only relevant for `const fn`.
+    assert!(tcx.is_const_fn_raw(def_id));
+
+    // Functions with `#[rustc_const_unstable]` are const-unstable.
+    match tcx.lookup_const_stability(def_id) {
+        Some(ConstStability { level: StabilityLevel::Unstable { .. }, .. }) => return false,
+        Some(ConstStability { level: StabilityLevel::Stable { .. }, .. }) => return true,
+        None => {}
+    }
+
+    // Functions with `#[unstable]` are const-unstable.
+    //
+    // FIXME(ecstaticmorse): We should keep const-stability attributes wholly separate from normal stability
+    // attributes. `#[unstable]` should be irrelevant.
+    if let Some(Stability { level: StabilityLevel::Unstable { .. }, .. }) =
+        tcx.lookup_stability(def_id)
+    {
+        return false;
+    }
+
+    true
+}
diff --git a/compiler/rustc_const_eval/src/transform/check_consts/ops.rs b/compiler/rustc_const_eval/src/transform/check_consts/ops.rs
new file mode 100644
index 00000000000..8923d989b29
--- /dev/null
+++ b/compiler/rustc_const_eval/src/transform/check_consts/ops.rs
@@ -0,0 +1,628 @@
+//! Concrete error types for all operations which may be invalid in a certain const context.
+
+use rustc_errors::{struct_span_err, DiagnosticBuilder};
+use rustc_hir as hir;
+use rustc_hir::def_id::DefId;
+use rustc_middle::mir;
+use rustc_session::parse::feature_err;
+use rustc_span::symbol::sym;
+use rustc_span::{Span, Symbol};
+
+use super::ConstCx;
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+pub enum Status {
+    Allowed,
+    Unstable(Symbol),
+    Forbidden,
+}
+
+#[derive(Clone, Copy)]
+pub enum DiagnosticImportance {
+    /// An operation that must be removed for const-checking to pass.
+    Primary,
+
+    /// An operation that causes const-checking to fail, but is usually a side-effect of a `Primary` operation elsewhere.
+    Secondary,
+}
+
+/// An operation that is not *always* allowed in a const context.
+pub trait NonConstOp: std::fmt::Debug {
+    /// Returns an enum indicating whether this operation is allowed within the given item.
+    fn status_in_item(&self, _ccx: &ConstCx<'_, '_>) -> Status {
+        Status::Forbidden
+    }
+
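+    /// How important this operation is for diagnostics. `Secondary` diagnostics are buffered by
+    /// the checker and emitted only if no `Primary` error was reported for the item.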
+    fn importance(&self) -> DiagnosticImportance {
+        DiagnosticImportance::Primary
+    }
+
+    fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> DiagnosticBuilder<'tcx>;
+}
+
+#[derive(Debug)]
+pub struct FloatingPointOp;
+impl NonConstOp for FloatingPointOp {
+    fn status_in_item(&self, ccx: &ConstCx<'_, '_>) -> Status {
+        if ccx.const_kind() == hir::ConstContext::ConstFn {
+            Status::Unstable(sym::const_fn_floating_point_arithmetic)
+        } else {
+            Status::Allowed
+        }
+    }
+
+    fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> DiagnosticBuilder<'tcx> {
+        feature_err(
+            &ccx.tcx.sess.parse_sess,
+            sym::const_fn_floating_point_arithmetic,
+            span,
+            &format!("floating point arithmetic is not allowed in {}s", ccx.const_kind()),
+        )
+    }
+}
+
+/// A function call where the callee is a pointer.
+#[derive(Debug)]
+pub struct FnCallIndirect;
+impl NonConstOp for FnCallIndirect {
+    fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> DiagnosticBuilder<'tcx> {
+        ccx.tcx.sess.struct_span_err(span, "function pointers are not allowed in const fn")
+    }
+}
+
+/// A function call where the callee is not marked as `const`.
+#[derive(Debug)]
+pub struct FnCallNonConst;
+impl NonConstOp for FnCallNonConst {
+    fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> DiagnosticBuilder<'tcx> {
+        struct_span_err!(
+            ccx.tcx.sess,
+            span,
+            E0015,
+            "calls in {}s are limited to constant functions, \
+             tuple structs and tuple variants",
+            ccx.const_kind(),
+        )
+    }
+}
+
+/// A call to an `#[unstable]` const fn or `#[rustc_const_unstable]` function.
+///
+/// Contains the name of the feature that would allow the use of this function.
+#[derive(Debug)]
+pub struct FnCallUnstable(pub DefId, pub Option<Symbol>);
+
+impl NonConstOp for FnCallUnstable {
+    fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> DiagnosticBuilder<'tcx> {
+        let FnCallUnstable(def_id, feature) = *self;
+
+        let mut err = ccx.tcx.sess.struct_span_err(
+            span,
+            &format!("`{}` is not yet stable as a const fn", ccx.tcx.def_path_str(def_id)),
+        );
+
+        if ccx.is_const_stable_const_fn() {
+            err.help("Const-stable functions can only call other const-stable functions");
+        } else if ccx.tcx.sess.is_nightly_build() {
+            if let Some(feature) = feature {
+                err.help(&format!(
+                    "add `#![feature({})]` to the crate attributes to enable",
+                    feature
+                ));
+            }
+        }
+
+        err
+    }
+}
+
+#[derive(Debug)]
+pub struct FnPtrCast;
+impl NonConstOp for FnPtrCast {
+    fn status_in_item(&self, ccx: &ConstCx<'_, '_>) -> Status {
+        if ccx.const_kind() != hir::ConstContext::ConstFn {
+            Status::Allowed
+        } else {
+            Status::Unstable(sym::const_fn_fn_ptr_basics)
+        }
+    }
+
+    fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> DiagnosticBuilder<'tcx> {
+        feature_err(
+            &ccx.tcx.sess.parse_sess,
+            sym::const_fn_fn_ptr_basics,
+            span,
+            &format!("function pointer casts are not allowed in {}s", ccx.const_kind()),
+        )
+    }
+}
+
+#[derive(Debug)]
+pub struct Generator(pub hir::GeneratorKind);
+impl NonConstOp for Generator {
+    fn status_in_item(&self, _: &ConstCx<'_, '_>) -> Status {
+        if let hir::GeneratorKind::Async(hir::AsyncGeneratorKind::Block) = self.0 {
+            Status::Unstable(sym::const_async_blocks)
+        } else {
+            Status::Forbidden
+        }
+    }
+
+    fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> DiagnosticBuilder<'tcx> {
+        let msg = format!("{}s are not allowed in {}s", self.0, ccx.const_kind());
+        if let hir::GeneratorKind::Async(hir::AsyncGeneratorKind::Block) = self.0 {
+            feature_err(&ccx.tcx.sess.parse_sess, sym::const_async_blocks, span, &msg)
+        } else {
+            ccx.tcx.sess.struct_span_err(span, &msg)
+        }
+    }
+}
+
+#[derive(Debug)]
+pub struct HeapAllocation;
+impl NonConstOp for HeapAllocation {
+    fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> DiagnosticBuilder<'tcx> {
+        let mut err = struct_span_err!(
+            ccx.tcx.sess,
+            span,
+            E0010,
+            "allocations are not allowed in {}s",
+            ccx.const_kind()
+        );
+        err.span_label(span, format!("allocation not allowed in {}s", ccx.const_kind()));
+        if ccx.tcx.sess.teach(&err.get_code().unwrap()) {
+            err.note(
+                "The value of statics and constants must be known at compile time, \
+                 and they live for the entire lifetime of a program. Creating a boxed \
+                 value allocates memory on the heap at runtime, and therefore cannot \
+                 be done at compile time.",
+            );
+        }
+        err
+    }
+}
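A sketch of the E0010 case, using the then-available unstable `box` syntax (the rejected line is shown commented out):

    #![feature(box_syntax)]

    // error[E0010]: allocations are not allowed in constants
    // const BOXED: Box<i32> = box 1;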
+
+#[derive(Debug)]
+pub struct InlineAsm;
+impl NonConstOp for InlineAsm {
+    fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> DiagnosticBuilder<'tcx> {
+        struct_span_err!(
+            ccx.tcx.sess,
+            span,
+            E0015,
+            "inline assembly is not allowed in {}s",
+            ccx.const_kind()
+        )
+    }
+}
+
+#[derive(Debug)]
+pub struct LiveDrop {
+    pub dropped_at: Option<Span>,
+}
+impl NonConstOp for LiveDrop {
+    fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> DiagnosticBuilder<'tcx> {
+        let mut err = struct_span_err!(
+            ccx.tcx.sess,
+            span,
+            E0493,
+            "destructors cannot be evaluated at compile-time"
+        );
+        err.span_label(span, format!("{}s cannot evaluate destructors", ccx.const_kind()));
+        if let Some(span) = self.dropped_at {
+            err.span_label(span, "value is dropped here");
+        }
+        err
+    }
+}
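A minimal sketch of the live-drop restriction (made-up names): letting a value whose type needs `Drop` go out of scope in a const context would require running its destructor at compile time, which E0493 forbids; moving the value out instead is fine.

    // error[E0493]: destructors cannot be evaluated at compile-time
    // (the `Vec` parameter would be dropped when the function returns)
    // const fn consume(_v: Vec<u8>) {}

    // Accepted: the value is moved to the caller rather than dropped here.
    const fn pass_through(v: Vec<u8>) -> Vec<u8> {
        v
    }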
+
+#[derive(Debug)]
+/// A borrow of a type that contains an `UnsafeCell` somewhere. The borrow never escapes to
+/// the final value of the constant.
+pub struct TransientCellBorrow;
+impl NonConstOp for TransientCellBorrow {
+    fn status_in_item(&self, _: &ConstCx<'_, '_>) -> Status {
+        Status::Unstable(sym::const_refs_to_cell)
+    }
+    fn importance(&self) -> DiagnosticImportance {
+        // The cases that cannot possibly work will already emit a `CellBorrow`, so we should
+        // not additionally emit a feature gate error if activating the feature gate won't work.
+        DiagnosticImportance::Secondary
+    }
+    fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> DiagnosticBuilder<'tcx> {
+        feature_err(
+            &ccx.tcx.sess.parse_sess,
+            sym::const_refs_to_cell,
+            span,
+            "cannot borrow here, since the borrowed element may contain interior mutability",
+        )
+    }
+}
+
+#[derive(Debug)]
+/// A borrow of a type that contains an `UnsafeCell` somewhere. The borrow might escape to
+/// the final value of the constant, and thus we cannot allow this (for now). We may allow
+/// it in the future for static items.
+pub struct CellBorrow;
+impl NonConstOp for CellBorrow {
+    fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> DiagnosticBuilder<'tcx> {
+        let mut err = struct_span_err!(
+            ccx.tcx.sess,
+            span,
+            E0492,
+            "{}s cannot refer to interior mutable data",
+            ccx.const_kind(),
+        );
+        err.span_label(
+            span,
+            "this borrow of an interior mutable value may end up in the final value",
+        );
+        if let hir::ConstContext::Static(_) = ccx.const_kind() {
+            err.help(
+                "to fix this, the value can be extracted to a separate \
+                `static` item and then referenced",
+            );
+        }
+        if ccx.tcx.sess.teach(&err.get_code().unwrap()) {
+            err.note(
+                "A constant containing interior mutable data behind a reference can allow you
+                 to modify that data. This could make multiple uses of a constant observe
+                 different values and allow circumventing the `Send` and `Sync` requirements
+                 for shared mutable data, which is unsound.",
+            );
+        }
+        err
+    }
+}
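The canonical E0492 case, for reference: the final value of the constant would contain a reference through which the shared, supposedly immutable value could be mutated.

    use std::cell::Cell;

    // error[E0492]: constants cannot refer to interior mutable data
    // const SHARED: &Cell<i32> = &Cell::new(0);

    // Owning the `Cell` by value is fine; every use of the const gets its own copy.
    const OWNED: Cell<i32> = Cell::new(0);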
+
+#[derive(Debug)]
+/// This op is for `&mut` borrows in the trailing expression of a constant
+/// which uses the "enclosing scopes rule" to leak its locals into anonymous
+/// static or const items.
+pub struct MutBorrow(pub hir::BorrowKind);
+
+impl NonConstOp for MutBorrow {
+    fn status_in_item(&self, _ccx: &ConstCx<'_, '_>) -> Status {
+        Status::Forbidden
+    }
+
+    fn importance(&self) -> DiagnosticImportance {
+        // If there were primary errors (like non-const function calls), do not emit further
+        // errors about mutable references.
+        DiagnosticImportance::Secondary
+    }
+
+    fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> DiagnosticBuilder<'tcx> {
+        let raw = match self.0 {
+            hir::BorrowKind::Raw => "raw ",
+            hir::BorrowKind::Ref => "",
+        };
+
+        let mut err = struct_span_err!(
+            ccx.tcx.sess,
+            span,
+            E0764,
+            "{}mutable references are not allowed in the final value of {}s",
+            raw,
+            ccx.const_kind(),
+        );
+
+        if ccx.tcx.sess.teach(&err.get_code().unwrap()) {
+            err.note(
+                "References in statics and constants may only refer \
+                      to immutable values.\n\n\
+                      Statics are shared everywhere, and if they refer to \
+                      mutable data one might violate memory safety since \
+                      holding multiple mutable references to shared data \
+                      is not allowed.\n\n\
+                      If you really want global mutable state, try using \
+                      static mut or a global UnsafeCell.",
+            );
+        }
+        err
+    }
+}
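And the E0764 shape, where a mutable reference would escape into the final value (illustrative names):

    // error[E0764]: mutable references are not allowed in the final value of constants
    // const ESCAPING: &mut i32 = &mut 0;

    // A shared reference to an immutable value is fine (and gets promoted).
    const OK: &i32 = &0;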
+
+#[derive(Debug)]
+pub struct TransientMutBorrow(pub hir::BorrowKind);
+
+impl NonConstOp for TransientMutBorrow {
+    fn status_in_item(&self, _: &ConstCx<'_, '_>) -> Status {
+        Status::Unstable(sym::const_mut_refs)
+    }
+
+    fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> DiagnosticBuilder<'tcx> {
+        let raw = match self.0 {
+            hir::BorrowKind::Raw => "raw ",
+            hir::BorrowKind::Ref => "",
+        };
+
+        feature_err(
+            &ccx.tcx.sess.parse_sess,
+            sym::const_mut_refs,
+            span,
+            &format!("{}mutable references are not allowed in {}s", raw, ccx.const_kind()),
+        )
+    }
+}
+
+#[derive(Debug)]
+pub struct MutDeref;
+impl NonConstOp for MutDeref {
+    fn status_in_item(&self, _: &ConstCx<'_, '_>) -> Status {
+        Status::Unstable(sym::const_mut_refs)
+    }
+
+    fn importance(&self) -> DiagnosticImportance {
+        // Usually a side-effect of a `TransientMutBorrow` somewhere.
+        DiagnosticImportance::Secondary
+    }
+
+    fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> DiagnosticBuilder<'tcx> {
+        feature_err(
+            &ccx.tcx.sess.parse_sess,
+            sym::const_mut_refs,
+            span,
+            &format!("mutation through a reference is not allowed in {}s", ccx.const_kind()),
+        )
+    }
+}
+
+#[derive(Debug)]
+pub struct Panic;
+impl NonConstOp for Panic {
+    fn status_in_item(&self, _: &ConstCx<'_, '_>) -> Status {
+        Status::Unstable(sym::const_panic)
+    }
+
+    fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> DiagnosticBuilder<'tcx> {
+        feature_err(
+            &ccx.tcx.sess.parse_sess,
+            sym::const_panic,
+            span,
+            &format!("panicking in {}s is unstable", ccx.const_kind()),
+        )
+    }
+}
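At the time of this code, panicking in a const context was still gated behind `const_panic`; a sketch with made-up names:

    #![feature(const_panic)] // required on nightly compilers contemporary with this code

    const fn checked_div(a: u32, b: u32) -> u32 {
        if b == 0 {
            // Note the plain `&str` argument; see `PanicNonStr` below.
            panic!("division by zero in a const context");
        }
        a / b
    }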
+
+/// A call to a `panic()` lang item where the first argument is _not_ a `&str`.
+#[derive(Debug)]
+pub struct PanicNonStr;
+impl NonConstOp for PanicNonStr {
+    fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> DiagnosticBuilder<'tcx> {
+        ccx.tcx.sess.struct_span_err(
+            span,
+            "argument to `panic!()` in a const context must have type `&str`",
+        )
+    }
+}
+
+/// Comparing raw pointers for equality.
+/// Not currently intended to ever be allowed, even behind a feature gate: operation depends on
+/// allocation base addresses that are not known at compile-time.
+#[derive(Debug)]
+pub struct RawPtrComparison;
+impl NonConstOp for RawPtrComparison {
+    fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> DiagnosticBuilder<'tcx> {
+        let mut err = ccx
+            .tcx
+            .sess
+            .struct_span_err(span, "pointers cannot be reliably compared during const eval.");
+        err.note(
+            "see issue #53020 <https://github.com/rust-lang/rust/issues/53020> \
+            for more information",
+        );
+        err
+    }
+}
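For illustration, the shape of code this rejects: whether two allocations end up at equal, adjacent, or distant addresses is only decided at and after codegen, so the comparison has no reliable answer during CTFE (rejected line shown commented out).

    const A: i32 = 1;
    const B: i32 = 1;

    // Rejected: "pointers cannot be reliably compared during const eval."
    // const SAME_ADDR: bool = &A as *const i32 == &B as *const i32;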
+
+#[derive(Debug)]
+pub struct RawPtrDeref;
+impl NonConstOp for RawPtrDeref {
+    fn status_in_item(&self, _: &ConstCx<'_, '_>) -> Status {
+        Status::Unstable(sym::const_raw_ptr_deref)
+    }
+
+    fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> DiagnosticBuilder<'tcx> {
+        feature_err(
+            &ccx.tcx.sess.parse_sess,
+            sym::const_raw_ptr_deref,
+            span,
+            &format!("dereferencing raw pointers in {}s is unstable", ccx.const_kind(),),
+        )
+    }
+}
+
+/// Casting raw pointer or function pointer to an integer.
+/// Not currently intended to ever be allowed, even behind a feature gate: operation depends on
+/// allocation base addresses that are not known at compile-time.
+#[derive(Debug)]
+pub struct RawPtrToIntCast;
+impl NonConstOp for RawPtrToIntCast {
+    fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> DiagnosticBuilder<'tcx> {
+        let mut err = ccx
+            .tcx
+            .sess
+            .struct_span_err(span, "pointers cannot be cast to integers during const eval.");
+        err.note("at compile-time, pointers do not have an integer value");
+        err.note(
+            "avoiding this restriction via `transmute`, `union`, or raw pointers leads to compile-time undefined behavior",
+        );
+        err
+    }
+}
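Likewise for pointer-to-integer casts: at compile time an allocation has no concrete address to observe (a sketch; names are illustrative).

    // Rejected: "pointers cannot be cast to integers during const eval."
    // const ADDR: usize = {
    //     let x = 0i32;
    //     &x as *const i32 as usize
    // };

    // Raw pointer *values* themselves are fine in a const; only their integer address is not.
    const NULL: *const i32 = std::ptr::null();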
+
+/// An access to a (non-thread-local) `static`.
+#[derive(Debug)]
+pub struct StaticAccess;
+impl NonConstOp for StaticAccess {
+    fn status_in_item(&self, ccx: &ConstCx<'_, '_>) -> Status {
+        if let hir::ConstContext::Static(_) = ccx.const_kind() {
+            Status::Allowed
+        } else {
+            Status::Forbidden
+        }
+    }
+
+    fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> DiagnosticBuilder<'tcx> {
+        let mut err = struct_span_err!(
+            ccx.tcx.sess,
+            span,
+            E0013,
+            "{}s cannot refer to statics",
+            ccx.const_kind()
+        );
+        err.help(
+            "consider extracting the value of the `static` to a `const`, and referring to that",
+        );
+        if ccx.tcx.sess.teach(&err.get_code().unwrap()) {
+            err.note(
+                "`static` and `const` variables can refer to other `const` variables. \
+                    A `const` variable, however, cannot refer to a `static` variable.",
+            );
+            err.help("To fix this, the value can be extracted to a `const` and then used.");
+        }
+        err
+    }
+}
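A sketch of E0013 together with the suggested fix: keep the shared value in a `const`, and let the `static` (and any other consts) refer to that.

    // error[E0013]: constants cannot refer to statics
    // static LIMIT: usize = 64;
    // const HALF_LIMIT: usize = LIMIT / 2;

    // Suggested structure instead:
    const LIMIT: usize = 64;
    static LIMIT_STATIC: usize = LIMIT;
    const HALF_LIMIT: usize = LIMIT / 2;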
+
+/// An access to a thread-local `static`.
+#[derive(Debug)]
+pub struct ThreadLocalAccess;
+impl NonConstOp for ThreadLocalAccess {
+    fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> DiagnosticBuilder<'tcx> {
+        struct_span_err!(
+            ccx.tcx.sess,
+            span,
+            E0625,
+            "thread-local statics cannot be \
+            accessed at compile-time"
+        )
+    }
+}
+
+/// Types that cannot appear in the signature or locals of a `const fn`.
+pub mod ty {
+    use super::*;
+
+    #[derive(Debug)]
+    pub struct MutRef(pub mir::LocalKind);
+    impl NonConstOp for MutRef {
+        fn status_in_item(&self, _ccx: &ConstCx<'_, '_>) -> Status {
+            Status::Unstable(sym::const_mut_refs)
+        }
+
+        fn importance(&self) -> DiagnosticImportance {
+            match self.0 {
+                mir::LocalKind::Var | mir::LocalKind::Temp => DiagnosticImportance::Secondary,
+                mir::LocalKind::ReturnPointer | mir::LocalKind::Arg => {
+                    DiagnosticImportance::Primary
+                }
+            }
+        }
+
+        fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> DiagnosticBuilder<'tcx> {
+            feature_err(
+                &ccx.tcx.sess.parse_sess,
+                sym::const_mut_refs,
+                span,
+                &format!("mutable references are not allowed in {}s", ccx.const_kind()),
+            )
+        }
+    }
+
+    #[derive(Debug)]
+    pub struct FnPtr(pub mir::LocalKind);
+    impl NonConstOp for FnPtr {
+        fn importance(&self) -> DiagnosticImportance {
+            match self.0 {
+                mir::LocalKind::Var | mir::LocalKind::Temp => DiagnosticImportance::Secondary,
+                mir::LocalKind::ReturnPointer | mir::LocalKind::Arg => {
+                    DiagnosticImportance::Primary
+                }
+            }
+        }
+
+        fn status_in_item(&self, ccx: &ConstCx<'_, '_>) -> Status {
+            if ccx.const_kind() != hir::ConstContext::ConstFn {
+                Status::Allowed
+            } else {
+                Status::Unstable(sym::const_fn_fn_ptr_basics)
+            }
+        }
+
+        fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> DiagnosticBuilder<'tcx> {
+            feature_err(
+                &ccx.tcx.sess.parse_sess,
+                sym::const_fn_fn_ptr_basics,
+                span,
+                &format!("function pointers cannot appear in {}s", ccx.const_kind()),
+            )
+        }
+    }
+
+    #[derive(Debug)]
+    pub struct ImplTrait;
+    impl NonConstOp for ImplTrait {
+        fn status_in_item(&self, _: &ConstCx<'_, '_>) -> Status {
+            Status::Unstable(sym::const_impl_trait)
+        }
+
+        fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> DiagnosticBuilder<'tcx> {
+            feature_err(
+                &ccx.tcx.sess.parse_sess,
+                sym::const_impl_trait,
+                span,
+                &format!("`impl Trait` is not allowed in {}s", ccx.const_kind()),
+            )
+        }
+    }
+
+    #[derive(Debug)]
+    pub struct TraitBound(pub mir::LocalKind);
+    impl NonConstOp for TraitBound {
+        fn importance(&self) -> DiagnosticImportance {
+            match self.0 {
+                mir::LocalKind::Var | mir::LocalKind::Temp => DiagnosticImportance::Secondary,
+                mir::LocalKind::ReturnPointer | mir::LocalKind::Arg => {
+                    DiagnosticImportance::Primary
+                }
+            }
+        }
+
+        fn status_in_item(&self, ccx: &ConstCx<'_, '_>) -> Status {
+            if ccx.const_kind() != hir::ConstContext::ConstFn {
+                Status::Allowed
+            } else {
+                Status::Unstable(sym::const_fn_trait_bound)
+            }
+        }
+
+        fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> DiagnosticBuilder<'tcx> {
+            feature_err(
+                &ccx.tcx.sess.parse_sess,
+                sym::const_fn_trait_bound,
+                span,
+                "trait bounds other than `Sized` on const fn parameters are unstable",
+            )
+        }
+    }
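At this point in the compiler's history, merely having a bound other than `Sized` on a `const fn` parameter was gated; a sketch (made-up name):

    #![feature(const_fn_trait_bound)]

    // The `T: Send` bound is what triggers the gate; it still cannot be used to call
    // non-const trait methods inside the body.
    const fn tag<T: Send>(value: &T) -> &T {
        value
    }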
+
+    /// A trait bound with the `?const Trait` opt-out.
+    #[derive(Debug)]
+    pub struct TraitBoundNotConst;
+    impl NonConstOp for TraitBoundNotConst {
+        fn status_in_item(&self, _: &ConstCx<'_, '_>) -> Status {
+            Status::Unstable(sym::const_trait_bound_opt_out)
+        }
+
+        fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> DiagnosticBuilder<'tcx> {
+            feature_err(
+                &ccx.tcx.sess.parse_sess,
+                sym::const_trait_bound_opt_out,
+                span,
+                "`?const Trait` syntax is unstable",
+            )
+        }
+    }
+}
diff --git a/compiler/rustc_const_eval/src/transform/check_consts/post_drop_elaboration.rs b/compiler/rustc_const_eval/src/transform/check_consts/post_drop_elaboration.rs
new file mode 100644
index 00000000000..b08ce219034
--- /dev/null
+++ b/compiler/rustc_const_eval/src/transform/check_consts/post_drop_elaboration.rs
@@ -0,0 +1,123 @@
+use rustc_middle::mir::visit::Visitor;
+use rustc_middle::mir::{self, BasicBlock, Location};
+use rustc_middle::ty::TyCtxt;
+use rustc_span::Span;
+
+use super::check::Qualifs;
+use super::ops::{self, NonConstOp};
+use super::qualifs::{NeedsDrop, Qualif};
+use super::ConstCx;
+
+/// Returns `true` if we should use the more precise live drop checker that runs after drop
+/// elaboration.
+pub fn checking_enabled(ccx: &ConstCx<'_, '_>) -> bool {
+    // Const-stable functions must always use the stable live drop checker.
+    if ccx.is_const_stable_const_fn() {
+        return false;
+    }
+
+    ccx.tcx.features().const_precise_live_drops
+}
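A hedged sketch (made-up name, behavior under the gates written into the snippet) of the kind of function this precise, post-elaboration checker is meant to accept: the `Vec` is moved out on the `Some` path and the `None` path diverges, so after drop elaboration no reachable drop of a value that still needs dropping remains, in the spirit of a const `Option::unwrap`.

    #![feature(const_precise_live_drops)]
    #![feature(const_panic)] // for the diverging `None` arm, still gated at this commit

    const fn unwrap_vec(opt: Option<Vec<u8>>) -> Vec<u8> {
        match opt {
            Some(v) => v,
            None => panic!("called `unwrap_vec` on `None`"),
        }
    }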
+
+/// Look for live drops in a const context.
+///
+/// This is separate from the rest of the const checking logic because it must run after drop
+/// elaboration.
+pub fn check_live_drops(tcx: TyCtxt<'tcx>, body: &mir::Body<'tcx>) {
+    let def_id = body.source.def_id().expect_local();
+    let const_kind = tcx.hir().body_const_context(def_id);
+    if const_kind.is_none() {
+        return;
+    }
+
+    let ccx = ConstCx { body, tcx, const_kind, param_env: tcx.param_env(def_id) };
+    if !checking_enabled(&ccx) {
+        return;
+    }
+
+    let mut visitor = CheckLiveDrops { ccx: &ccx, qualifs: Qualifs::default() };
+
+    visitor.visit_body(body);
+}
+
+struct CheckLiveDrops<'mir, 'tcx> {
+    ccx: &'mir ConstCx<'mir, 'tcx>,
+    qualifs: Qualifs<'mir, 'tcx>,
+}
+
+// So we can access `body` and `tcx`.
+impl std::ops::Deref for CheckLiveDrops<'mir, 'tcx> {
+    type Target = ConstCx<'mir, 'tcx>;
+
+    fn deref(&self) -> &Self::Target {
+        &self.ccx
+    }
+}
+
+impl CheckLiveDrops<'mir, 'tcx> {
+    fn check_live_drop(&self, span: Span) {
+        ops::LiveDrop { dropped_at: None }.build_error(self.ccx, span).emit();
+    }
+}
+
+impl Visitor<'tcx> for CheckLiveDrops<'mir, 'tcx> {
+    fn visit_basic_block_data(&mut self, bb: BasicBlock, block: &mir::BasicBlockData<'tcx>) {
+        trace!("visit_basic_block_data: bb={:?} is_cleanup={:?}", bb, block.is_cleanup);
+
+        // Ignore drop terminators in cleanup blocks.
+        if block.is_cleanup {
+            return;
+        }
+
+        self.super_basic_block_data(bb, block);
+    }
+
+    fn visit_terminator(&mut self, terminator: &mir::Terminator<'tcx>, location: Location) {
+        trace!("visit_terminator: terminator={:?} location={:?}", terminator, location);
+
+        match &terminator.kind {
+            mir::TerminatorKind::Drop { place: dropped_place, .. } => {
+                let dropped_ty = dropped_place.ty(self.body, self.tcx).ty;
+                if !NeedsDrop::in_any_value_of_ty(self.ccx, dropped_ty) {
+                    bug!(
+                        "Drop elaboration left behind a Drop for a type that does not need dropping"
+                    );
+                }
+
+                if dropped_place.is_indirect() {
+                    self.check_live_drop(terminator.source_info.span);
+                    return;
+                }
+
+                // Drop elaboration is not precise enough to accept code like
+                // `src/test/ui/consts/control-flow/drop-pass.rs`; e.g., when an `Option<Vec<T>>` is
+                // initialized with `None` and never changed, it still emits drop glue.
+                // Hence we additionally check the qualifs here to allow more code to pass.
+                if self.qualifs.needs_drop(self.ccx, dropped_place.local, location) {
+                    // Use the span where the dropped local was declared for the error.
+                    let span = self.body.local_decls[dropped_place.local].source_info.span;
+                    self.check_live_drop(span);
+                }
+            }
+
+            mir::TerminatorKind::DropAndReplace { .. } => span_bug!(
+                terminator.source_info.span,
+                "`DropAndReplace` should be removed by drop elaboration",
+            ),
+
+            mir::TerminatorKind::Abort
+            | mir::TerminatorKind::Call { .. }
+            | mir::TerminatorKind::Assert { .. }
+            | mir::TerminatorKind::FalseEdge { .. }
+            | mir::TerminatorKind::FalseUnwind { .. }
+            | mir::TerminatorKind::GeneratorDrop
+            | mir::TerminatorKind::Goto { .. }
+            | mir::TerminatorKind::InlineAsm { .. }
+            | mir::TerminatorKind::Resume
+            | mir::TerminatorKind::Return
+            | mir::TerminatorKind::SwitchInt { .. }
+            | mir::TerminatorKind::Unreachable
+            | mir::TerminatorKind::Yield { .. } => {}
+        }
+    }
+}
diff --git a/compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs b/compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs
new file mode 100644
index 00000000000..413a9638eb3
--- /dev/null
+++ b/compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs
@@ -0,0 +1,272 @@
+//! Structural const qualification.
+//!
+//! See the `Qualif` trait for more info.
+
+use rustc_errors::ErrorReported;
+use rustc_middle::mir::*;
+use rustc_middle::ty::{self, subst::SubstsRef, AdtDef, Ty};
+use rustc_span::DUMMY_SP;
+use rustc_trait_selection::traits;
+
+use super::ConstCx;
+
+pub fn in_any_value_of_ty(
+    cx: &ConstCx<'_, 'tcx>,
+    ty: Ty<'tcx>,
+    error_occured: Option<ErrorReported>,
+) -> ConstQualifs {
+    ConstQualifs {
+        has_mut_interior: HasMutInterior::in_any_value_of_ty(cx, ty),
+        needs_drop: NeedsDrop::in_any_value_of_ty(cx, ty),
+        custom_eq: CustomEq::in_any_value_of_ty(cx, ty),
+        error_occured,
+    }
+}
+
+/// A "qualif"(-ication) is a way to look for something "bad" in the MIR that would disqualify some
+/// code for promotion or prevent it from evaluating at compile time.
+///
+/// Normally, we would determine what qualifications apply to each type and error when an illegal
+/// operation is performed on such a type. However, this was found to be too imprecise, especially
+/// in the presence of `enum`s. If only a single variant of an enum has a certain qualification, we
+/// needn't reject code unless it actually constructs and operates on the qualified variant.
+///
+/// To accomplish this, const-checking and promotion use a value-based analysis (as opposed to a
+/// type-based one). Qualifications propagate structurally across variables: If a local (or a
+/// projection of a local) is assigned a qualified value, that local itself becomes qualified.
+pub trait Qualif {
+    /// The name of the file used to debug the dataflow analysis that computes this qualif.
+    const ANALYSIS_NAME: &'static str;
+
+    /// Whether this `Qualif` is cleared when a local is moved from.
+    const IS_CLEARED_ON_MOVE: bool = false;
+
+    /// Extracts the field of `ConstQualifs` that corresponds to this `Qualif`.
+    fn in_qualifs(qualifs: &ConstQualifs) -> bool;
+
+    /// Returns `true` if *any* value of the given type could possibly have this `Qualif`.
+    ///
+    /// This function determines `Qualif`s when we cannot do a value-based analysis. Since qualif
+    /// propagation is context-insensitive, this includes function arguments and values returned
+    /// from a call to another function.
+    ///
+    /// It also determines the `Qualif`s for primitive types.
+    fn in_any_value_of_ty(cx: &ConstCx<'_, 'tcx>, ty: Ty<'tcx>) -> bool;
+
+    /// Returns `true` if this `Qualif` is inherent to the given struct or enum.
+    ///
+    /// By default, `Qualif`s propagate into ADTs in a structural way: An ADT only becomes
+    /// qualified if part of it is assigned a value with that `Qualif`. However, some ADTs *always*
+    /// have a certain `Qualif`, regardless of whether their fields have it. For example, a type
+    /// with a custom `Drop` impl is inherently `NeedsDrop`.
+    ///
+    /// Returning `true` for `in_adt_inherently` but `false` for `in_any_value_of_ty` is unsound.
+    fn in_adt_inherently(
+        cx: &ConstCx<'_, 'tcx>,
+        adt: &'tcx AdtDef,
+        substs: SubstsRef<'tcx>,
+    ) -> bool;
+}
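To make the value-based (rather than type-based) aspect concrete, a sketch with made-up names: the type below contains an `UnsafeCell` in one variant, yet a constant that never constructs that variant is not treated as having interior mutability.

    use std::cell::UnsafeCell;

    #[allow(dead_code)] // the `Shared` variant exists only to make the type non-`Freeze`
    enum Setting {
        Fixed(i32),
        Shared(UnsafeCell<i32>),
    }

    // A purely type-based analysis would flag this borrow, since `Setting` is not `Freeze`.
    // The value-based qualif sees that only `Fixed` is constructed, so `HasMutInterior`
    // does not apply and the reference in the final value is accepted.
    const DEFAULT: &Setting = &Setting::Fixed(42);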
+
+/// Constant containing interior mutability (`UnsafeCell<T>`).
+/// This must be ruled out to make sure that evaluating the constant at compile-time
+/// and at *any point* during the run-time would produce the same result. In particular,
+/// promotion of temporaries must not change program behavior; if the promoted could be
+/// written to, that would be a problem.
+pub struct HasMutInterior;
+
+impl Qualif for HasMutInterior {
+    const ANALYSIS_NAME: &'static str = "flow_has_mut_interior";
+
+    fn in_qualifs(qualifs: &ConstQualifs) -> bool {
+        qualifs.has_mut_interior
+    }
+
+    fn in_any_value_of_ty(cx: &ConstCx<'_, 'tcx>, ty: Ty<'tcx>) -> bool {
+        !ty.is_freeze(cx.tcx.at(DUMMY_SP), cx.param_env)
+    }
+
+    fn in_adt_inherently(cx: &ConstCx<'_, 'tcx>, adt: &'tcx AdtDef, _: SubstsRef<'tcx>) -> bool {
+        // Exactly one type, `UnsafeCell`, has the `HasMutInterior` qualif inherently.
+        // It arises structurally for all other types.
+        Some(adt.did) == cx.tcx.lang_items().unsafe_cell_type()
+    }
+}
+
+/// Constant containing an ADT that implements `Drop`.
+/// This must be ruled out (a) because we cannot run `Drop` during compile-time
+/// as that might not be a `const fn`, and (b) because implicit promotion would
+/// remove side-effects that occur as part of dropping that value.
+pub struct NeedsDrop;
+
+impl Qualif for NeedsDrop {
+    const ANALYSIS_NAME: &'static str = "flow_needs_drop";
+    const IS_CLEARED_ON_MOVE: bool = true;
+
+    fn in_qualifs(qualifs: &ConstQualifs) -> bool {
+        qualifs.needs_drop
+    }
+
+    fn in_any_value_of_ty(cx: &ConstCx<'_, 'tcx>, ty: Ty<'tcx>) -> bool {
+        ty.needs_drop(cx.tcx, cx.param_env)
+    }
+
+    fn in_adt_inherently(cx: &ConstCx<'_, 'tcx>, adt: &'tcx AdtDef, _: SubstsRef<'tcx>) -> bool {
+        adt.has_dtor(cx.tcx)
+    }
+}
+
+/// A constant that cannot be used as part of a pattern in a `match` expression.
+pub struct CustomEq;
+
+impl Qualif for CustomEq {
+    const ANALYSIS_NAME: &'static str = "flow_custom_eq";
+
+    fn in_qualifs(qualifs: &ConstQualifs) -> bool {
+        qualifs.custom_eq
+    }
+
+    fn in_any_value_of_ty(cx: &ConstCx<'_, 'tcx>, ty: Ty<'tcx>) -> bool {
+        // If *any* component of a composite data type does not implement `Structural{Partial,}Eq`,
+        // we know that at least some values of that type are not structural-match. I say "some"
+        // because that component may be part of an enum variant (e.g.,
+        // `Option::<NonStructuralMatchTy>::Some`), in which case some values of this type may be
+        // structural-match (`Option::None`).
+        let id = cx.tcx.hir().local_def_id_to_hir_id(cx.def_id());
+        traits::search_for_structural_match_violation(id, cx.body.span, cx.tcx, ty).is_some()
+    }
+
+    fn in_adt_inherently(
+        cx: &ConstCx<'_, 'tcx>,
+        adt: &'tcx AdtDef,
+        substs: SubstsRef<'tcx>,
+    ) -> bool {
+        let ty = cx.tcx.mk_ty(ty::Adt(adt, substs));
+        !ty.is_structural_eq_shallow(cx.tcx)
+    }
+}
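The user-facing effect of `CustomEq`, sketched with made-up names: a const whose type has a hand-written `PartialEq` is not structural-match and may not be used as a `match` pattern, though ordinary `==` comparison still works.

    #[derive(Eq)]
    struct CaseInsensitive(&'static str);

    impl PartialEq for CaseInsensitive {
        fn eq(&self, other: &Self) -> bool {
            self.0.eq_ignore_ascii_case(other.0)
        }
    }

    const YES: CaseInsensitive = CaseInsensitive("yes");

    fn is_yes(answer: CaseInsensitive) -> bool {
        // Rejected as a pattern: `CaseInsensitive` would need a derived (structural)
        // `PartialEq` for `YES` to be usable in `matches!`/`match` arms.
        // matches!(answer, YES)
        answer == YES
    }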
+
+// FIXME: Use `mir::visit::Visitor` for the `in_*` functions if/when it supports early return.
+
+/// Returns `true` if this `Rvalue` contains qualif `Q`.
+pub fn in_rvalue<Q, F>(cx: &ConstCx<'_, 'tcx>, in_local: &mut F, rvalue: &Rvalue<'tcx>) -> bool
+where
+    Q: Qualif,
+    F: FnMut(Local) -> bool,
+{
+    match rvalue {
+        Rvalue::ThreadLocalRef(_) | Rvalue::NullaryOp(..) => {
+            Q::in_any_value_of_ty(cx, rvalue.ty(cx.body, cx.tcx))
+        }
+
+        Rvalue::Discriminant(place) | Rvalue::Len(place) => {
+            in_place::<Q, _>(cx, in_local, place.as_ref())
+        }
+
+        Rvalue::Use(operand)
+        | Rvalue::Repeat(operand, _)
+        | Rvalue::UnaryOp(_, operand)
+        | Rvalue::Cast(_, operand, _) => in_operand::<Q, _>(cx, in_local, operand),
+
+        Rvalue::BinaryOp(_, box (lhs, rhs)) | Rvalue::CheckedBinaryOp(_, box (lhs, rhs)) => {
+            in_operand::<Q, _>(cx, in_local, lhs) || in_operand::<Q, _>(cx, in_local, rhs)
+        }
+
+        Rvalue::Ref(_, _, place) | Rvalue::AddressOf(_, place) => {
+            // Special-case reborrows to be more like a copy of the reference.
+            if let Some((place_base, ProjectionElem::Deref)) = place.as_ref().last_projection() {
+                let base_ty = place_base.ty(cx.body, cx.tcx).ty;
+                if let ty::Ref(..) = base_ty.kind() {
+                    return in_place::<Q, _>(cx, in_local, place_base);
+                }
+            }
+
+            in_place::<Q, _>(cx, in_local, place.as_ref())
+        }
+
+        Rvalue::Aggregate(kind, operands) => {
+            // Return early if we know that the struct or enum being constructed is always
+            // qualified.
+            if let AggregateKind::Adt(def, _, substs, ..) = **kind {
+                if Q::in_adt_inherently(cx, def, substs) {
+                    return true;
+                }
+            }
+
+            // Otherwise, proceed structurally...
+            operands.iter().any(|o| in_operand::<Q, _>(cx, in_local, o))
+        }
+    }
+}
+
+/// Returns `true` if this `Place` contains qualif `Q`.
+pub fn in_place<Q, F>(cx: &ConstCx<'_, 'tcx>, in_local: &mut F, place: PlaceRef<'tcx>) -> bool
+where
+    Q: Qualif,
+    F: FnMut(Local) -> bool,
+{
+    let mut place = place;
+    while let Some((place_base, elem)) = place.last_projection() {
+        match elem {
+            ProjectionElem::Index(index) if in_local(index) => return true,
+
+            ProjectionElem::Deref
+            | ProjectionElem::Field(_, _)
+            | ProjectionElem::ConstantIndex { .. }
+            | ProjectionElem::Subslice { .. }
+            | ProjectionElem::Downcast(_, _)
+            | ProjectionElem::Index(_) => {}
+        }
+
+        let base_ty = place_base.ty(cx.body, cx.tcx);
+        let proj_ty = base_ty.projection_ty(cx.tcx, elem).ty;
+        if !Q::in_any_value_of_ty(cx, proj_ty) {
+            return false;
+        }
+
+        place = place_base;
+    }
+
+    assert!(place.projection.is_empty());
+    in_local(place.local)
+}
+
+/// Returns `true` if this `Operand` contains qualif `Q`.
+pub fn in_operand<Q, F>(cx: &ConstCx<'_, 'tcx>, in_local: &mut F, operand: &Operand<'tcx>) -> bool
+where
+    Q: Qualif,
+    F: FnMut(Local) -> bool,
+{
+    let constant = match operand {
+        Operand::Copy(place) | Operand::Move(place) => {
+            return in_place::<Q, _>(cx, in_local, place.as_ref());
+        }
+
+        Operand::Constant(c) => c,
+    };
+
+    // Check the qualifs of the value of `const` items.
+    if let Some(ct) = constant.literal.const_for_ty() {
+        if let ty::ConstKind::Unevaluated(ty::Unevaluated { def, substs_: _, promoted }) = ct.val {
+            assert!(promoted.is_none());
+            // Don't peek inside trait associated constants.
+            if cx.tcx.trait_of_item(def.did).is_none() {
+                let qualifs = if let Some((did, param_did)) = def.as_const_arg() {
+                    cx.tcx.at(constant.span).mir_const_qualif_const_arg((did, param_did))
+                } else {
+                    cx.tcx.at(constant.span).mir_const_qualif(def.did)
+                };
+
+                if !Q::in_qualifs(&qualifs) {
+                    return false;
+                }
+
+                // Just in case the type is more specific than
+                // the definition, e.g., impl associated const
+                // with type parameters, take it into account.
+            }
+        }
+    }
+    // Otherwise use the qualifs of the type.
+    Q::in_any_value_of_ty(cx, constant.literal.ty())
+}
diff --git a/compiler/rustc_const_eval/src/transform/check_consts/resolver.rs b/compiler/rustc_const_eval/src/transform/check_consts/resolver.rs
new file mode 100644
index 00000000000..8e1b69a1d74
--- /dev/null
+++ b/compiler/rustc_const_eval/src/transform/check_consts/resolver.rs
@@ -0,0 +1,216 @@
+//! Propagate `Qualif`s between locals and query the results.
+//!
+//! This contains the dataflow analysis used to track `Qualif`s on complex control-flow graphs.
+
+use rustc_index::bit_set::BitSet;
+use rustc_middle::mir::visit::Visitor;
+use rustc_middle::mir::{self, BasicBlock, Local, Location};
+
+use std::marker::PhantomData;
+
+use super::{qualifs, ConstCx, Qualif};
+
+/// A `Visitor` that propagates qualifs between locals. This defines the transfer function of
+/// `FlowSensitiveAnalysis`.
+///
+/// This transfer does nothing when encountering an indirect assignment. Consumers should rely on
+/// the `MaybeMutBorrowedLocals` dataflow pass to see if a `Local` may have become qualified via
+/// an indirect assignment or function call.
+struct TransferFunction<'a, 'mir, 'tcx, Q> {
+    ccx: &'a ConstCx<'mir, 'tcx>,
+    qualifs_per_local: &'a mut BitSet<Local>,
+
+    _qualif: PhantomData<Q>,
+}
+
+impl<Q> TransferFunction<'a, 'mir, 'tcx, Q>
+where
+    Q: Qualif,
+{
+    fn new(ccx: &'a ConstCx<'mir, 'tcx>, qualifs_per_local: &'a mut BitSet<Local>) -> Self {
+        TransferFunction { ccx, qualifs_per_local, _qualif: PhantomData }
+    }
+
+    fn initialize_state(&mut self) {
+        self.qualifs_per_local.clear();
+
+        for arg in self.ccx.body.args_iter() {
+            let arg_ty = self.ccx.body.local_decls[arg].ty;
+            if Q::in_any_value_of_ty(self.ccx, arg_ty) {
+                self.qualifs_per_local.insert(arg);
+            }
+        }
+    }
+
+    fn assign_qualif_direct(&mut self, place: &mir::Place<'tcx>, value: bool) {
+        debug_assert!(!place.is_indirect());
+
+        match (value, place.as_ref()) {
+            (true, mir::PlaceRef { local, .. }) => {
+                self.qualifs_per_local.insert(local);
+            }
+
+            // For now, we do not clear the qualif if a local is overwritten in full by
+            // an unqualified rvalue (e.g. `y = 5`). This is to be consistent
+            // with aggregates where we overwrite all fields with assignments, which would not
+            // get this feature.
+            (false, mir::PlaceRef { local: _, projection: &[] }) => {
+                // self.qualifs_per_local.remove(*local);
+            }
+
+            _ => {}
+        }
+    }
+
+    fn apply_call_return_effect(
+        &mut self,
+        _block: BasicBlock,
+        _func: &mir::Operand<'tcx>,
+        _args: &[mir::Operand<'tcx>],
+        return_place: mir::Place<'tcx>,
+    ) {
+        // We cannot reason about another function's internals, so use conservative type-based
+        // qualification for the result of a function call.
+        let return_ty = return_place.ty(self.ccx.body, self.ccx.tcx).ty;
+        let qualif = Q::in_any_value_of_ty(self.ccx, return_ty);
+
+        if !return_place.is_indirect() {
+            self.assign_qualif_direct(&return_place, qualif);
+        }
+    }
+}
+
+impl<Q> Visitor<'tcx> for TransferFunction<'_, '_, 'tcx, Q>
+where
+    Q: Qualif,
+{
+    fn visit_operand(&mut self, operand: &mir::Operand<'tcx>, location: Location) {
+        self.super_operand(operand, location);
+
+        if !Q::IS_CLEARED_ON_MOVE {
+            return;
+        }
+
+        // If a local with no projections is moved from (e.g. `x` in `y = x`), record that
+        // it no longer needs to be dropped.
+        if let mir::Operand::Move(place) = operand {
+            if let Some(local) = place.as_local() {
+                self.qualifs_per_local.remove(local);
+            }
+        }
+    }
+
+    fn visit_assign(
+        &mut self,
+        place: &mir::Place<'tcx>,
+        rvalue: &mir::Rvalue<'tcx>,
+        location: Location,
+    ) {
+        let qualif = qualifs::in_rvalue::<Q, _>(
+            self.ccx,
+            &mut |l| self.qualifs_per_local.contains(l),
+            rvalue,
+        );
+        if !place.is_indirect() {
+            self.assign_qualif_direct(place, qualif);
+        }
+
+        // We need to assign qualifs to the left-hand side before visiting `rvalue` since
+        // qualifs can be cleared on move.
+        self.super_assign(place, rvalue, location);
+    }
+
+    fn visit_terminator(&mut self, terminator: &mir::Terminator<'tcx>, location: Location) {
+        // The effect of assignment to the return place in `TerminatorKind::Call` is not applied
+        // here; that occurs in `apply_call_return_effect`.
+
+        if let mir::TerminatorKind::DropAndReplace { value, place, .. } = &terminator.kind {
+            let qualif = qualifs::in_operand::<Q, _>(
+                self.ccx,
+                &mut |l| self.qualifs_per_local.contains(l),
+                value,
+            );
+
+            if !place.is_indirect() {
+                self.assign_qualif_direct(place, qualif);
+            }
+        }
+
+        // We need to assign qualifs to the dropped location before visiting the operand that
+        // replaces it since qualifs can be cleared on move.
+        self.super_terminator(terminator, location);
+    }
+}
+
+/// The dataflow analysis used to propagate qualifs on arbitrary CFGs.
+pub(super) struct FlowSensitiveAnalysis<'a, 'mir, 'tcx, Q> {
+    ccx: &'a ConstCx<'mir, 'tcx>,
+    _qualif: PhantomData<Q>,
+}
+
+impl<'a, 'mir, 'tcx, Q> FlowSensitiveAnalysis<'a, 'mir, 'tcx, Q>
+where
+    Q: Qualif,
+{
+    pub(super) fn new(_: Q, ccx: &'a ConstCx<'mir, 'tcx>) -> Self {
+        FlowSensitiveAnalysis { ccx, _qualif: PhantomData }
+    }
+
+    fn transfer_function(
+        &self,
+        state: &'a mut BitSet<Local>,
+    ) -> TransferFunction<'a, 'mir, 'tcx, Q> {
+        TransferFunction::<Q>::new(self.ccx, state)
+    }
+}
+
+impl<Q> rustc_mir_dataflow::AnalysisDomain<'tcx> for FlowSensitiveAnalysis<'_, '_, 'tcx, Q>
+where
+    Q: Qualif,
+{
+    type Domain = BitSet<Local>;
+
+    const NAME: &'static str = Q::ANALYSIS_NAME;
+
+    fn bottom_value(&self, body: &mir::Body<'tcx>) -> Self::Domain {
+        BitSet::new_empty(body.local_decls.len())
+    }
+
+    fn initialize_start_block(&self, _body: &mir::Body<'tcx>, state: &mut Self::Domain) {
+        self.transfer_function(state).initialize_state();
+    }
+}
+
+impl<Q> rustc_mir_dataflow::Analysis<'tcx> for FlowSensitiveAnalysis<'_, '_, 'tcx, Q>
+where
+    Q: Qualif,
+{
+    fn apply_statement_effect(
+        &self,
+        state: &mut Self::Domain,
+        statement: &mir::Statement<'tcx>,
+        location: Location,
+    ) {
+        self.transfer_function(state).visit_statement(statement, location);
+    }
+
+    fn apply_terminator_effect(
+        &self,
+        state: &mut Self::Domain,
+        terminator: &mir::Terminator<'tcx>,
+        location: Location,
+    ) {
+        self.transfer_function(state).visit_terminator(terminator, location);
+    }
+
+    fn apply_call_return_effect(
+        &self,
+        state: &mut Self::Domain,
+        block: BasicBlock,
+        func: &mir::Operand<'tcx>,
+        args: &[mir::Operand<'tcx>],
+        return_place: mir::Place<'tcx>,
+    ) {
+        self.transfer_function(state).apply_call_return_effect(block, func, args, return_place)
+    }
+}
diff --git a/compiler/rustc_const_eval/src/transform/mod.rs b/compiler/rustc_const_eval/src/transform/mod.rs
new file mode 100644
index 00000000000..38c28f34934
--- /dev/null
+++ b/compiler/rustc_const_eval/src/transform/mod.rs
@@ -0,0 +1,5 @@
+pub mod check_consts;
+pub mod promote_consts;
+pub mod validate;
+
+pub use rustc_middle::mir::MirPass;
diff --git a/compiler/rustc_const_eval/src/transform/promote_consts.rs b/compiler/rustc_const_eval/src/transform/promote_consts.rs
new file mode 100644
index 00000000000..6822ad2d7b5
--- /dev/null
+++ b/compiler/rustc_const_eval/src/transform/promote_consts.rs
@@ -0,0 +1,1092 @@
+//! A pass that promotes borrows of constant rvalues.
+//!
+//! The rvalues considered constant are trees of temps,
+//! each with exactly one initialization, and holding
+//! a constant value with no interior mutability.
+//! They are placed into a new MIR constant body in
+//! `promoted` and the borrow rvalue is replaced with
+//! a `Literal::Promoted` using the index into `promoted`
+//! of that constant MIR.
+//!
+//! This pass assumes that every use is dominated by an
+//! initialization; on broken MIR where that does not hold,
+//! it can silence errors if move analysis runs after promotion.
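For orientation, the classic surface-level effect of this pass (made-up function name): a borrow of a temporary that qualifies as a constant is given `'static` lifetime by being moved into a promoted MIR body.

    fn answer() -> &'static i32 {
        // The temporary holding `2 + 40` is a tree of operations on constants with no interior
        // mutability and no `Drop`, so it is extracted into a separate promoted MIR body and
        // the borrow can be returned with `'static` lifetime.
        &(2 + 40)
    }

    fn main() {
        assert_eq!(*answer(), 42);
    }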
+
+use rustc_hir as hir;
+use rustc_middle::mir::traversal::ReversePostorder;
+use rustc_middle::mir::visit::{MutVisitor, MutatingUseContext, PlaceContext, Visitor};
+use rustc_middle::mir::*;
+use rustc_middle::ty::cast::CastTy;
+use rustc_middle::ty::subst::InternalSubsts;
+use rustc_middle::ty::{self, List, TyCtxt, TypeFoldable};
+use rustc_span::Span;
+
+use rustc_index::vec::{Idx, IndexVec};
+
+use std::cell::Cell;
+use std::{cmp, iter, mem};
+
+use crate::const_eval::{is_const_fn, is_unstable_const_fn};
+use crate::transform::check_consts::{is_lang_panic_fn, qualifs, ConstCx};
+use crate::transform::MirPass;
+
+/// A `MirPass` for promotion.
+///
+/// Promotion is the extraction of promotable temps into separate MIR bodies so they can have
+/// `'static` lifetime.
+///
+/// After this pass is run, `promoted_fragments` will hold the MIR body corresponding to each
+/// newly created `Constant`.
+#[derive(Default)]
+pub struct PromoteTemps<'tcx> {
+    pub promoted_fragments: Cell<IndexVec<Promoted, Body<'tcx>>>,
+}
+
+impl<'tcx> MirPass<'tcx> for PromoteTemps<'tcx> {
+    fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+        // There's not really any point in promoting errorful MIR.
+        //
+        // This does not include MIR that failed const-checking, which we still try to promote.
+        if body.return_ty().references_error() {
+            tcx.sess.delay_span_bug(body.span, "PromoteTemps: MIR had errors");
+            return;
+        }
+
+        if body.source.promoted.is_some() {
+            return;
+        }
+
+        let mut rpo = traversal::reverse_postorder(body);
+        let ccx = ConstCx::new(tcx, body);
+        let (temps, all_candidates) = collect_temps_and_candidates(&ccx, &mut rpo);
+
+        let promotable_candidates = validate_candidates(&ccx, &temps, &all_candidates);
+
+        let promoted = promote_candidates(body, tcx, temps, promotable_candidates);
+        self.promoted_fragments.set(promoted);
+    }
+}
+
+/// State of a temporary during collection and promotion.
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+pub enum TempState {
+    /// No references to this temp.
+    Undefined,
+    /// One direct assignment and any number of direct uses.
+    /// A borrow of this temp is promotable if the assigned
+    /// value is qualified as constant.
+    Defined { location: Location, uses: usize },
+    /// Any other combination of assignments/uses.
+    Unpromotable,
+    /// This temp was part of an rvalue which got extracted
+    /// during promotion and needs cleanup.
+    PromotedOut,
+}
+
+impl TempState {
+    pub fn is_promotable(&self) -> bool {
+        debug!("is_promotable: self={:?}", self);
+        matches!(self, TempState::Defined { .. })
+    }
+}
+
+/// A "root candidate" for promotion, which will become the
+/// returned value in a promoted MIR, unless it's a subset
+/// of a larger candidate.
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+pub enum Candidate {
+    /// Borrow of a constant temporary, candidate for lifetime extension.
+    Ref(Location),
+}
+
+impl Candidate {
+    fn source_info(&self, body: &Body<'_>) -> SourceInfo {
+        match self {
+            Candidate::Ref(location) => *body.source_info(*location),
+        }
+    }
+}
+
+struct Collector<'a, 'tcx> {
+    ccx: &'a ConstCx<'a, 'tcx>,
+    temps: IndexVec<Local, TempState>,
+    candidates: Vec<Candidate>,
+}
+
+impl<'tcx> Visitor<'tcx> for Collector<'_, 'tcx> {
+    fn visit_local(&mut self, &index: &Local, context: PlaceContext, location: Location) {
+        debug!("visit_local: index={:?} context={:?} location={:?}", index, context, location);
+        // We're only interested in temporaries and the return place
+        match self.ccx.body.local_kind(index) {
+            LocalKind::Temp | LocalKind::ReturnPointer => {}
+            LocalKind::Arg | LocalKind::Var => return,
+        }
+
+        // Ignore drops: if the temp gets promoted,
+        // then it's constant and the drop is a no-op.
+        // Non-uses are also irrelevant.
+        if context.is_drop() || !context.is_use() {
+            debug!(
+                "visit_local: context.is_drop={:?} context.is_use={:?}",
+                context.is_drop(),
+                context.is_use(),
+            );
+            return;
+        }
+
+        let temp = &mut self.temps[index];
+        debug!("visit_local: temp={:?}", temp);
+        if *temp == TempState::Undefined {
+            match context {
+                PlaceContext::MutatingUse(MutatingUseContext::Store)
+                | PlaceContext::MutatingUse(MutatingUseContext::Call) => {
+                    *temp = TempState::Defined { location, uses: 0 };
+                    return;
+                }
+                _ => { /* mark as unpromotable below */ }
+            }
+        } else if let TempState::Defined { ref mut uses, .. } = *temp {
+            // We always allow borrows, even mutable ones, as we need
+            // to promote mutable borrows of some ZSTs e.g., `&mut []`.
+            let allowed_use = match context {
+                PlaceContext::MutatingUse(MutatingUseContext::Borrow)
+                | PlaceContext::NonMutatingUse(_) => true,
+                PlaceContext::MutatingUse(_) | PlaceContext::NonUse(_) => false,
+            };
+            debug!("visit_local: allowed_use={:?}", allowed_use);
+            if allowed_use {
+                *uses += 1;
+                return;
+            }
+            /* mark as unpromotable below */
+        }
+        *temp = TempState::Unpromotable;
+    }
+
+    fn visit_rvalue(&mut self, rvalue: &Rvalue<'tcx>, location: Location) {
+        self.super_rvalue(rvalue, location);
+
+        match *rvalue {
+            Rvalue::Ref(..) => {
+                self.candidates.push(Candidate::Ref(location));
+            }
+            _ => {}
+        }
+    }
+}
+
+pub fn collect_temps_and_candidates(
+    ccx: &ConstCx<'mir, 'tcx>,
+    rpo: &mut ReversePostorder<'_, 'tcx>,
+) -> (IndexVec<Local, TempState>, Vec<Candidate>) {
+    let mut collector = Collector {
+        temps: IndexVec::from_elem(TempState::Undefined, &ccx.body.local_decls),
+        candidates: vec![],
+        ccx,
+    };
+    for (bb, data) in rpo {
+        collector.visit_basic_block_data(bb, data);
+    }
+    (collector.temps, collector.candidates)
+}
+
+/// Checks whether locals that appear in a promotion context (`Candidate`) are actually promotable.
+///
+/// This wraps an `Item`, and has access to all fields of that `Item` via `Deref` coercion.
+struct Validator<'a, 'tcx> {
+    ccx: &'a ConstCx<'a, 'tcx>,
+    temps: &'a IndexVec<Local, TempState>,
+}
+
+impl std::ops::Deref for Validator<'a, 'tcx> {
+    type Target = ConstCx<'a, 'tcx>;
+
+    fn deref(&self) -> &Self::Target {
+        &self.ccx
+    }
+}
+
+struct Unpromotable;
+
+impl<'tcx> Validator<'_, 'tcx> {
+    fn validate_candidate(&self, candidate: Candidate) -> Result<(), Unpromotable> {
+        match candidate {
+            Candidate::Ref(loc) => {
+                let statement = &self.body[loc.block].statements[loc.statement_index];
+                match &statement.kind {
+                    StatementKind::Assign(box (_, Rvalue::Ref(_, kind, place))) => {
+                        // We can only promote interior borrows of promotable temps (non-temps
+                        // don't get promoted anyway).
+                        self.validate_local(place.local)?;
+
+                        // The reference operation itself must be promotable.
+                        // (Needs to come after `validate_local` to avoid ICEs.)
+                        self.validate_ref(*kind, place)?;
+
+                        // We do not check all the projections (they do not get promoted anyway),
+                        // but we do stay away from promoting anything involving a dereference.
+                        if place.projection.contains(&ProjectionElem::Deref) {
+                            return Err(Unpromotable);
+                        }
+
+                        // We cannot promote things that need dropping, since the promoted value
+                        // would not get dropped.
+                        if self.qualif_local::<qualifs::NeedsDrop>(place.local) {
+                            return Err(Unpromotable);
+                        }
+
+                        Ok(())
+                    }
+                    _ => bug!(),
+                }
+            }
+        }
+    }
+
+    // FIXME(eddyb) maybe cache this?
+    fn qualif_local<Q: qualifs::Qualif>(&self, local: Local) -> bool {
+        if let TempState::Defined { location: loc, .. } = self.temps[local] {
+            let num_stmts = self.body[loc.block].statements.len();
+
+            if loc.statement_index < num_stmts {
+                let statement = &self.body[loc.block].statements[loc.statement_index];
+                match &statement.kind {
+                    StatementKind::Assign(box (_, rhs)) => qualifs::in_rvalue::<Q, _>(
+                        &self.ccx,
+                        &mut |l| self.qualif_local::<Q>(l),
+                        rhs,
+                    ),
+                    _ => {
+                        span_bug!(
+                            statement.source_info.span,
+                            "{:?} is not an assignment",
+                            statement
+                        );
+                    }
+                }
+            } else {
+                let terminator = self.body[loc.block].terminator();
+                match &terminator.kind {
+                    TerminatorKind::Call { .. } => {
+                        let return_ty = self.body.local_decls[local].ty;
+                        Q::in_any_value_of_ty(&self.ccx, return_ty)
+                    }
+                    kind => {
+                        span_bug!(terminator.source_info.span, "{:?} not promotable", kind);
+                    }
+                }
+            }
+        } else {
+            let span = self.body.local_decls[local].source_info.span;
+            span_bug!(span, "{:?} not promotable, qualif_local shouldn't have been called", local);
+        }
+    }
+
+    // FIXME(eddyb) maybe cache this?
+    fn validate_local(&self, local: Local) -> Result<(), Unpromotable> {
+        if let TempState::Defined { location: loc, .. } = self.temps[local] {
+            let block = &self.body[loc.block];
+            let num_stmts = block.statements.len();
+
+            if loc.statement_index < num_stmts {
+                let statement = &block.statements[loc.statement_index];
+                match &statement.kind {
+                    StatementKind::Assign(box (_, rhs)) => self.validate_rvalue(rhs),
+                    _ => {
+                        span_bug!(
+                            statement.source_info.span,
+                            "{:?} is not an assignment",
+                            statement
+                        );
+                    }
+                }
+            } else {
+                let terminator = block.terminator();
+                match &terminator.kind {
+                    TerminatorKind::Call { func, args, .. } => self.validate_call(func, args),
+                    TerminatorKind::Yield { .. } => Err(Unpromotable),
+                    kind => {
+                        span_bug!(terminator.source_info.span, "{:?} not promotable", kind);
+                    }
+                }
+            }
+        } else {
+            Err(Unpromotable)
+        }
+    }
+
+    fn validate_place(&self, place: PlaceRef<'tcx>) -> Result<(), Unpromotable> {
+        match place.last_projection() {
+            None => self.validate_local(place.local),
+            Some((place_base, elem)) => {
+                // Validate topmost projection, then recurse.
+                match elem {
+                    ProjectionElem::Deref => {
+                        let mut promotable = false;
+                        // We need to make sure this is a `Deref` of a local with no further projections.
+                        // Discussion can be found at
+                        // https://github.com/rust-lang/rust/pull/74945#discussion_r463063247
+                        if let Some(local) = place_base.as_local() {
+                            // This is a special treatment for cases like *&STATIC where STATIC is a
+                            // global static variable.
+                            // This pattern is generated only when global static variables are directly
+                            // accessed, and it can be promoted safely.
+                            if let TempState::Defined { location, .. } = self.temps[local] {
+                                let def_stmt = self.body[location.block]
+                                    .statements
+                                    .get(location.statement_index);
+                                if let Some(Statement {
+                                    kind:
+                                        StatementKind::Assign(box (
+                                            _,
+                                            Rvalue::Use(Operand::Constant(c)),
+                                        )),
+                                    ..
+                                }) = def_stmt
+                                {
+                                    if let Some(did) = c.check_static_ptr(self.tcx) {
+                                        // Evaluating a promoted may not read statics except if it got
+                                        // promoted from a static (this is a CTFE check). So we
+                                        // can only promote static accesses inside statics.
+                                        if let Some(hir::ConstContext::Static(..)) = self.const_kind
+                                        {
+                                            if !self.tcx.is_thread_local_static(did) {
+                                                promotable = true;
+                                            }
+                                        }
+                                    }
+                                }
+                            }
+                        }
+                        if !promotable {
+                            return Err(Unpromotable);
+                        }
+                    }
+                    ProjectionElem::Downcast(..) => {
+                        return Err(Unpromotable);
+                    }
+
+                    ProjectionElem::ConstantIndex { .. } | ProjectionElem::Subslice { .. } => {}
+
+                    ProjectionElem::Index(local) => {
+                        let mut promotable = false;
+                        // Only accept if we can predict the index and are indexing an array.
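+                        // For illustration (hypothetical code): a borrow like `&[1, 2, 3][1]` uses a
+                        // constant, in-bounds index into an array and passes this check, while
+                        // `&xs[i]` with a runtime `i` does not.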
+                        let val =
+                            if let TempState::Defined { location: loc, .. } = self.temps[local] {
+                                let block = &self.body[loc.block];
+                                if loc.statement_index < block.statements.len() {
+                                    let statement = &block.statements[loc.statement_index];
+                                    match &statement.kind {
+                                        StatementKind::Assign(box (
+                                            _,
+                                            Rvalue::Use(Operand::Constant(c)),
+                                        )) => c.literal.try_eval_usize(self.tcx, self.param_env),
+                                        _ => None,
+                                    }
+                                } else {
+                                    None
+                                }
+                            } else {
+                                None
+                            };
+                        if let Some(idx) = val {
+                            // Determine the type of the thing we are indexing.
+                            let ty = place_base.ty(self.body, self.tcx).ty;
+                            match ty.kind() {
+                                ty::Array(_, len) => {
+                                    // It's an array; determine its length.
+                                    if let Some(len) = len.try_eval_usize(self.tcx, self.param_env)
+                                    {
+                                        // If the index is in-bounds, go ahead.
+                                        if idx < len {
+                                            promotable = true;
+                                        }
+                                    }
+                                }
+                                _ => {}
+                            }
+                        }
+                        if !promotable {
+                            return Err(Unpromotable);
+                        }
+
+                        self.validate_local(local)?;
+                    }
+
+                    ProjectionElem::Field(..) => {
+                        let base_ty = place_base.ty(self.body, self.tcx).ty;
+                        if base_ty.is_union() {
+                            // No promotion of union field accesses.
+                            return Err(Unpromotable);
+                        }
+                    }
+                }
+
+                self.validate_place(place_base)
+            }
+        }
+    }
+
+    fn validate_operand(&self, operand: &Operand<'tcx>) -> Result<(), Unpromotable> {
+        match operand {
+            Operand::Copy(place) | Operand::Move(place) => self.validate_place(place.as_ref()),
+
+            // The qualifs for a constant (e.g. `HasMutInterior`) are checked in
+            // `validate_rvalue` upon access.
+            Operand::Constant(c) => {
+                if let Some(def_id) = c.check_static_ptr(self.tcx) {
+                    // Only allow statics (not consts) to refer to other statics.
+                    // FIXME(eddyb) does this matter at all for promotion?
+                    // FIXME(RalfJung) it makes little sense to not promote this in `fn`/`const fn`,
+                    // and in `const` this cannot occur anyway. The only concern is that we might
+                    // promote even `let x = &STATIC` which would be useless, but this applies to
+                    // promotion inside statics as well.
+                    let is_static = matches!(self.const_kind, Some(hir::ConstContext::Static(_)));
+                    if !is_static {
+                        return Err(Unpromotable);
+                    }
+
+                    let is_thread_local = self.tcx.is_thread_local_static(def_id);
+                    if is_thread_local {
+                        return Err(Unpromotable);
+                    }
+                }
+
+                Ok(())
+            }
+        }
+    }
+
+    fn validate_ref(&self, kind: BorrowKind, place: &Place<'tcx>) -> Result<(), Unpromotable> {
+        match kind {
+            // Reject these borrow types just to be safe.
+            // FIXME(RalfJung): could we allow them? Should we? No point in it until we have a use case.
+            BorrowKind::Shallow | BorrowKind::Unique => return Err(Unpromotable),
+
+            BorrowKind::Shared => {
+                let has_mut_interior = self.qualif_local::<qualifs::HasMutInterior>(place.local);
+                if has_mut_interior {
+                    return Err(Unpromotable);
+                }
+            }
+
+            BorrowKind::Mut { .. } => {
+                let ty = place.ty(self.body, self.tcx).ty;
+
+                // In theory, any zero-sized value could be borrowed
+                // mutably without consequences. However, only &mut []
+                // is allowed right now.
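+                // For illustration (hypothetical code): `&mut []` borrows a zero-length
+                // array and is accepted here, while e.g. `&mut 0` is rejected.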
+                if let ty::Array(_, len) = ty.kind() {
+                    match len.try_eval_usize(self.tcx, self.param_env) {
+                        Some(0) => {}
+                        _ => return Err(Unpromotable),
+                    }
+                } else {
+                    return Err(Unpromotable);
+                }
+            }
+        }
+
+        Ok(())
+    }
+
+    fn validate_rvalue(&self, rvalue: &Rvalue<'tcx>) -> Result<(), Unpromotable> {
+        match rvalue {
+            Rvalue::Use(operand) | Rvalue::Repeat(operand, _) => {
+                self.validate_operand(operand)?;
+            }
+
+            Rvalue::Discriminant(place) | Rvalue::Len(place) => {
+                self.validate_place(place.as_ref())?
+            }
+
+            Rvalue::ThreadLocalRef(_) => return Err(Unpromotable),
+
+            Rvalue::Cast(kind, operand, cast_ty) => {
+                if matches!(kind, CastKind::Misc) {
+                    let operand_ty = operand.ty(self.body, self.tcx);
+                    let cast_in = CastTy::from_ty(operand_ty).expect("bad input type for cast");
+                    let cast_out = CastTy::from_ty(cast_ty).expect("bad output type for cast");
+                    if let (CastTy::Ptr(_) | CastTy::FnPtr, CastTy::Int(_)) = (cast_in, cast_out) {
+                        // ptr-to-int casts are not possible in consts and thus not promotable
+                        return Err(Unpromotable);
+                    }
+                    // int-to-ptr casts are fine, they just use the integer value at pointer type.
+                }
+
+                self.validate_operand(operand)?;
+            }
+
+            Rvalue::NullaryOp(op, _) => match op {
+                NullOp::Box => return Err(Unpromotable),
+                NullOp::SizeOf => {}
+            },
+
+            Rvalue::UnaryOp(op, operand) => {
+                match op {
+                    // These operations can never fail.
+                    UnOp::Neg | UnOp::Not => {}
+                }
+
+                self.validate_operand(operand)?;
+            }
+
+            Rvalue::BinaryOp(op, box (lhs, rhs)) | Rvalue::CheckedBinaryOp(op, box (lhs, rhs)) => {
+                let op = *op;
+                let lhs_ty = lhs.ty(self.body, self.tcx);
+
+                if let ty::RawPtr(_) | ty::FnPtr(..) = lhs_ty.kind() {
+                    // Raw and fn pointer operations are not allowed inside consts and thus not promotable.
+                    assert!(matches!(
+                        op,
+                        BinOp::Eq
+                            | BinOp::Ne
+                            | BinOp::Le
+                            | BinOp::Lt
+                            | BinOp::Ge
+                            | BinOp::Gt
+                            | BinOp::Offset
+                    ));
+                    return Err(Unpromotable);
+                }
+
+                match op {
+                    BinOp::Div | BinOp::Rem => {
+                        if lhs_ty.is_integral() {
+                            // Integer division: the RHS must be a non-zero const.
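+                            // For illustration (hypothetical code): a divisor written as the
+                            // literal `2` passes this check; a runtime divisor `y`, or a
+                            // constant `0`, does not.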
+                            let const_val = match rhs {
+                                Operand::Constant(c) => {
+                                    c.literal.try_eval_bits(self.tcx, self.param_env, lhs_ty)
+                                }
+                                _ => None,
+                            };
+                            match const_val {
+                                Some(x) if x != 0 => {}        // okay
+                                _ => return Err(Unpromotable), // value not known or 0 -- not okay
+                            }
+                        }
+                    }
+                    // The remaining operations can never fail.
+                    BinOp::Eq
+                    | BinOp::Ne
+                    | BinOp::Le
+                    | BinOp::Lt
+                    | BinOp::Ge
+                    | BinOp::Gt
+                    | BinOp::Offset
+                    | BinOp::Add
+                    | BinOp::Sub
+                    | BinOp::Mul
+                    | BinOp::BitXor
+                    | BinOp::BitAnd
+                    | BinOp::BitOr
+                    | BinOp::Shl
+                    | BinOp::Shr => {}
+                }
+
+                self.validate_operand(lhs)?;
+                self.validate_operand(rhs)?;
+            }
+
+            Rvalue::AddressOf(_, place) => {
+                // We accept `&raw *`, i.e., raw reborrows -- creating a raw pointer is
+                // no problem, only using it is.
+                if let Some((place_base, ProjectionElem::Deref)) = place.as_ref().last_projection()
+                {
+                    let base_ty = place_base.ty(self.body, self.tcx).ty;
+                    if let ty::Ref(..) = base_ty.kind() {
+                        return self.validate_place(place_base);
+                    }
+                }
+                return Err(Unpromotable);
+            }
+
+            Rvalue::Ref(_, kind, place) => {
+                // Special-case reborrows to be more like a copy of the reference.
+                let mut place_simplified = place.as_ref();
+                if let Some((place_base, ProjectionElem::Deref)) =
+                    place_simplified.last_projection()
+                {
+                    let base_ty = place_base.ty(self.body, self.tcx).ty;
+                    if let ty::Ref(..) = base_ty.kind() {
+                        place_simplified = place_base;
+                    }
+                }
+
+                self.validate_place(place_simplified)?;
+
+                // Check that the reference is fine (using the original place!).
+                // (Needs to come after `validate_place` to avoid ICEs.)
+                self.validate_ref(*kind, place)?;
+            }
+
+            Rvalue::Aggregate(_, operands) => {
+                for o in operands {
+                    self.validate_operand(o)?;
+                }
+            }
+        }
+
+        Ok(())
+    }
+
+    fn validate_call(
+        &self,
+        callee: &Operand<'tcx>,
+        args: &[Operand<'tcx>],
+    ) -> Result<(), Unpromotable> {
+        let fn_ty = callee.ty(self.body, self.tcx);
+
+        // Inside const/static items, we promote all (eligible) function calls.
+        // Everywhere else, we require `#[rustc_promotable]` on the callee.
+        let promote_all_const_fn = matches!(
+            self.const_kind,
+            Some(hir::ConstContext::Static(_) | hir::ConstContext::Const)
+        );
+        if !promote_all_const_fn {
+            if let ty::FnDef(def_id, _) = *fn_ty.kind() {
+                // Never promote runtime `const fn` calls of
+                // functions without `#[rustc_promotable]`.
+                if !self.tcx.is_promotable_const_fn(def_id) {
+                    return Err(Unpromotable);
+                }
+            }
+        }
+
+        let is_const_fn = match *fn_ty.kind() {
+            ty::FnDef(def_id, _) => {
+                is_const_fn(self.tcx, def_id)
+                    || is_unstable_const_fn(self.tcx, def_id).is_some()
+                    || is_lang_panic_fn(self.tcx, def_id)
+            }
+            _ => false,
+        };
+        if !is_const_fn {
+            return Err(Unpromotable);
+        }
+
+        self.validate_operand(callee)?;
+        for arg in args {
+            self.validate_operand(arg)?;
+        }
+
+        Ok(())
+    }
+}
+
+// FIXME(eddyb) remove the differences for promotability in `static`, `const`, `const fn`.
+pub fn validate_candidates(
+    ccx: &ConstCx<'_, '_>,
+    temps: &IndexVec<Local, TempState>,
+    candidates: &[Candidate],
+) -> Vec<Candidate> {
+    let validator = Validator { ccx, temps };
+
+    candidates
+        .iter()
+        .copied()
+        .filter(|&candidate| validator.validate_candidate(candidate).is_ok())
+        .collect()
+}
+
+struct Promoter<'a, 'tcx> {
+    tcx: TyCtxt<'tcx>,
+    source: &'a mut Body<'tcx>,
+    promoted: Body<'tcx>,
+    temps: &'a mut IndexVec<Local, TempState>,
+    extra_statements: &'a mut Vec<(Location, Statement<'tcx>)>,
+
+    /// If true, all nested temps are also kept in the
+    /// source MIR, not moved to the promoted MIR.
+    keep_original: bool,
+}
+
+impl<'a, 'tcx> Promoter<'a, 'tcx> {
+    fn new_block(&mut self) -> BasicBlock {
+        let span = self.promoted.span;
+        self.promoted.basic_blocks_mut().push(BasicBlockData {
+            statements: vec![],
+            terminator: Some(Terminator {
+                source_info: SourceInfo::outermost(span),
+                kind: TerminatorKind::Return,
+            }),
+            is_cleanup: false,
+        })
+    }
+
+    fn assign(&mut self, dest: Local, rvalue: Rvalue<'tcx>, span: Span) {
+        let last = self.promoted.basic_blocks().last().unwrap();
+        let data = &mut self.promoted[last];
+        data.statements.push(Statement {
+            source_info: SourceInfo::outermost(span),
+            kind: StatementKind::Assign(Box::new((Place::from(dest), rvalue))),
+        });
+    }
+
+    fn is_temp_kind(&self, local: Local) -> bool {
+        self.source.local_kind(local) == LocalKind::Temp
+    }
+
+    /// Copies the initialization of this temp to the
+    /// promoted MIR, recursing through temps.
+    fn promote_temp(&mut self, temp: Local) -> Local {
+        let old_keep_original = self.keep_original;
+        let loc = match self.temps[temp] {
+            TempState::Defined { location, uses } if uses > 0 => {
+                if uses > 1 {
+                    self.keep_original = true;
+                }
+                location
+            }
+            state => {
+                span_bug!(self.promoted.span, "{:?} not promotable: {:?}", temp, state);
+            }
+        };
+        if !self.keep_original {
+            self.temps[temp] = TempState::PromotedOut;
+        }
+
+        let num_stmts = self.source[loc.block].statements.len();
+        let new_temp = self.promoted.local_decls.push(LocalDecl::new(
+            self.source.local_decls[temp].ty,
+            self.source.local_decls[temp].source_info.span,
+        ));
+
+        debug!("promote({:?} @ {:?}/{:?}, {:?})", temp, loc, num_stmts, self.keep_original);
+
+        // First, take the Rvalue or Call out of the source MIR,
+        // or duplicate it, depending on keep_original.
+        if loc.statement_index < num_stmts {
+            let (mut rvalue, source_info) = {
+                let statement = &mut self.source[loc.block].statements[loc.statement_index];
+                let rhs = match statement.kind {
+                    StatementKind::Assign(box (_, ref mut rhs)) => rhs,
+                    _ => {
+                        span_bug!(
+                            statement.source_info.span,
+                            "{:?} is not an assignment",
+                            statement
+                        );
+                    }
+                };
+
+                (
+                    if self.keep_original {
+                        rhs.clone()
+                    } else {
+                        let unit = Rvalue::Use(Operand::Constant(Box::new(Constant {
+                            span: statement.source_info.span,
+                            user_ty: None,
+                            literal: ty::Const::zero_sized(self.tcx, self.tcx.types.unit).into(),
+                        })));
+                        mem::replace(rhs, unit)
+                    },
+                    statement.source_info,
+                )
+            };
+
+            self.visit_rvalue(&mut rvalue, loc);
+            self.assign(new_temp, rvalue, source_info.span);
+        } else {
+            let terminator = if self.keep_original {
+                self.source[loc.block].terminator().clone()
+            } else {
+                let terminator = self.source[loc.block].terminator_mut();
+                let target = match terminator.kind {
+                    TerminatorKind::Call { destination: Some((_, target)), .. } => target,
+                    ref kind => {
+                        span_bug!(terminator.source_info.span, "{:?} not promotable", kind);
+                    }
+                };
+                Terminator {
+                    source_info: terminator.source_info,
+                    kind: mem::replace(&mut terminator.kind, TerminatorKind::Goto { target }),
+                }
+            };
+
+            match terminator.kind {
+                TerminatorKind::Call { mut func, mut args, from_hir_call, fn_span, .. } => {
+                    self.visit_operand(&mut func, loc);
+                    for arg in &mut args {
+                        self.visit_operand(arg, loc);
+                    }
+
+                    let last = self.promoted.basic_blocks().last().unwrap();
+                    let new_target = self.new_block();
+
+                    *self.promoted[last].terminator_mut() = Terminator {
+                        kind: TerminatorKind::Call {
+                            func,
+                            args,
+                            cleanup: None,
+                            destination: Some((Place::from(new_temp), new_target)),
+                            from_hir_call,
+                            fn_span,
+                        },
+                        source_info: SourceInfo::outermost(terminator.source_info.span),
+                        ..terminator
+                    };
+                }
+                ref kind => {
+                    span_bug!(terminator.source_info.span, "{:?} not promotable", kind);
+                }
+            };
+        };
+
+        self.keep_original = old_keep_original;
+        new_temp
+    }
+
+    fn promote_candidate(
+        mut self,
+        candidate: Candidate,
+        next_promoted_id: usize,
+    ) -> Option<Body<'tcx>> {
+        let def = self.source.source.with_opt_param();
+        let mut rvalue = {
+            let promoted = &mut self.promoted;
+            let promoted_id = Promoted::new(next_promoted_id);
+            let tcx = self.tcx;
+            let mut promoted_operand = |ty, span| {
+                promoted.span = span;
+                promoted.local_decls[RETURN_PLACE] = LocalDecl::new(ty, span);
+
+                Operand::Constant(Box::new(Constant {
+                    span,
+                    user_ty: None,
+                    literal: tcx
+                        .mk_const(ty::Const {
+                            ty,
+                            val: ty::ConstKind::Unevaluated(ty::Unevaluated {
+                                def,
+                                substs_: Some(InternalSubsts::for_item(
+                                    tcx,
+                                    def.did,
+                                    |param, _| {
+                                        if let ty::GenericParamDefKind::Lifetime = param.kind {
+                                            tcx.lifetimes.re_erased.into()
+                                        } else {
+                                            tcx.mk_param_from_def(param)
+                                        }
+                                    },
+                                )),
+                                promoted: Some(promoted_id),
+                            }),
+                        })
+                        .into(),
+                }))
+            };
+            let (blocks, local_decls) = self.source.basic_blocks_and_local_decls_mut();
+            match candidate {
+                Candidate::Ref(loc) => {
+                    let statement = &mut blocks[loc.block].statements[loc.statement_index];
+                    match statement.kind {
+                        StatementKind::Assign(box (
+                            _,
+                            Rvalue::Ref(ref mut region, borrow_kind, ref mut place),
+                        )) => {
+                            // Use the underlying local for this (necessarily interior) borrow.
+                            let ty = local_decls.local_decls()[place.local].ty;
+                            let span = statement.source_info.span;
+
+                            let ref_ty = tcx.mk_ref(
+                                tcx.lifetimes.re_erased,
+                                ty::TypeAndMut { ty, mutbl: borrow_kind.to_mutbl_lossy() },
+                            );
+
+                            *region = tcx.lifetimes.re_erased;
+
+                            let mut projection = vec![PlaceElem::Deref];
+                            projection.extend(place.projection);
+                            place.projection = tcx.intern_place_elems(&projection);
+
+                            // Create a temp to hold the promoted reference.
+                            // This is because `*r` requires `r` to be a local,
+                            // otherwise we would use the `promoted` directly.
+                            let mut promoted_ref = LocalDecl::new(ref_ty, span);
+                            promoted_ref.source_info = statement.source_info;
+                            let promoted_ref = local_decls.push(promoted_ref);
+                            assert_eq!(self.temps.push(TempState::Unpromotable), promoted_ref);
+
+                            let promoted_ref_statement = Statement {
+                                source_info: statement.source_info,
+                                kind: StatementKind::Assign(Box::new((
+                                    Place::from(promoted_ref),
+                                    Rvalue::Use(promoted_operand(ref_ty, span)),
+                                ))),
+                            };
+                            self.extra_statements.push((loc, promoted_ref_statement));
+
+                            Rvalue::Ref(
+                                tcx.lifetimes.re_erased,
+                                borrow_kind,
+                                Place {
+                                    local: mem::replace(&mut place.local, promoted_ref),
+                                    projection: List::empty(),
+                                },
+                            )
+                        }
+                        _ => bug!(),
+                    }
+                }
+            }
+        };
+
+        assert_eq!(self.new_block(), START_BLOCK);
+        self.visit_rvalue(
+            &mut rvalue,
+            Location { block: BasicBlock::new(0), statement_index: usize::MAX },
+        );
+
+        let span = self.promoted.span;
+        self.assign(RETURN_PLACE, rvalue, span);
+        Some(self.promoted)
+    }
+}
+
+/// Replaces all temporaries with their promoted counterparts.
+impl<'a, 'tcx> MutVisitor<'tcx> for Promoter<'a, 'tcx> {
+    fn tcx(&self) -> TyCtxt<'tcx> {
+        self.tcx
+    }
+
+    fn visit_local(&mut self, local: &mut Local, _: PlaceContext, _: Location) {
+        if self.is_temp_kind(*local) {
+            *local = self.promote_temp(*local);
+        }
+    }
+}
+
+pub fn promote_candidates<'tcx>(
+    body: &mut Body<'tcx>,
+    tcx: TyCtxt<'tcx>,
+    mut temps: IndexVec<Local, TempState>,
+    candidates: Vec<Candidate>,
+) -> IndexVec<Promoted, Body<'tcx>> {
+    // Visit candidates in reverse, in case they're nested.
+    debug!("promote_candidates({:?})", candidates);
+
+    let mut promotions = IndexVec::new();
+
+    let mut extra_statements = vec![];
+    for candidate in candidates.into_iter().rev() {
+        match candidate {
+            Candidate::Ref(Location { block, statement_index }) => {
+                if let StatementKind::Assign(box (place, _)) =
+                    &body[block].statements[statement_index].kind
+                {
+                    if let Some(local) = place.as_local() {
+                        if temps[local] == TempState::PromotedOut {
+                            // Already promoted.
+                            continue;
+                        }
+                    }
+                }
+            }
+        }
+
+        // Declare return place local so that `mir::Body::new` doesn't complain.
+        let initial_locals = iter::once(LocalDecl::new(tcx.types.never, body.span)).collect();
+
+        let mut scope = body.source_scopes[candidate.source_info(body).scope].clone();
+        scope.parent_scope = None;
+
+        let promoted = Body::new(
+            tcx,
+            body.source, // `promoted` gets filled in below
+            IndexVec::new(),
+            IndexVec::from_elem_n(scope, 1),
+            initial_locals,
+            IndexVec::new(),
+            0,
+            vec![],
+            body.span,
+            body.generator_kind(),
+        );
+
+        let promoter = Promoter {
+            promoted,
+            tcx,
+            source: body,
+            temps: &mut temps,
+            extra_statements: &mut extra_statements,
+            keep_original: false,
+        };
+
+        // FIXME(oli-obk): having a `maybe_push()` method on `IndexVec` might be nice
+        if let Some(mut promoted) = promoter.promote_candidate(candidate, promotions.len()) {
+            promoted.source.promoted = Some(promotions.next_index());
+            promotions.push(promoted);
+        }
+    }
+
+    // Insert each of `extra_statements` before its indicated location. This
+    // has to be done in reverse location order, so as not to invalidate the rest.
+    extra_statements.sort_by_key(|&(loc, _)| cmp::Reverse(loc));
+    for (loc, statement) in extra_statements {
+        body[loc.block].statements.insert(loc.statement_index, statement);
+    }
+
+    // Eliminate assignments to, and drops of, promoted temps.
+    let promoted = |index: Local| temps[index] == TempState::PromotedOut;
+    for block in body.basic_blocks_mut() {
+        block.statements.retain(|statement| match &statement.kind {
+            StatementKind::Assign(box (place, _)) => {
+                if let Some(index) = place.as_local() {
+                    !promoted(index)
+                } else {
+                    true
+                }
+            }
+            StatementKind::StorageLive(index) | StatementKind::StorageDead(index) => {
+                !promoted(*index)
+            }
+            _ => true,
+        });
+        let terminator = block.terminator_mut();
+        if let TerminatorKind::Drop { place, target, .. } = &terminator.kind {
+            if let Some(index) = place.as_local() {
+                if promoted(index) {
+                    terminator.kind = TerminatorKind::Goto { target: *target };
+                }
+            }
+        }
+    }
+
+    promotions
+}
+
+/// This function returns `true` if the function being called in the array
+/// repeat expression is a `const` function.
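+///
+/// For illustration (hypothetical code), this is the `foo()` in a repeat expression
+/// like `[foo(); N]` where `foo` is a `const fn`, as opposed to
+/// `let v = foo(); [v; N]`, which is ruled out below.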
+pub fn is_const_fn_in_array_repeat_expression<'tcx>(
+    ccx: &ConstCx<'_, 'tcx>,
+    place: &Place<'tcx>,
+    body: &Body<'tcx>,
+) -> bool {
+    match place.as_local() {
+        // rule out cases such as: `let my_var = some_fn(); [my_var; N]`
+        Some(local) if body.local_decls[local].is_user_variable() => return false,
+        None => return false,
+        _ => {}
+    }
+
+    for block in body.basic_blocks() {
+        if let Some(Terminator { kind: TerminatorKind::Call { func, destination, .. }, .. }) =
+            &block.terminator
+        {
+            if let Operand::Constant(box Constant { literal, .. }) = func {
+                if let ty::FnDef(def_id, _) = *literal.ty().kind() {
+                    if let Some((destination_place, _)) = destination {
+                        if destination_place == place {
+                            if is_const_fn(ccx.tcx, def_id) {
+                                return true;
+                            }
+                        }
+                    }
+                }
+            }
+        }
+    }
+
+    false
+}
diff --git a/compiler/rustc_const_eval/src/transform/validate.rs b/compiler/rustc_const_eval/src/transform/validate.rs
new file mode 100644
index 00000000000..40a32a76c94
--- /dev/null
+++ b/compiler/rustc_const_eval/src/transform/validate.rs
@@ -0,0 +1,523 @@
+//! Validates the MIR to ensure that invariants are upheld.
+
+use super::MirPass;
+use rustc_index::bit_set::BitSet;
+use rustc_infer::infer::TyCtxtInferExt;
+use rustc_middle::mir::interpret::Scalar;
+use rustc_middle::mir::traversal;
+use rustc_middle::mir::visit::{PlaceContext, Visitor};
+use rustc_middle::mir::{
+    AggregateKind, BasicBlock, Body, BorrowKind, Local, Location, MirPhase, Operand, PlaceElem,
+    PlaceRef, ProjectionElem, Rvalue, SourceScope, Statement, StatementKind, Terminator,
+    TerminatorKind,
+};
+use rustc_middle::ty::fold::BottomUpFolder;
+use rustc_middle::ty::{self, ParamEnv, Ty, TyCtxt, TypeFoldable};
+use rustc_mir_dataflow::impls::MaybeStorageLive;
+use rustc_mir_dataflow::storage::AlwaysLiveLocals;
+use rustc_mir_dataflow::{Analysis, ResultsCursor};
+use rustc_target::abi::Size;
+
+#[derive(Copy, Clone, Debug)]
+enum EdgeKind {
+    Unwind,
+    Normal,
+}
+
+pub struct Validator {
+    /// Describes at which point in the pipeline this validation is happening.
+    pub when: String,
+    /// The phase for which we are upholding the dialect. If the given phase forbids a specific
+    /// element, this validator will now emit errors if that specific element is encountered.
+    /// Note that phases that change the dialect cause all *following* phases to check the
+    /// invariants of the new dialect. A phase that changes dialects never checks the new invariants
+    /// itself.
+    pub mir_phase: MirPhase,
+}
+
+impl<'tcx> MirPass<'tcx> for Validator {
+    fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+        let def_id = body.source.def_id();
+        let param_env = tcx.param_env(def_id);
+        let mir_phase = self.mir_phase;
+
+        let always_live_locals = AlwaysLiveLocals::new(body);
+        let storage_liveness = MaybeStorageLive::new(always_live_locals)
+            .into_engine(tcx, body)
+            .iterate_to_fixpoint()
+            .into_results_cursor(body);
+
+        TypeChecker {
+            when: &self.when,
+            body,
+            tcx,
+            param_env,
+            mir_phase,
+            reachable_blocks: traversal::reachable_as_bitset(body),
+            storage_liveness,
+            place_cache: Vec::new(),
+        }
+        .visit_body(body);
+    }
+}
+
+/// Returns whether the two types are equal up to lifetimes.
+/// All lifetimes, including higher-ranked ones, get ignored for this comparison.
+/// (This is unlike the `erasing_regions` methods, which keep higher-ranked lifetimes for soundness reasons.)
+///
+/// The point of this function is to approximate "equal up to subtyping".  However,
+/// the approximation is incorrect as variance is ignored.
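+///
+/// For illustration (hypothetical types): `&'a u32` and `&'b u32` compare equal here,
+/// and so do `for<'a> fn(&'a u32)` and `fn(&'static u32)`, since all lifetimes,
+/// including higher-ranked ones, are ignored.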
+pub fn equal_up_to_regions(
+    tcx: TyCtxt<'tcx>,
+    param_env: ParamEnv<'tcx>,
+    src: Ty<'tcx>,
+    dest: Ty<'tcx>,
+) -> bool {
+    // Fast path.
+    if src == dest {
+        return true;
+    }
+
+    // Normalize lifetimes away on both sides, then compare.
+    let param_env = param_env.with_reveal_all_normalized(tcx);
+    let normalize = |ty: Ty<'tcx>| {
+        tcx.normalize_erasing_regions(
+            param_env,
+            ty.fold_with(&mut BottomUpFolder {
+                tcx,
+                // FIXME: We erase all late-bound lifetimes, but this is not fully correct.
+                // If you have a type like `<for<'a> fn(&'a u32) as SomeTrait>::Assoc`,
+                // this is not necessarily equivalent to `<fn(&'static u32) as SomeTrait>::Assoc`,
+                // since one may have an `impl SomeTrait for fn(&u32)` and
+                // `impl SomeTrait for fn(&'static u32)` at the same time which
+                // specify distinct values for Assoc. (See also #56105)
+                lt_op: |_| tcx.lifetimes.re_erased,
+                // Leave consts and types unchanged.
+                ct_op: |ct| ct,
+                ty_op: |ty| ty,
+            }),
+        )
+    };
+    tcx.infer_ctxt().enter(|infcx| infcx.can_eq(param_env, normalize(src), normalize(dest)).is_ok())
+}
+
+struct TypeChecker<'a, 'tcx> {
+    when: &'a str,
+    body: &'a Body<'tcx>,
+    tcx: TyCtxt<'tcx>,
+    param_env: ParamEnv<'tcx>,
+    mir_phase: MirPhase,
+    reachable_blocks: BitSet<BasicBlock>,
+    storage_liveness: ResultsCursor<'a, 'tcx, MaybeStorageLive>,
+    place_cache: Vec<PlaceRef<'tcx>>,
+}
+
+impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
+    fn fail(&self, location: Location, msg: impl AsRef<str>) {
+        let span = self.body.source_info(location).span;
+        // We use `delay_span_bug` as we might see broken MIR when other errors have already
+        // occurred.
+        self.tcx.sess.diagnostic().delay_span_bug(
+            span,
+            &format!(
+                "broken MIR in {:?} ({}) at {:?}:\n{}",
+                self.body.source.instance,
+                self.when,
+                location,
+                msg.as_ref()
+            ),
+        );
+    }
+
+    fn check_edge(&self, location: Location, bb: BasicBlock, edge_kind: EdgeKind) {
+        if let Some(bb) = self.body.basic_blocks().get(bb) {
+            let src = self.body.basic_blocks().get(location.block).unwrap();
+            match (src.is_cleanup, bb.is_cleanup, edge_kind) {
+                // Non-cleanup blocks can jump to non-cleanup blocks along non-unwind edges
+                (false, false, EdgeKind::Normal)
+                // Non-cleanup blocks can jump to cleanup blocks along unwind edges
+                | (false, true, EdgeKind::Unwind)
+                // Cleanup blocks can jump to cleanup blocks along non-unwind edges
+                | (true, true, EdgeKind::Normal) => {}
+                // All other jumps are invalid
+                _ => {
+                    self.fail(
+                        location,
+                        format!(
+                            "{:?} edge to {:?} violates unwind invariants (cleanup {:?} -> {:?})",
+                            edge_kind,
+                            bb,
+                            src.is_cleanup,
+                            bb.is_cleanup,
+                        )
+                    )
+                }
+            }
+        } else {
+            self.fail(location, format!("encountered jump to invalid basic block {:?}", bb))
+        }
+    }
+
+    /// Check if src can be assigned into dest.
+    /// This is not precise; it will accept some incorrect assignments.
+    fn mir_assign_valid_types(&self, src: Ty<'tcx>, dest: Ty<'tcx>) -> bool {
+        // Fast path before we normalize.
+        if src == dest {
+            // Equal types, all is good.
+            return true;
+        }
+        // Normalize projections and things like that.
+        // FIXME: We need to reveal_all, as some optimizations change types in ways
+        // that require unfolding opaque types.
+        let param_env = self.param_env.with_reveal_all_normalized(self.tcx);
+        let src = self.tcx.normalize_erasing_regions(param_env, src);
+        let dest = self.tcx.normalize_erasing_regions(param_env, dest);
+
+        // Type-changing assignments can happen when subtyping is used. While
+        // all normal lifetimes are erased, higher-ranked types with their
+        // late-bound lifetimes are still around and can lead to type
+        // differences. So we compare ignoring lifetimes.
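+        // For illustration (hypothetical types): an assignment may store a value of type
+        // `for<'a> fn(&'a u32)` into a place of type `fn(&'static u32)` via subtyping;
+        // the two types differ only in (late-bound) lifetimes and are accepted here.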
+        equal_up_to_regions(self.tcx, param_env, src, dest)
+    }
+}
+
+impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
+    fn visit_local(&mut self, local: &Local, context: PlaceContext, location: Location) {
+        if self.body.local_decls.get(*local).is_none() {
+            self.fail(
+                location,
+                format!("local {:?} has no corresponding declaration in `body.local_decls`", local),
+            );
+        }
+
+        if self.reachable_blocks.contains(location.block) && context.is_use() {
+            // Uses of locals must occur while the local's storage is allocated.
+            self.storage_liveness.seek_after_primary_effect(location);
+            let locals_with_storage = self.storage_liveness.get();
+            if !locals_with_storage.contains(*local) {
+                self.fail(location, format!("use of local {:?}, which has no storage here", local));
+            }
+        }
+    }
+
+    fn visit_operand(&mut self, operand: &Operand<'tcx>, location: Location) {
+        // This check is somewhat expensive, so only run it when -Zvalidate-mir is passed.
+        if self.tcx.sess.opts.debugging_opts.validate_mir {
+            // `Operand::Copy` is only supposed to be used with `Copy` types.
+            if let Operand::Copy(place) = operand {
+                let ty = place.ty(&self.body.local_decls, self.tcx).ty;
+                let span = self.body.source_info(location).span;
+
+                if !ty.is_copy_modulo_regions(self.tcx.at(span), self.param_env) {
+                    self.fail(location, format!("`Operand::Copy` with non-`Copy` type {}", ty));
+                }
+            }
+        }
+
+        self.super_operand(operand, location);
+    }
+
+    fn visit_projection_elem(
+        &mut self,
+        local: Local,
+        proj_base: &[PlaceElem<'tcx>],
+        elem: PlaceElem<'tcx>,
+        context: PlaceContext,
+        location: Location,
+    ) {
+        if let ProjectionElem::Index(index) = elem {
+            let index_ty = self.body.local_decls[index].ty;
+            if index_ty != self.tcx.types.usize {
+                self.fail(location, format!("bad index ({:?} != usize)", index_ty))
+            }
+        }
+        self.super_projection_elem(local, proj_base, elem, context, location);
+    }
+
+    fn visit_statement(&mut self, statement: &Statement<'tcx>, location: Location) {
+        match &statement.kind {
+            StatementKind::Assign(box (dest, rvalue)) => {
+                // LHS and RHS of the assignment must have the same type.
+                let left_ty = dest.ty(&self.body.local_decls, self.tcx).ty;
+                let right_ty = rvalue.ty(&self.body.local_decls, self.tcx);
+                if !self.mir_assign_valid_types(right_ty, left_ty) {
+                    self.fail(
+                        location,
+                        format!(
+                            "encountered `{:?}` with incompatible types:\n\
+                            left-hand side has type: {}\n\
+                            right-hand side has type: {}",
+                            statement.kind, left_ty, right_ty,
+                        ),
+                    );
+                }
+                match rvalue {
+                    // The sides of an assignment must not alias. Currently this just checks whether the places
+                    // are identical.
+                    Rvalue::Use(Operand::Copy(src) | Operand::Move(src)) => {
+                        if dest == src {
+                            self.fail(
+                                location,
+                                "encountered `Assign` statement with overlapping memory",
+                            );
+                        }
+                    }
+                    // The deaggregator currently does not deaggregate arrays.
+                    // So for now, we ignore them here.
+                    Rvalue::Aggregate(box AggregateKind::Array { .. }, _) => {}
+                    // All other aggregates must be gone after some phases.
+                    Rvalue::Aggregate(box kind, _) => {
+                        if self.mir_phase > MirPhase::DropLowering
+                            && !matches!(kind, AggregateKind::Generator(..))
+                        {
+                            // Generators persist until the state machine transformation, but all
+                            // other aggregates must have been lowered.
+                            self.fail(
+                                location,
+                                format!("{:?} have been lowered to field assignments", rvalue),
+                            )
+                        } else if self.mir_phase > MirPhase::GeneratorLowering {
+                            // No more aggregates after drop and generator lowering.
+                            self.fail(
+                                location,
+                                format!("{:?} have been lowered to field assignments", rvalue),
+                            )
+                        }
+                    }
+                    Rvalue::Ref(_, BorrowKind::Shallow, _) => {
+                        if self.mir_phase > MirPhase::DropLowering {
+                            self.fail(
+                                location,
+                                "`Assign` statement with a `Shallow` borrow should have been removed after drop lowering phase",
+                            );
+                        }
+                    }
+                    _ => {}
+                }
+            }
+            StatementKind::AscribeUserType(..) => {
+                if self.mir_phase > MirPhase::DropLowering {
+                    self.fail(
+                        location,
+                        "`AscribeUserType` should have been removed after drop lowering phase",
+                    );
+                }
+            }
+            StatementKind::FakeRead(..) => {
+                if self.mir_phase > MirPhase::DropLowering {
+                    self.fail(
+                        location,
+                        "`FakeRead` should have been removed after drop lowering phase",
+                    );
+                }
+            }
+            StatementKind::CopyNonOverlapping(box rustc_middle::mir::CopyNonOverlapping {
+                ref src,
+                ref dst,
+                ref count,
+            }) => {
+                let src_ty = src.ty(&self.body.local_decls, self.tcx);
+                let op_src_ty = if let Some(src_deref) = src_ty.builtin_deref(true) {
+                    src_deref.ty
+                } else {
+                    self.fail(
+                        location,
+                        format!("Expected src to be ptr in copy_nonoverlapping, got: {}", src_ty),
+                    );
+                    return;
+                };
+                let dst_ty = dst.ty(&self.body.local_decls, self.tcx);
+                let op_dst_ty = if let Some(dst_deref) = dst_ty.builtin_deref(true) {
+                    dst_deref.ty
+                } else {
+                    self.fail(
+                        location,
+                        format!("Expected dst to be ptr in copy_nonoverlapping, got: {}", dst_ty),
+                    );
+                    return;
+                };
+                // Since `CopyNonOverlapping` is parametrized by a single type, we only need to
+                // check that the source and destination element types are equal; no extra
+                // type parameter needs to be tracked here.
+                if op_src_ty != op_dst_ty {
+                    self.fail(location, format!("bad arg ({:?} != {:?})", op_src_ty, op_dst_ty));
+                }
+
+                let op_cnt_ty = count.ty(&self.body.local_decls, self.tcx);
+                if op_cnt_ty != self.tcx.types.usize {
+                    self.fail(location, format!("bad arg ({:?} != usize)", op_cnt_ty))
+                }
+            }
+            StatementKind::SetDiscriminant { .. }
+            | StatementKind::StorageLive(..)
+            | StatementKind::StorageDead(..)
+            | StatementKind::LlvmInlineAsm(..)
+            | StatementKind::Retag(_, _)
+            | StatementKind::Coverage(_)
+            | StatementKind::Nop => {}
+        }
+
+        self.super_statement(statement, location);
+    }
+
+    fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, location: Location) {
+        match &terminator.kind {
+            TerminatorKind::Goto { target } => {
+                self.check_edge(location, *target, EdgeKind::Normal);
+            }
+            TerminatorKind::SwitchInt { targets, switch_ty, discr } => {
+                let ty = discr.ty(&self.body.local_decls, self.tcx);
+                if ty != *switch_ty {
+                    self.fail(
+                        location,
+                        format!(
+                            "encountered `SwitchInt` terminator with type mismatch: {:?} != {:?}",
+                            ty, switch_ty,
+                        ),
+                    );
+                }
+
+                let target_width = self.tcx.sess.target.pointer_width;
+
+                let size = Size::from_bits(match switch_ty.kind() {
+                    ty::Uint(uint) => uint.normalize(target_width).bit_width().unwrap(),
+                    ty::Int(int) => int.normalize(target_width).bit_width().unwrap(),
+                    ty::Char => 32,
+                    ty::Bool => 1,
+                    other => bug!("unhandled type: {:?}", other),
+                });
+
+                for (value, target) in targets.iter() {
+                    if Scalar::<()>::try_from_uint(value, size).is_none() {
+                        self.fail(
+                            location,
+                            format!("the value {:#x} is not a proper {:?}", value, switch_ty),
+                        )
+                    }
+
+                    self.check_edge(location, target, EdgeKind::Normal);
+                }
+                self.check_edge(location, targets.otherwise(), EdgeKind::Normal);
+            }
+            TerminatorKind::Drop { target, unwind, .. } => {
+                self.check_edge(location, *target, EdgeKind::Normal);
+                if let Some(unwind) = unwind {
+                    self.check_edge(location, *unwind, EdgeKind::Unwind);
+                }
+            }
+            TerminatorKind::DropAndReplace { target, unwind, .. } => {
+                if self.mir_phase > MirPhase::DropLowering {
+                    self.fail(
+                        location,
+                        "`DropAndReplace` is not permitted to exist after drop elaboration",
+                    );
+                }
+                self.check_edge(location, *target, EdgeKind::Normal);
+                if let Some(unwind) = unwind {
+                    self.check_edge(location, *unwind, EdgeKind::Unwind);
+                }
+            }
+            TerminatorKind::Call { func, args, destination, cleanup, .. } => {
+                let func_ty = func.ty(&self.body.local_decls, self.tcx);
+                match func_ty.kind() {
+                    ty::FnPtr(..) | ty::FnDef(..) => {}
+                    _ => self.fail(
+                        location,
+                        format!("encountered non-callable type {} in `Call` terminator", func_ty),
+                    ),
+                }
+                if let Some((_, target)) = destination {
+                    self.check_edge(location, *target, EdgeKind::Normal);
+                }
+                if let Some(cleanup) = cleanup {
+                    self.check_edge(location, *cleanup, EdgeKind::Unwind);
+                }
+
+                // The call destination place and any `Operand::Move` place used as an argument
+                // might be passed by reference to the callee. Consequently they must be
+                // non-overlapping. Currently this simply checks for duplicate places.
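+                // For illustration (hypothetical MIR): `_1 = f(move _1)` would be flagged,
+                // since the destination and a moved argument are the same place.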
+                self.place_cache.clear();
+                if let Some((destination, _)) = destination {
+                    self.place_cache.push(destination.as_ref());
+                }
+                for arg in args {
+                    if let Operand::Move(place) = arg {
+                        self.place_cache.push(place.as_ref());
+                    }
+                }
+                let all_len = self.place_cache.len();
+                self.place_cache.sort_unstable();
+                self.place_cache.dedup();
+                let has_duplicates = all_len != self.place_cache.len();
+                if has_duplicates {
+                    self.fail(
+                        location,
+                        format!(
+                            "encountered overlapping memory in `Call` terminator: {:?}",
+                            terminator.kind,
+                        ),
+                    );
+                }
+            }
+            TerminatorKind::Assert { cond, target, cleanup, .. } => {
+                let cond_ty = cond.ty(&self.body.local_decls, self.tcx);
+                if cond_ty != self.tcx.types.bool {
+                    self.fail(
+                        location,
+                        format!(
+                            "encountered non-boolean condition of type {} in `Assert` terminator",
+                            cond_ty
+                        ),
+                    );
+                }
+                self.check_edge(location, *target, EdgeKind::Normal);
+                if let Some(cleanup) = cleanup {
+                    self.check_edge(location, *cleanup, EdgeKind::Unwind);
+                }
+            }
+            TerminatorKind::Yield { resume, drop, .. } => {
+                if self.mir_phase > MirPhase::GeneratorLowering {
+                    self.fail(location, "`Yield` should have been replaced by generator lowering");
+                }
+                self.check_edge(location, *resume, EdgeKind::Normal);
+                if let Some(drop) = drop {
+                    self.check_edge(location, *drop, EdgeKind::Normal);
+                }
+            }
+            TerminatorKind::FalseEdge { real_target, imaginary_target } => {
+                self.check_edge(location, *real_target, EdgeKind::Normal);
+                self.check_edge(location, *imaginary_target, EdgeKind::Normal);
+            }
+            TerminatorKind::FalseUnwind { real_target, unwind } => {
+                self.check_edge(location, *real_target, EdgeKind::Normal);
+                if let Some(unwind) = unwind {
+                    self.check_edge(location, *unwind, EdgeKind::Unwind);
+                }
+            }
+            TerminatorKind::InlineAsm { destination, .. } => {
+                if let Some(destination) = destination {
+                    self.check_edge(location, *destination, EdgeKind::Normal);
+                }
+            }
+            // Nothing to validate for these.
+            TerminatorKind::Resume
+            | TerminatorKind::Abort
+            | TerminatorKind::Return
+            | TerminatorKind::Unreachable
+            | TerminatorKind::GeneratorDrop => {}
+        }
+
+        self.super_terminator(terminator, location);
+    }
+
+    fn visit_source_scope(&mut self, scope: &SourceScope) {
+        if self.body.source_scopes.get(*scope).is_none() {
+            self.tcx.sess.diagnostic().delay_span_bug(
+                self.body.span,
+                &format!(
+                    "broken MIR in {:?} ({}):\ninvalid source scope {:?}",
+                    self.body.source.instance, self.when, scope,
+                ),
+            );
+        }
+    }
+}
diff --git a/compiler/rustc_const_eval/src/util/aggregate.rs b/compiler/rustc_const_eval/src/util/aggregate.rs
new file mode 100644
index 00000000000..4bc0357cab8
--- /dev/null
+++ b/compiler/rustc_const_eval/src/util/aggregate.rs
@@ -0,0 +1,75 @@
+use rustc_index::vec::Idx;
+use rustc_middle::mir::*;
+use rustc_middle::ty::{Ty, TyCtxt};
+use rustc_target::abi::VariantIdx;
+
+use std::convert::TryFrom;
+use std::iter::TrustedLen;
+
+/// Expand `lhs = Rvalue::Aggregate(kind, operands)` into assignments to the fields.
+///
+/// Produces something like
+///
+/// (lhs as Variant).field0 = arg0;     // We only have a downcast if this is an enum
+/// (lhs as Variant).field1 = arg1;
+/// discriminant(lhs) = variant_index;  // If lhs is an enum or generator.
+pub fn expand_aggregate<'tcx>(
+    mut lhs: Place<'tcx>,
+    operands: impl Iterator<Item = (Operand<'tcx>, Ty<'tcx>)> + TrustedLen,
+    kind: AggregateKind<'tcx>,
+    source_info: SourceInfo,
+    tcx: TyCtxt<'tcx>,
+) -> impl Iterator<Item = Statement<'tcx>> + TrustedLen {
+    let mut set_discriminant = None;
+    let active_field_index = match kind {
+        AggregateKind::Adt(adt_def, variant_index, _, _, active_field_index) => {
+            if adt_def.is_enum() {
+                set_discriminant = Some(Statement {
+                    kind: StatementKind::SetDiscriminant { place: Box::new(lhs), variant_index },
+                    source_info,
+                });
+                lhs = tcx.mk_place_downcast(lhs, adt_def, variant_index);
+            }
+            active_field_index
+        }
+        AggregateKind::Generator(..) => {
+            // Right now we only support initializing generators to
+            // variant 0 (Unresumed).
+            let variant_index = VariantIdx::new(0);
+            set_discriminant = Some(Statement {
+                kind: StatementKind::SetDiscriminant { place: Box::new(lhs), variant_index },
+                source_info,
+            });
+
+            // Operands are upvars stored on the base place, so no
+            // downcast is necessary.
+
+            None
+        }
+        _ => None,
+    };
+
+    operands
+        .enumerate()
+        .map(move |(i, (op, ty))| {
+            let lhs_field = if let AggregateKind::Array(_) = kind {
+                let offset = u64::try_from(i).unwrap();
+                tcx.mk_place_elem(
+                    lhs,
+                    ProjectionElem::ConstantIndex {
+                        offset,
+                        min_length: offset + 1,
+                        from_end: false,
+                    },
+                )
+            } else {
+                let field = Field::new(active_field_index.unwrap_or(i));
+                tcx.mk_place_field(lhs, field, ty)
+            };
+            Statement {
+                source_info,
+                kind: StatementKind::Assign(Box::new((lhs_field, Rvalue::Use(op)))),
+            }
+        })
+        .chain(set_discriminant)
+}
diff --git a/compiler/rustc_const_eval/src/util/alignment.rs b/compiler/rustc_const_eval/src/util/alignment.rs
new file mode 100644
index 00000000000..73adc60577b
--- /dev/null
+++ b/compiler/rustc_const_eval/src/util/alignment.rs
@@ -0,0 +1,70 @@
+use rustc_middle::mir::*;
+use rustc_middle::ty::{self, TyCtxt};
+use rustc_target::abi::Align;
+
+/// Returns `true` if this place may be less aligned than its type's ABI
+/// alignment requires (because it is within a packed struct).
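+///
+/// For illustration (hypothetical types): given `#[repr(packed)] struct P { x: u8, y: u32 }`,
+/// the place `p.y` has ABI alignment 4 but packed alignment 1, so this returns `true` for it.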
+pub fn is_disaligned<'tcx, L>(
+    tcx: TyCtxt<'tcx>,
+    local_decls: &L,
+    param_env: ty::ParamEnv<'tcx>,
+    place: Place<'tcx>,
+) -> bool
+where
+    L: HasLocalDecls<'tcx>,
+{
+    debug!("is_disaligned({:?})", place);
+    let pack = match is_within_packed(tcx, local_decls, place) {
+        None => {
+            debug!("is_disaligned({:?}) - not within packed", place);
+            return false;
+        }
+        Some(pack) => pack,
+    };
+
+    let ty = place.ty(local_decls, tcx).ty;
+    match tcx.layout_of(param_env.and(ty)) {
+        Ok(layout) if layout.align.abi <= pack => {
+            // If the packed alignment is greater than or equal to the field
+            // alignment, the type won't be further disaligned.
+            debug!(
+                "is_disaligned({:?}) - align = {}, packed = {}; not disaligned",
+                place,
+                layout.align.abi.bytes(),
+                pack.bytes()
+            );
+            false
+        }
+        _ => {
+            debug!("is_disaligned({:?}) - true", place);
+            true
+        }
+    }
+}
+
+fn is_within_packed<'tcx, L>(
+    tcx: TyCtxt<'tcx>,
+    local_decls: &L,
+    place: Place<'tcx>,
+) -> Option<Align>
+where
+    L: HasLocalDecls<'tcx>,
+{
+    for (place_base, elem) in place.iter_projections().rev() {
+        match elem {
+            // Encountered a `Deref`; the pointee is ABI-aligned, so stop looking further.
+            ProjectionElem::Deref => break,
+            ProjectionElem::Field(..) => {
+                let ty = place_base.ty(local_decls, tcx).ty;
+                match ty.kind() {
+                    ty::Adt(def, _) => return def.repr.pack,
+                    _ => {}
+                }
+            }
+            _ => {}
+        }
+    }
+
+    None
+}
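For context, a minimal runnable sketch of the situation this check exists for: a field inside a `#[repr(packed)]` struct whose type wants more alignment than the packing allows. The struct `P` below is invented for illustration, and the exact diagnostics for taking a reference to such a field vary by compiler version; the point is only that such a place may be under-aligned.

    #[repr(packed)]
    struct P {
        a: u8,
        b: u32, // `u32` wants 4-byte alignment, but `P` is packed to 1
    }

    fn main() {
        let p = P { a: 0, b: 1 };
        // Taking `&p.b` would create a potentially misaligned reference and is
        // rejected or linted against; copying the field out by value is fine,
        // since the compiler emits an unaligned load for it.
        let b = p.b;
        println!("{}", b);
    }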
diff --git a/compiler/rustc_const_eval/src/util/collect_writes.rs b/compiler/rustc_const_eval/src/util/collect_writes.rs
new file mode 100644
index 00000000000..9c56fd722bd
--- /dev/null
+++ b/compiler/rustc_const_eval/src/util/collect_writes.rs
@@ -0,0 +1,36 @@
+use rustc_middle::mir::visit::PlaceContext;
+use rustc_middle::mir::visit::Visitor;
+use rustc_middle::mir::{Body, Local, Location};
+
+pub trait FindAssignments {
+    /// Finds all statements that assign directly to `local` (i.e., `X = ...`)
+    /// and returns their locations.
+    fn find_assignments(&self, local: Local) -> Vec<Location>;
+}
+
+impl<'tcx> FindAssignments for Body<'tcx> {
+    fn find_assignments(&self, local: Local) -> Vec<Location> {
+        let mut visitor = FindLocalAssignmentVisitor { needle: local, locations: vec![] };
+        visitor.visit_body(self);
+        visitor.locations
+    }
+}
+
+// This visitor walks the MIR and collects the locations of all assignment
+// statements that target the given `Local`.
+struct FindLocalAssignmentVisitor {
+    needle: Local,
+    locations: Vec<Location>,
+}
+
+impl<'tcx> Visitor<'tcx> for FindLocalAssignmentVisitor {
+    fn visit_local(&mut self, local: &Local, place_context: PlaceContext, location: Location) {
+        if self.needle != *local {
+            return;
+        }
+
+        if place_context.is_place_assignment() {
+            self.locations.push(location);
+        }
+    }
+}
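The visitor only runs inside the compiler against a real `Body`, but the idea is simple enough to sketch standalone. A simplified analogue (the `Stmt` and `Local` types below are invented for illustration and are not the rustc API):

    // A simplified, self-contained analogue of `FindAssignments`: scan a flat
    // list of "statements" and record the indices of those that write to a
    // given local.
    #[derive(PartialEq, Eq, Clone, Copy)]
    struct Local(usize);

    enum Stmt {
        Assign(Local),
        Other,
    }

    fn find_assignments(body: &[Stmt], needle: Local) -> Vec<usize> {
        body.iter()
            .enumerate()
            .filter_map(|(i, s)| match s {
                Stmt::Assign(target) if *target == needle => Some(i),
                _ => None,
            })
            .collect()
    }

    fn main() {
        let body = [Stmt::Assign(Local(0)), Stmt::Other, Stmt::Assign(Local(0))];
        assert_eq!(find_assignments(&body, Local(0)), vec![0, 2]);
    }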
diff --git a/compiler/rustc_const_eval/src/util/find_self_call.rs b/compiler/rustc_const_eval/src/util/find_self_call.rs
new file mode 100644
index 00000000000..33ad128eeeb
--- /dev/null
+++ b/compiler/rustc_const_eval/src/util/find_self_call.rs
@@ -0,0 +1,36 @@
+use rustc_middle::mir::*;
+use rustc_middle::ty::subst::SubstsRef;
+use rustc_middle::ty::{self, TyCtxt};
+use rustc_span::def_id::DefId;
+
+/// Checks if the specified `local` is used as the `self` parameter of a method call
+/// in the provided `BasicBlock`. If it is, returns the `DefId` of the called method
+/// together with the substitutions applied to it.
+pub fn find_self_call<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    body: &Body<'tcx>,
+    local: Local,
+    block: BasicBlock,
+) -> Option<(DefId, SubstsRef<'tcx>)> {
+    debug!("find_self_call(local={:?}): terminator={:?}", local, &body[block].terminator);
+    if let Some(Terminator { kind: TerminatorKind::Call { func, args, .. }, .. }) =
+        &body[block].terminator
+    {
+        debug!("find_self_call: func={:?}", func);
+        if let Operand::Constant(box Constant { literal, .. }) = func {
+            if let ty::FnDef(def_id, substs) = *literal.ty().kind() {
+                if let Some(ty::AssocItem { fn_has_self_parameter: true, .. }) =
+                    tcx.opt_associated_item(def_id)
+                {
+                    debug!("find_self_call: args={:?}", args);
+                    if let [Operand::Move(self_place) | Operand::Copy(self_place), ..] = **args {
+                        if self_place.as_local() == Some(local) {
+                            return Some((def_id, substs));
+                        }
+                    }
+                }
+            }
+        }
+    }
+    None
+}
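For reference, a tiny program whose MIR exhibits the shape this helper looks for. The comments describe the MIR only conceptually; `find_self_call` itself runs inside the compiler against a real `TyCtxt` and `Body`, not against source like this.

    fn main() {
        let v = vec![1u32, 2, 3];
        // Illustrative only: the block ending at this call has a
        // `TerminatorKind::Call` whose callee is a method with a `self`
        // parameter and whose first operand passes `v` as `self` -- the
        // pattern `find_self_call` matches when a diagnostic asks whether a
        // local was consumed by a method call.
        let total: u32 = v.into_iter().sum();
        println!("{}", total);
    }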
diff --git a/compiler/rustc_const_eval/src/util/mod.rs b/compiler/rustc_const_eval/src/util/mod.rs
new file mode 100644
index 00000000000..4a406f8bfd0
--- /dev/null
+++ b/compiler/rustc_const_eval/src/util/mod.rs
@@ -0,0 +1,8 @@
+pub mod aggregate;
+mod alignment;
+pub mod collect_writes;
+mod find_self_call;
+
+pub use self::aggregate::expand_aggregate;
+pub use self::alignment::is_disaligned;
+pub use self::find_self_call::find_self_call;