Diffstat (limited to 'compiler/rustc_const_eval/src')
-rw-r--r--  compiler/rustc_const_eval/src/const_eval/error.rs | 149
-rw-r--r--  compiler/rustc_const_eval/src/const_eval/eval_queries.rs | 59
-rw-r--r--  compiler/rustc_const_eval/src/const_eval/fn_queries.rs | 21
-rw-r--r--  compiler/rustc_const_eval/src/const_eval/machine.rs | 268
-rw-r--r--  compiler/rustc_const_eval/src/const_eval/mod.rs | 8
-rw-r--r--  compiler/rustc_const_eval/src/const_eval/valtrees.rs | 21
-rw-r--r--  compiler/rustc_const_eval/src/errors.rs | 18
-rw-r--r--  compiler/rustc_const_eval/src/interpret/cast.rs | 60
-rw-r--r--  compiler/rustc_const_eval/src/interpret/discriminant.rs | 238
-rw-r--r--  compiler/rustc_const_eval/src/interpret/eval_context.rs | 138
-rw-r--r--  compiler/rustc_const_eval/src/interpret/intern.rs | 40
-rw-r--r--  compiler/rustc_const_eval/src/interpret/intrinsics.rs | 144
-rw-r--r--  compiler/rustc_const_eval/src/interpret/intrinsics/caller_location.rs | 17
-rw-r--r--  compiler/rustc_const_eval/src/interpret/machine.rs | 75
-rw-r--r--  compiler/rustc_const_eval/src/interpret/memory.rs | 184
-rw-r--r--  compiler/rustc_const_eval/src/interpret/mod.rs | 1
-rw-r--r--  compiler/rustc_const_eval/src/interpret/operand.rs | 320
-rw-r--r--  compiler/rustc_const_eval/src/interpret/operator.rs | 14
-rw-r--r--  compiler/rustc_const_eval/src/interpret/place.rs | 248
-rw-r--r--  compiler/rustc_const_eval/src/interpret/projection.rs | 18
-rw-r--r--  compiler/rustc_const_eval/src/interpret/step.rs | 82
-rw-r--r--  compiler/rustc_const_eval/src/interpret/terminator.rs | 150
-rw-r--r--  compiler/rustc_const_eval/src/interpret/traits.rs | 2
-rw-r--r--  compiler/rustc_const_eval/src/interpret/util.rs | 16
-rw-r--r--  compiler/rustc_const_eval/src/interpret/validity.rs | 74
-rw-r--r--  compiler/rustc_const_eval/src/interpret/visitor.rs | 24
-rw-r--r--  compiler/rustc_const_eval/src/lib.rs | 13
-rw-r--r--  compiler/rustc_const_eval/src/transform/check_consts/check.rs | 124
-rw-r--r--  compiler/rustc_const_eval/src/transform/check_consts/mod.rs | 33
-rw-r--r--  compiler/rustc_const_eval/src/transform/check_consts/ops.rs | 105
-rw-r--r--  compiler/rustc_const_eval/src/transform/check_consts/post_drop_elaboration.rs | 5
-rw-r--r--  compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs | 25
-rw-r--r--  compiler/rustc_const_eval/src/transform/check_consts/resolver.rs | 15
-rw-r--r--  compiler/rustc_const_eval/src/transform/promote_consts.rs | 164
-rw-r--r--  compiler/rustc_const_eval/src/transform/validate.rs | 294
-rw-r--r--  compiler/rustc_const_eval/src/util/aggregate.rs | 77
-rw-r--r--  compiler/rustc_const_eval/src/util/call_kind.rs | 23
-rw-r--r--  compiler/rustc_const_eval/src/util/check_validity_requirement.rs (renamed from compiler/rustc_const_eval/src/util/might_permit_raw_init.rs) | 69
-rw-r--r--  compiler/rustc_const_eval/src/util/compare_types.rs | 63
-rw-r--r--  compiler/rustc_const_eval/src/util/mod.rs | 8
-rw-r--r--  compiler/rustc_const_eval/src/util/type_name.rs | 6
41 files changed, 1914 insertions, 1499 deletions
diff --git a/compiler/rustc_const_eval/src/const_eval/error.rs b/compiler/rustc_const_eval/src/const_eval/error.rs
index 4977a5d6bbf..0579f781535 100644
--- a/compiler/rustc_const_eval/src/const_eval/error.rs
+++ b/compiler/rustc_const_eval/src/const_eval/error.rs
@@ -36,16 +36,16 @@ impl<'tcx> Into<InterpErrorInfo<'tcx>> for ConstEvalErrKind {
 impl fmt::Display for ConstEvalErrKind {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         use self::ConstEvalErrKind::*;
-        match *self {
+        match self {
             ConstAccessesStatic => write!(f, "constant accesses static"),
             ModifiedGlobal => {
                 write!(f, "modifying a static's initial value from another static's initializer")
             }
-            AssertFailure(ref msg) => write!(f, "{:?}", msg),
+            AssertFailure(msg) => write!(f, "{:?}", msg),
             Panic { msg, line, col, file } => {
                 write!(f, "the evaluated program panicked at '{}', {}:{}:{}", msg, file, line, col)
             }
-            Abort(ref msg) => write!(f, "{}", msg),
+            Abort(msg) => write!(f, "{}", msg),
         }
     }
 }
@@ -55,7 +55,7 @@ impl Error for ConstEvalErrKind {}
 /// When const-evaluation errors, this type is constructed with the resulting information,
 /// and then used to emit the error as a lint or hard error.
 #[derive(Debug)]
-pub struct ConstEvalErr<'tcx> {
+pub(super) struct ConstEvalErr<'tcx> {
     pub span: Span,
     pub error: InterpError<'tcx>,
     pub stacktrace: Vec<FrameInfo<'tcx>>,
@@ -82,8 +82,61 @@ impl<'tcx> ConstEvalErr<'tcx> {
         ConstEvalErr { error: error.into_kind(), stacktrace, span }
     }
 
-    pub fn report_as_error(&self, tcx: TyCtxtAt<'tcx>, message: &str) -> ErrorHandled {
-        self.struct_error(tcx, message, |_| {})
+    pub(super) fn report(&self, tcx: TyCtxtAt<'tcx>, message: &str) -> ErrorHandled {
+        self.report_decorated(tcx, message, |_| {})
+    }
+
+    #[instrument(level = "trace", skip(self, decorate))]
+    pub(super) fn decorate(&self, err: &mut Diagnostic, decorate: impl FnOnce(&mut Diagnostic)) {
+        trace!("reporting const eval failure at {:?}", self.span);
+        // Add some more context for select error types.
+        match self.error {
+            InterpError::Unsupported(
+                UnsupportedOpInfo::ReadPointerAsBytes
+                | UnsupportedOpInfo::PartialPointerOverwrite(_)
+                | UnsupportedOpInfo::PartialPointerCopy(_),
+            ) => {
+                err.help("this code performed an operation that depends on the underlying bytes representing a pointer");
+                err.help("the absolute address of a pointer is not known at compile-time, so such operations are not supported");
+            }
+            _ => {}
+        }
+        // Add spans for the stacktrace. Don't print a single-line backtrace though.
+        if self.stacktrace.len() > 1 {
+            // Helper closure to print duplicated lines.
+            let mut flush_last_line = |last_frame, times| {
+                if let Some((line, span)) = last_frame {
+                    err.span_note(span, &line);
+                    // Don't print [... additional calls ...] if the number of lines is small
+                    if times < 3 {
+                        for _ in 0..times {
+                            err.span_note(span, &line);
+                        }
+                    } else {
+                        err.span_note(
+                            span,
+                            format!("[... {} additional calls {} ...]", times, &line),
+                        );
+                    }
+                }
+            };
+
+            let mut last_frame = None;
+            let mut times = 0;
+            for frame_info in &self.stacktrace {
+                let frame = (frame_info.to_string(), frame_info.span);
+                if last_frame.as_ref() == Some(&frame) {
+                    times += 1;
+                } else {
+                    flush_last_line(last_frame, times);
+                    last_frame = Some(frame);
+                    times = 0;
+                }
+            }
+            flush_last_line(last_frame, times);
+        }
+        // Let the caller attach any additional information it wants.
+        decorate(err);
     }
 
     /// Create a diagnostic for this const eval error.
@@ -95,94 +148,36 @@ impl<'tcx> ConstEvalErr<'tcx> {
     /// If `lint_root.is_some()` report it as a lint, else report it as a hard error.
     /// (Except that for some errors, we ignore all that -- see `must_error` below.)
     #[instrument(skip(self, tcx, decorate), level = "debug")]
-    pub fn struct_error(
+    pub(super) fn report_decorated(
         &self,
         tcx: TyCtxtAt<'tcx>,
         message: &str,
         decorate: impl FnOnce(&mut Diagnostic),
     ) -> ErrorHandled {
-        let finish = |err: &mut Diagnostic, span_msg: Option<String>| {
-            trace!("reporting const eval failure at {:?}", self.span);
-            if let Some(span_msg) = span_msg {
-                err.span_label(self.span, span_msg);
-            }
-            // Add some more context for select error types.
-            match self.error {
-                InterpError::Unsupported(
-                    UnsupportedOpInfo::ReadPointerAsBytes
-                    | UnsupportedOpInfo::PartialPointerOverwrite(_)
-                    | UnsupportedOpInfo::PartialPointerCopy(_),
-                ) => {
-                    err.help("this code performed an operation that depends on the underlying bytes representing a pointer");
-                    err.help("the absolute address of a pointer is not known at compile-time, so such operations are not supported");
-                }
-                _ => {}
-            }
-            // Add spans for the stacktrace. Don't print a single-line backtrace though.
-            if self.stacktrace.len() > 1 {
-                // Helper closure to print duplicated lines.
-                let mut flush_last_line = |last_frame, times| {
-                    if let Some((line, span)) = last_frame {
-                        err.span_label(span, &line);
-                        // Don't print [... additional calls ...] if the number of lines is small
-                        if times < 3 {
-                            for _ in 0..times {
-                                err.span_label(span, &line);
-                            }
-                        } else {
-                            err.span_label(
-                                span,
-                                format!("[... {} additional calls {} ...]", times, &line),
-                            );
-                        }
-                    }
-                };
-
-                let mut last_frame = None;
-                let mut times = 0;
-                for frame_info in &self.stacktrace {
-                    let frame = (frame_info.to_string(), frame_info.span);
-                    if last_frame.as_ref() == Some(&frame) {
-                        times += 1;
-                    } else {
-                        flush_last_line(last_frame, times);
-                        last_frame = Some(frame);
-                        times = 0;
-                    }
-                }
-                flush_last_line(last_frame, times);
-            }
-            // Let the caller attach any additional information it wants.
-            decorate(err);
-        };
-
         debug!("self.error: {:?}", self.error);
         // Special handling for certain errors
         match &self.error {
             // Don't emit a new diagnostic for these errors
             err_inval!(Layout(LayoutError::Unknown(_))) | err_inval!(TooGeneric) => {
-                return ErrorHandled::TooGeneric;
-            }
-            err_inval!(AlreadyReported(error_reported)) => {
-                return ErrorHandled::Reported(*error_reported);
+                ErrorHandled::TooGeneric
             }
+            err_inval!(AlreadyReported(error_reported)) => ErrorHandled::Reported(*error_reported),
             err_inval!(Layout(LayoutError::SizeOverflow(_))) => {
                 // We must *always* hard error on these, even if the caller wants just a lint.
                 // The `message` makes little sense here, this is a more serious error than the
                 // caller thinks anyway.
                 // See <https://github.com/rust-lang/rust/pull/63152>.
                 let mut err = struct_error(tcx, &self.error.to_string());
-                finish(&mut err, None);
-                return ErrorHandled::Reported(err.emit());
+                self.decorate(&mut err, decorate);
+                ErrorHandled::Reported(err.emit())
             }
-            _ => {}
-        };
-
-        let err_msg = self.error.to_string();
-
-        // Report as hard error.
-        let mut err = struct_error(tcx, message);
-        finish(&mut err, Some(err_msg));
-        ErrorHandled::Reported(err.emit())
+            _ => {
+                // Report as hard error.
+                let mut err = struct_error(tcx, message);
+                err.span_label(self.span, self.error.to_string());
+                self.decorate(&mut err, decorate);
+                ErrorHandled::Reported(err.emit())
+            }
+        }
     }
 }
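
The frame-collapsing loop in `decorate` above is worth seeing in isolation: identical consecutive stack frames are counted, short runs are re-emitted verbatim, and long runs become a single "[... N additional calls ...]" note. A self-contained sketch of the same scheme, with plain `String`s standing in for `FrameInfo` and its span:

    fn collapse_frames(frames: &[String]) -> Vec<String> {
        fn flush(out: &mut Vec<String>, last: Option<&str>, times: usize) {
            if let Some(line) = last {
                out.push(line.to_string());
                if times < 3 {
                    // Short runs are printed verbatim...
                    for _ in 0..times {
                        out.push(line.to_string());
                    }
                } else {
                    // ...long runs collapse into one summary line.
                    out.push(format!("[... {times} additional calls {line} ...]"));
                }
            }
        }

        let mut out = Vec::new();
        let mut last: Option<&str> = None;
        let mut times = 0;
        for frame in frames {
            if last == Some(frame.as_str()) {
                times += 1;
            } else {
                flush(&mut out, last, times);
                last = Some(frame.as_str());
                times = 0;
            }
        }
        flush(&mut out, last, times);
        out
    }

    // collapse_frames on ["f", "g", "g", "g", "g", "g", "h"] yields:
    //   f, g, [... 4 additional calls g ...], h
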
diff --git a/compiler/rustc_const_eval/src/const_eval/eval_queries.rs b/compiler/rustc_const_eval/src/const_eval/eval_queries.rs
index 1b1052fdf47..7564ba17b40 100644
--- a/compiler/rustc_const_eval/src/const_eval/eval_queries.rs
+++ b/compiler/rustc_const_eval/src/const_eval/eval_queries.rs
@@ -1,10 +1,7 @@
-use super::{CompileTimeEvalContext, CompileTimeInterpreter, ConstEvalErr};
-use crate::interpret::eval_nullary_intrinsic;
-use crate::interpret::{
-    intern_const_alloc_recursive, Allocation, ConstAlloc, ConstValue, CtfeValidationMode, GlobalId,
-    Immediate, InternKind, InterpCx, InterpError, InterpResult, MPlaceTy, MemoryKind, OpTy,
-    RefTracking, StackPopCleanup,
-};
+use crate::const_eval::CheckAlignment;
+use std::borrow::Cow;
+
+use either::{Left, Right};
 
 use rustc_hir::def::DefKind;
 use rustc_middle::mir;
@@ -16,8 +13,14 @@ use rustc_middle::ty::print::with_no_trimmed_paths;
 use rustc_middle::ty::{self, TyCtxt};
 use rustc_span::source_map::Span;
 use rustc_target::abi::{self, Abi};
-use std::borrow::Cow;
-use std::convert::TryInto;
+
+use super::{CompileTimeEvalContext, CompileTimeInterpreter, ConstEvalErr};
+use crate::interpret::eval_nullary_intrinsic;
+use crate::interpret::{
+    intern_const_alloc_recursive, Allocation, ConstAlloc, ConstValue, CtfeValidationMode, GlobalId,
+    Immediate, InternKind, InterpCx, InterpError, InterpResult, MPlaceTy, MemoryKind, OpTy,
+    RefTracking, StackPopCleanup,
+};
 
 const NOTE_ON_UNDEFINED_BEHAVIOR_ERROR: &str = "The rules on what exactly is undefined behavior aren't clear, \
      so this check might be overzealous. Please open an issue on the rustc \
@@ -46,12 +49,12 @@ fn eval_body_using_ecx<'mir, 'tcx>(
         ecx.tcx.def_kind(cid.instance.def_id())
     );
     let layout = ecx.layout_of(body.bound_return_ty().subst(tcx, cid.instance.substs))?;
-    assert!(!layout.is_unsized());
+    assert!(layout.is_sized());
     let ret = ecx.allocate(layout, MemoryKind::Stack)?;
 
     trace!(
         "eval_body_using_ecx: pushing stack frame for global: {}{}",
-        with_no_trimmed_paths!(ty::tls::with(|tcx| tcx.def_path_str(cid.instance.def_id()))),
+        with_no_trimmed_paths!(ecx.tcx.def_path_str(cid.instance.def_id())),
         cid.promoted.map_or_else(String::new, |p| format!("::promoted[{:?}]", p))
     );
 
@@ -63,7 +66,7 @@ fn eval_body_using_ecx<'mir, 'tcx>(
     )?;
 
     // The main interpreter loop.
-    ecx.run()?;
+    while ecx.step()? {}
 
     // Intern the result
     let intern_kind = if cid.promoted.is_some() {
@@ -74,7 +77,7 @@ fn eval_body_using_ecx<'mir, 'tcx>(
             None => InternKind::Constant,
         }
     };
-    ecx.machine.check_alignment = false; // interning doesn't need to respect alignment
+    ecx.machine.check_alignment = CheckAlignment::No; // interning doesn't need to respect alignment
     intern_const_alloc_recursive(ecx, intern_kind, &ret)?;
     // we leave alignment checks off, since this `ecx` will not be used for further evaluation anyway
 
@@ -100,11 +103,7 @@ pub(super) fn mk_eval_cx<'mir, 'tcx>(
         tcx,
         root_span,
         param_env,
-        CompileTimeInterpreter::new(
-            tcx.const_eval_limit(),
-            can_access_statics,
-            /*check_alignment:*/ false,
-        ),
+        CompileTimeInterpreter::new(tcx.const_eval_limit(), can_access_statics, CheckAlignment::No),
     )
 }
 
@@ -135,14 +134,14 @@ pub(super) fn op_to_const<'tcx>(
         _ => false,
     };
     let immediate = if try_as_immediate {
-        Err(ecx.read_immediate(op).expect("normalization works on validated constants"))
+        Right(ecx.read_immediate(op).expect("normalization works on validated constants"))
     } else {
         // It is guaranteed that any non-slice scalar pair is actually ByRef here.
         // When we come back from raw const eval, we are always by-ref. The only way our op here is
         // by-val is if we are in destructure_mir_constant, i.e., if this is (a field of) something that we
         // "tried to make immediate" before. We wouldn't do that for non-slice scalar pairs or
         // structs containing such.
-        op.try_as_mplace()
+        op.as_mplace_or_imm()
     };
 
     debug!(?immediate);
@@ -168,9 +167,9 @@ pub(super) fn op_to_const<'tcx>(
         }
     };
     match immediate {
-        Ok(ref mplace) => to_const_value(mplace),
+        Left(ref mplace) => to_const_value(mplace),
         // see comment on `let try_as_immediate` above
-        Err(imm) => match *imm {
+        Right(imm) => match *imm {
             _ if imm.layout.is_zst() => ConstValue::ZeroSized,
             Immediate::Scalar(x) => ConstValue::Scalar(x),
             Immediate::ScalarPair(a, b) => {
@@ -181,13 +180,13 @@ pub(super) fn op_to_const<'tcx>(
                         (ecx.tcx.global_alloc(alloc_id).unwrap_memory(), offset.bytes())
                     }
                     (None, _offset) => (
-                        ecx.tcx.intern_const_alloc(Allocation::from_bytes_byte_aligned_immutable(
+                        ecx.tcx.mk_const_alloc(Allocation::from_bytes_byte_aligned_immutable(
                             b"" as &[u8],
                         )),
                         0,
                     ),
                 };
-                let len = b.to_machine_usize(ecx).unwrap();
+                let len = b.to_target_usize(ecx).unwrap();
                 let start = start.try_into().unwrap();
                 let len: usize = len.try_into().unwrap();
                 ConstValue::Slice { data, start, end: start + len }
@@ -255,7 +254,7 @@ pub fn eval_to_const_value_raw_provider<'tcx>(
         return eval_nullary_intrinsic(tcx, key.param_env, def_id, substs).map_err(|error| {
             let span = tcx.def_span(def_id);
             let error = ConstEvalErr { error: error.into_kind(), stacktrace: vec![], span };
-            error.report_as_error(tcx.at(span), "could not evaluate nullary intrinsic")
+            error.report(tcx.at(span), "could not evaluate nullary intrinsic")
         });
     }
 
@@ -309,7 +308,11 @@ pub fn eval_to_allocation_raw_provider<'tcx>(
         CompileTimeInterpreter::new(
             tcx.const_eval_limit(),
             /*can_access_statics:*/ is_static,
-            /*check_alignment:*/ tcx.sess.opts.unstable_opts.extra_const_ub_checks,
+            if tcx.sess.opts.unstable_opts.extra_const_ub_checks {
+                CheckAlignment::Error
+            } else {
+                CheckAlignment::FutureIncompat
+            },
         ),
     );
 
@@ -333,7 +336,7 @@ pub fn eval_to_allocation_raw_provider<'tcx>(
                 }
             };
 
-            Err(err.report_as_error(ecx.tcx.at(err.span), &msg))
+            Err(err.report(ecx.tcx.at(err.span), &msg))
         }
         Ok(mplace) => {
             // Since evaluation had no errors, validate the resulting constant.
@@ -358,7 +361,7 @@ pub fn eval_to_allocation_raw_provider<'tcx>(
             if let Err(error) = validation {
                 // Validation failed, report an error. This is always a hard error.
                 let err = ConstEvalErr::new(&ecx, error, None);
-                Err(err.struct_error(
+                Err(err.report_decorated(
                     ecx.tcx,
                     "it is undefined behavior to use this value",
                     |diag| {
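
Note the `Left`/`Right` change in `op_to_const`: it previously overloaded `Result`'s `Ok`/`Err` to mean "by-ref place" vs. "immediate", even though neither case is a failure. The diff switches to the `either` crate, whose variants carry no success/failure connotation. A minimal sketch of the new shape (unit structs standing in for `MPlaceTy` and `ImmTy`; requires the `either` dependency):

    use either::{Either, Left, Right};

    struct MPlace; // stand-in for a by-ref `MPlaceTy`
    struct Imm;    // stand-in for a by-val `ImmTy`

    fn as_mplace_or_imm(is_by_ref: bool) -> Either<MPlace, Imm> {
        if is_by_ref { Left(MPlace) } else { Right(Imm) }
    }

    fn describe(op: Either<MPlace, Imm>) -> &'static str {
        match op {
            Left(_mplace) => "read the value through memory",
            Right(_imm) => "use the immediate directly",
        }
    }
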
diff --git a/compiler/rustc_const_eval/src/const_eval/fn_queries.rs b/compiler/rustc_const_eval/src/const_eval/fn_queries.rs
index f1674d04f8d..6dcfdc14790 100644
--- a/compiler/rustc_const_eval/src/const_eval/fn_queries.rs
+++ b/compiler/rustc_const_eval/src/const_eval/fn_queries.rs
@@ -1,15 +1,22 @@
+use rustc_attr as attr;
 use rustc_hir as hir;
 use rustc_hir::def::DefKind;
 use rustc_hir::def_id::{DefId, LocalDefId};
 use rustc_middle::ty::query::Providers;
-use rustc_middle::ty::{DefIdTree, TyCtxt};
+use rustc_middle::ty::TyCtxt;
 use rustc_span::symbol::Symbol;
 
-/// Whether the `def_id` is an unstable const fn and what feature gate is necessary to enable it
-pub fn is_unstable_const_fn(tcx: TyCtxt<'_>, def_id: DefId) -> Option<Symbol> {
+/// Whether the `def_id` is an unstable const fn and what feature gate(s) are necessary to enable
+/// it.
+pub fn is_unstable_const_fn(tcx: TyCtxt<'_>, def_id: DefId) -> Option<(Symbol, Option<Symbol>)> {
     if tcx.is_const_fn_raw(def_id) {
         let const_stab = tcx.lookup_const_stability(def_id)?;
-        if const_stab.is_const_unstable() { Some(const_stab.feature) } else { None }
+        match const_stab.level {
+            attr::StabilityLevel::Unstable { implied_by, .. } => {
+                Some((const_stab.feature, implied_by))
+            }
+            attr::StabilityLevel::Stable { .. } => None,
+        }
     } else {
         None
     }
@@ -17,7 +24,8 @@ pub fn is_unstable_const_fn(tcx: TyCtxt<'_>, def_id: DefId) -> Option<Symbol> {
 
 pub fn is_parent_const_impl_raw(tcx: TyCtxt<'_>, def_id: LocalDefId) -> bool {
     let parent_id = tcx.local_parent(def_id);
-    tcx.def_kind(parent_id) == DefKind::Impl && tcx.constness(parent_id) == hir::Constness::Const
+    matches!(tcx.def_kind(parent_id), DefKind::Impl { .. })
+        && tcx.constness(parent_id) == hir::Constness::Const
 }
 
 /// Checks whether an item is considered to be `const`. If it is a constructor, it is const. If
@@ -41,6 +49,7 @@ fn constness(tcx: TyCtxt<'_>, def_id: DefId) -> hir::Constness {
             };
             if is_const { hir::Constness::Const } else { hir::Constness::NotConst }
         }
+        hir::Node::Expr(e) if let hir::ExprKind::Closure(c) = e.kind => c.constness,
         _ => {
             if let Some(fn_kind) = node.fn_kind() {
                 if fn_kind.constness() == hir::Constness::Const {
@@ -65,7 +74,7 @@ fn is_promotable_const_fn(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
                 if cfg!(debug_assertions) && stab.promotable {
                     let sig = tcx.fn_sig(def_id);
                     assert_eq!(
-                        sig.unsafety(),
+                        sig.skip_binder().unsafety(),
                         hir::Unsafety::Normal,
                         "don't mark const unsafe fns as promotable",
                         // https://github.com/rust-lang/rust/pull/53851#issuecomment-418760682
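
With `implied_by` support, `is_unstable_const_fn` now reports both the gating feature and, optionally, a second feature that implies it. A hedged sketch of how a caller might render that pair; the helper is invented for illustration, with `String` standing in for `Symbol`:

    fn describe_const_gate(gate: Option<(String, Option<String>)>) -> String {
        match gate {
            // Stable as a const fn: nothing to enable.
            None => "callable in const contexts on stable".to_string(),
            // Unstable, gated on exactly one feature.
            Some((feature, None)) => format!("requires feature `{feature}`"),
            // Unstable, and enabling `implied_by` also enables the gate.
            Some((feature, Some(implied_by))) => {
                format!("requires feature `{feature}` (also enabled by `{implied_by}`)")
            }
        }
    }
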
diff --git a/compiler/rustc_const_eval/src/const_eval/machine.rs b/compiler/rustc_const_eval/src/const_eval/machine.rs
index 35d58d2f638..a44f70ed059 100644
--- a/compiler/rustc_const_eval/src/const_eval/machine.rs
+++ b/compiler/rustc_const_eval/src/const_eval/machine.rs
@@ -1,8 +1,13 @@
 use rustc_hir::def::DefKind;
+use rustc_hir::{LangItem, CRATE_HIR_ID};
 use rustc_middle::mir;
+use rustc_middle::mir::interpret::PointerArithmetic;
+use rustc_middle::ty::layout::FnAbiOf;
 use rustc_middle::ty::{self, Ty, TyCtxt};
+use rustc_session::lint::builtin::INVALID_ALIGNMENT;
 use std::borrow::Borrow;
 use std::hash::Hash;
+use std::ops::ControlFlow;
 
 use rustc_data_structures::fx::FxIndexMap;
 use rustc_data_structures::fx::IndexEntry;
@@ -17,58 +22,12 @@ use rustc_target::abi::{Align, Size};
 use rustc_target::spec::abi::Abi as CallAbi;
 
 use crate::interpret::{
-    self, compile_time_machine, AllocId, ConstAllocation, Frame, ImmTy, InterpCx, InterpResult,
-    OpTy, PlaceTy, Pointer, Scalar, StackPopUnwind,
+    self, compile_time_machine, AllocId, ConstAllocation, FnVal, Frame, ImmTy, InterpCx,
+    InterpResult, OpTy, PlaceTy, Pointer, Scalar, StackPopUnwind,
 };
 
 use super::error::*;
 
-impl<'mir, 'tcx> InterpCx<'mir, 'tcx, CompileTimeInterpreter<'mir, 'tcx>> {
-    /// "Intercept" a function call to a panic-related function
-    /// because we have something special to do for it.
-    /// If this returns successfully (`Ok`), the function should just be evaluated normally.
-    fn hook_special_const_fn(
-        &mut self,
-        instance: ty::Instance<'tcx>,
-        args: &[OpTy<'tcx>],
-    ) -> InterpResult<'tcx, Option<ty::Instance<'tcx>>> {
-        // All `#[rustc_do_not_const_check]` functions should be hooked here.
-        let def_id = instance.def_id();
-
-        if Some(def_id) == self.tcx.lang_items().panic_display()
-            || Some(def_id) == self.tcx.lang_items().begin_panic_fn()
-        {
-            // &str or &&str
-            assert!(args.len() == 1);
-
-            let mut msg_place = self.deref_operand(&args[0])?;
-            while msg_place.layout.ty.is_ref() {
-                msg_place = self.deref_operand(&msg_place.into())?;
-            }
-
-            let msg = Symbol::intern(self.read_str(&msg_place)?);
-            let span = self.find_closest_untracked_caller_location();
-            let (file, line, col) = self.location_triple_for_span(span);
-            return Err(ConstEvalErrKind::Panic { msg, file, line, col }.into());
-        } else if Some(def_id) == self.tcx.lang_items().panic_fmt() {
-            // For panic_fmt, call const_panic_fmt instead.
-            if let Some(const_panic_fmt) = self.tcx.lang_items().const_panic_fmt() {
-                return Ok(Some(
-                    ty::Instance::resolve(
-                        *self.tcx,
-                        ty::ParamEnv::reveal_all(),
-                        const_panic_fmt,
-                        self.tcx.intern_substs(&[]),
-                    )
-                    .unwrap()
-                    .unwrap(),
-                ));
-            }
-        }
-        Ok(None)
-    }
-}
-
 /// Extra machine state for CTFE, and the Machine instance
 pub struct CompileTimeInterpreter<'mir, 'tcx> {
     /// For now, the number of terminators that can be evaluated before we throw a resource
@@ -89,14 +48,34 @@ pub struct CompileTimeInterpreter<'mir, 'tcx> {
     pub(super) can_access_statics: bool,
 
     /// Whether to check alignment during evaluation.
-    pub(super) check_alignment: bool,
+    pub(super) check_alignment: CheckAlignment,
+}
+
+#[derive(Copy, Clone)]
+pub enum CheckAlignment {
+    /// Ignore alignment when following relocations.
+    /// This is mainly used in interning.
+    No,
+    /// Hard error when dereferencing a misaligned pointer.
+    Error,
+    /// Emit a future incompat lint when dereferencing a misaligned pointer.
+    FutureIncompat,
+}
+
+impl CheckAlignment {
+    pub fn should_check(&self) -> bool {
+        match self {
+            CheckAlignment::No => false,
+            CheckAlignment::Error | CheckAlignment::FutureIncompat => true,
+        }
+    }
 }
 
 impl<'mir, 'tcx> CompileTimeInterpreter<'mir, 'tcx> {
     pub(crate) fn new(
         const_eval_limit: Limit,
         can_access_statics: bool,
-        check_alignment: bool,
+        check_alignment: CheckAlignment,
     ) -> Self {
         CompileTimeInterpreter {
             steps_remaining: const_eval_limit.0,
@@ -191,6 +170,125 @@ impl interpret::MayLeak for ! {
 }
 
 impl<'mir, 'tcx: 'mir> CompileTimeEvalContext<'mir, 'tcx> {
+    /// "Intercept" a function call, because we have something special to do for it.
+    /// All `#[rustc_do_not_const_check]` functions should be hooked here.
+    /// If this returns `Some` function, which may be `instance` or a different function with
+    /// compatible arguments, then evaluation should continue with that function.
+    /// If this returns `None`, the function call has been handled and the function has returned.
+    fn hook_special_const_fn(
+        &mut self,
+        instance: ty::Instance<'tcx>,
+        args: &[OpTy<'tcx>],
+        dest: &PlaceTy<'tcx>,
+        ret: Option<mir::BasicBlock>,
+    ) -> InterpResult<'tcx, Option<ty::Instance<'tcx>>> {
+        let def_id = instance.def_id();
+
+        if Some(def_id) == self.tcx.lang_items().panic_display()
+            || Some(def_id) == self.tcx.lang_items().begin_panic_fn()
+        {
+            // &str or &&str
+            assert!(args.len() == 1);
+
+            let mut msg_place = self.deref_operand(&args[0])?;
+            while msg_place.layout.ty.is_ref() {
+                msg_place = self.deref_operand(&msg_place.into())?;
+            }
+
+            let msg = Symbol::intern(self.read_str(&msg_place)?);
+            let span = self.find_closest_untracked_caller_location();
+            let (file, line, col) = self.location_triple_for_span(span);
+            return Err(ConstEvalErrKind::Panic { msg, file, line, col }.into());
+        } else if Some(def_id) == self.tcx.lang_items().panic_fmt() {
+            // For panic_fmt, call const_panic_fmt instead.
+            let const_def_id = self.tcx.require_lang_item(LangItem::ConstPanicFmt, None);
+            let new_instance = ty::Instance::resolve(
+                *self.tcx,
+                ty::ParamEnv::reveal_all(),
+                const_def_id,
+                instance.substs,
+            )
+            .unwrap()
+            .unwrap();
+
+            return Ok(Some(new_instance));
+        } else if Some(def_id) == self.tcx.lang_items().align_offset_fn() {
+            // For align_offset, we replace the function call if the pointer has no address.
+            match self.align_offset(instance, args, dest, ret)? {
+                ControlFlow::Continue(()) => return Ok(Some(instance)),
+                ControlFlow::Break(()) => return Ok(None),
+            }
+        }
+        Ok(Some(instance))
+    }
+
+    /// `align_offset(ptr, target_align)` needs special handling in const eval, because the pointer
+    /// may not have an address.
+    ///
+    /// If `ptr` does have a known address, then we return `Continue(())` and the function call should
+    /// proceed as normal.
+    ///
+    /// If `ptr` doesn't have an address, but its underlying allocation's alignment is at most
+    /// `target_align`, then we call the function again with a dummy address relative to the
+    /// allocation.
+    ///
+    /// If `ptr` doesn't have an address and `target_align` is stricter than the underlying
+    /// allocation's alignment, then we return `usize::MAX` immediately.
+    fn align_offset(
+        &mut self,
+        instance: ty::Instance<'tcx>,
+        args: &[OpTy<'tcx>],
+        dest: &PlaceTy<'tcx>,
+        ret: Option<mir::BasicBlock>,
+    ) -> InterpResult<'tcx, ControlFlow<()>> {
+        assert_eq!(args.len(), 2);
+
+        let ptr = self.read_pointer(&args[0])?;
+        let target_align = self.read_scalar(&args[1])?.to_target_usize(self)?;
+
+        if !target_align.is_power_of_two() {
+            throw_ub_format!("`align_offset` called with non-power-of-two align: {}", target_align);
+        }
+
+        match self.ptr_try_get_alloc_id(ptr) {
+            Ok((alloc_id, offset, _extra)) => {
+                let (_size, alloc_align, _kind) = self.get_alloc_info(alloc_id);
+
+                if target_align <= alloc_align.bytes() {
+                    // Extract the address relative to the allocation base that is definitely
+                    // sufficiently aligned and call `align_offset` again.
+                    let addr = ImmTy::from_uint(offset.bytes(), args[0].layout).into();
+                    let align = ImmTy::from_uint(target_align, args[1].layout).into();
+                    let fn_abi = self.fn_abi_of_instance(instance, ty::List::empty())?;
+
+                    // We replace the entire function call with a "tail call".
+                    // Note that this happens before the frame of the original function
+                    // is pushed on the stack.
+                    self.eval_fn_call(
+                        FnVal::Instance(instance),
+                        (CallAbi::Rust, fn_abi),
+                        &[addr, align],
+                        /* with_caller_location = */ false,
+                        dest,
+                        ret,
+                        StackPopUnwind::NotAllowed,
+                    )?;
+                    Ok(ControlFlow::Break(()))
+                } else {
+                    // Not alignable in const, return `usize::MAX`.
+                    let usize_max = Scalar::from_target_usize(self.target_usize_max(), self);
+                    self.write_scalar(usize_max, dest)?;
+                    self.return_to_block(ret)?;
+                    Ok(ControlFlow::Break(()))
+                }
+            }
+            Err(_addr) => {
+                // The pointer has an address, continue with function call.
+                Ok(ControlFlow::Continue(()))
+            }
+        }
+    }
+
     /// See documentation on the `ptr_guaranteed_cmp` intrinsic.
     fn guaranteed_cmp(&mut self, a: Scalar, b: Scalar) -> InterpResult<'tcx, u8> {
         Ok(match (a, b) {
@@ -232,7 +330,7 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir,
     const PANIC_ON_ALLOC_FAIL: bool = false; // will be raised as a proper error
 
     #[inline(always)]
-    fn enforce_alignment(ecx: &InterpCx<'mir, 'tcx, Self>) -> bool {
+    fn enforce_alignment(ecx: &InterpCx<'mir, 'tcx, Self>) -> CheckAlignment {
         ecx.machine.check_alignment
     }
 
@@ -241,6 +339,36 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir,
         ecx.tcx.sess.opts.unstable_opts.extra_const_ub_checks
     }
 
+    fn alignment_check_failed(
+        ecx: &InterpCx<'mir, 'tcx, Self>,
+        has: Align,
+        required: Align,
+        check: CheckAlignment,
+    ) -> InterpResult<'tcx, ()> {
+        let err = err_ub!(AlignmentCheckFailed { has, required }).into();
+        match check {
+            CheckAlignment::Error => Err(err),
+            CheckAlignment::No => span_bug!(
+                ecx.cur_span(),
+                "`alignment_check_failed` called when no alignment check requested"
+            ),
+            CheckAlignment::FutureIncompat => {
+                let err = ConstEvalErr::new(ecx, err, None);
+                ecx.tcx.struct_span_lint_hir(
+                    INVALID_ALIGNMENT,
+                    ecx.stack().iter().find_map(|frame| frame.lint_root()).unwrap_or(CRATE_HIR_ID),
+                    err.span,
+                    err.error.to_string(),
+                    |db| {
+                        err.decorate(db, |_| {});
+                        db
+                    },
+                );
+                Ok(())
+            }
+        }
+    }
+
     fn load_mir(
         ecx: &InterpCx<'mir, 'tcx, Self>,
         instance: ty::InstanceDef<'tcx>,
@@ -271,8 +399,8 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir,
         instance: ty::Instance<'tcx>,
         _abi: CallAbi,
         args: &[OpTy<'tcx>],
-        _dest: &PlaceTy<'tcx>,
-        _ret: Option<mir::BasicBlock>,
+        dest: &PlaceTy<'tcx>,
+        ret: Option<mir::BasicBlock>,
         _unwind: StackPopUnwind, // unwinding is not supported in consts
     ) -> InterpResult<'tcx, Option<(&'mir mir::Body<'tcx>, ty::Instance<'tcx>)>> {
         debug!("find_mir_or_eval_fn: {:?}", instance);
@@ -280,7 +408,7 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir,
         // Only check non-glue functions
         if let ty::InstanceDef::Item(def) = instance.def {
             // Execution might have wandered off into other crates, so we cannot do a stability-
-            // sensitive check here.  But we can at least rule out functions that are not const
+            // sensitive check here. But we can at least rule out functions that are not const
             // at all.
             if !ecx.tcx.is_const_fn_raw(def.did) {
                 // allow calling functions inside a trait marked with #[const_trait].
@@ -291,7 +419,11 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir,
                 }
             }
 
-            if let Some(new_instance) = ecx.hook_special_const_fn(instance, args)? {
+            let Some(new_instance) = ecx.hook_special_const_fn(instance, args, dest, ret)? else {
+                return Ok(None);
+            };
+
+            if new_instance != instance {
                 // We call another const fn instead.
                 // However, we return the *original* instance to make backtraces work out
                 // (and we hope this does not confuse the FnAbi checks too much).
@@ -300,13 +432,14 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir,
                     new_instance,
                     _abi,
                     args,
-                    _dest,
-                    _ret,
+                    dest,
+                    ret,
                     _unwind,
                 )?
                 .map(|(body, _instance)| (body, instance)));
             }
         }
+
         // This is a const fn. Call it.
         Ok(Some((ecx.load_mir(instance.def, None)?, instance)))
     }
@@ -337,8 +470,8 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir,
                 ecx.write_scalar(Scalar::from_u8(cmp), dest)?;
             }
             sym::const_allocate => {
-                let size = ecx.read_scalar(&args[0])?.to_machine_usize(ecx)?;
-                let align = ecx.read_scalar(&args[1])?.to_machine_usize(ecx)?;
+                let size = ecx.read_scalar(&args[0])?.to_target_usize(ecx)?;
+                let align = ecx.read_scalar(&args[1])?.to_target_usize(ecx)?;
 
                 let align = match Align::from_bytes(align) {
                     Ok(a) => a,
@@ -354,8 +487,8 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir,
             }
             sym::const_deallocate => {
                 let ptr = ecx.read_pointer(&args[0])?;
-                let size = ecx.read_scalar(&args[1])?.to_machine_usize(ecx)?;
-                let align = ecx.read_scalar(&args[2])?.to_machine_usize(ecx)?;
+                let size = ecx.read_scalar(&args[1])?.to_target_usize(ecx)?;
+                let align = ecx.read_scalar(&args[2])?.to_target_usize(ecx)?;
 
                 let size = Size::from_bytes(size);
                 let align = match Align::from_bytes(align) {
@@ -400,7 +533,7 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir,
         let eval_to_int =
             |op| ecx.read_immediate(&ecx.eval_operand(op, None)?).map(|x| x.to_const_int());
         let err = match msg {
-            BoundsCheck { ref len, ref index } => {
+            BoundsCheck { len, index } => {
                 let len = eval_to_int(len)?;
                 let index = eval_to_int(index)?;
                 BoundsCheck { len, index }
@@ -428,8 +561,8 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir,
         throw_unsup_format!("pointer arithmetic or comparison is not supported at compile-time");
     }
 
-    fn before_terminator(ecx: &mut InterpCx<'mir, 'tcx, Self>) -> InterpResult<'tcx> {
-        // The step limit has already been hit in a previous call to `before_terminator`.
+    fn increment_const_eval_counter(ecx: &mut InterpCx<'mir, 'tcx, Self>) -> InterpResult<'tcx> {
+        // The step limit has already been hit in a previous call to `increment_const_eval_counter`.
         if ecx.machine.steps_remaining == 0 {
             return Ok(());
         }
@@ -489,10 +622,9 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir,
         let alloc = alloc.inner();
         if is_write {
             // Write access. These are never allowed, but we give a targeted error message.
-            if alloc.mutability == Mutability::Not {
-                Err(err_ub!(WriteToReadOnly(alloc_id)).into())
-            } else {
-                Err(ConstEvalErrKind::ModifiedGlobal.into())
+            match alloc.mutability {
+                Mutability::Not => Err(err_ub!(WriteToReadOnly(alloc_id)).into()),
+                Mutability::Mut => Err(ConstEvalErrKind::ModifiedGlobal.into()),
             }
         } else {
             // Read access. These are usually allowed, with some exceptions.
diff --git a/compiler/rustc_const_eval/src/const_eval/mod.rs b/compiler/rustc_const_eval/src/const_eval/mod.rs
index 1c33e7845cb..3cdf1e6e30c 100644
--- a/compiler/rustc_const_eval/src/const_eval/mod.rs
+++ b/compiler/rustc_const_eval/src/const_eval/mod.rs
@@ -103,11 +103,11 @@ pub(crate) fn try_destructure_mir_constant<'tcx>(
 ) -> InterpResult<'tcx, mir::DestructuredConstant<'tcx>> {
     trace!("destructure_mir_constant: {:?}", val);
     let ecx = mk_eval_cx(tcx, DUMMY_SP, param_env, false);
-    let op = ecx.const_to_op(&val, None)?;
+    let op = ecx.eval_mir_constant(&val, None, None)?;
 
     // We go to `usize` as we cannot allocate anything bigger anyway.
     let (field_count, variant, down) = match val.ty().kind() {
-        ty::Array(_, len) => (len.eval_usize(tcx, param_env) as usize, None, op),
+        ty::Array(_, len) => (len.eval_target_usize(tcx, param_env) as usize, None, op),
         ty::Adt(def, _) if def.variants().is_empty() => {
             throw_ub!(Unreachable)
         }
@@ -139,7 +139,7 @@ pub(crate) fn deref_mir_constant<'tcx>(
     val: mir::ConstantKind<'tcx>,
 ) -> mir::ConstantKind<'tcx> {
     let ecx = mk_eval_cx(tcx, DUMMY_SP, param_env, false);
-    let op = ecx.const_to_op(&val, None).unwrap();
+    let op = ecx.eval_mir_constant(&val, None, None).unwrap();
     let mplace = ecx.deref_operand(&op).unwrap();
     if let Some(alloc_id) = mplace.ptr.provenance {
         assert_eq!(
@@ -155,7 +155,7 @@ pub(crate) fn deref_mir_constant<'tcx>(
         // In case of unsized types, figure out the real type behind.
         MemPlaceMeta::Meta(scalar) => match mplace.layout.ty.kind() {
             ty::Str => bug!("there's no sized equivalent of a `str`"),
-            ty::Slice(elem_ty) => tcx.mk_array(*elem_ty, scalar.to_machine_usize(&tcx).unwrap()),
+            ty::Slice(elem_ty) => tcx.mk_array(*elem_ty, scalar.to_target_usize(&tcx).unwrap()),
             _ => bug!(
                 "type {} should not have metadata, but had {:?}",
                 mplace.layout.ty,
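
A recurring mechanical change in this diff is the `machine` -> `target` rename (`to_machine_usize` -> `to_target_usize`, `eval_usize` -> `eval_target_usize`, and so on): the width in question is the compilation target's pointer width, which need not match the host's. A small sketch of what "target usize" means (illustrative, assuming pointer widths up to 64 bits):

    /// Truncate an interpreter value to the *target's* usize width.
    fn truncate_to_target_usize(val: u128, target_pointer_width_bits: u32) -> u64 {
        assert!(target_pointer_width_bits <= 64);
        let mask = (1u128 << target_pointer_width_bits) - 1;
        (val & mask) as u64
    }

    // Compiling on a 64-bit host for a 32-bit target:
    // truncate_to_target_usize(u64::MAX as u128, 32) == u32::MAX as u64
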
diff --git a/compiler/rustc_const_eval/src/const_eval/valtrees.rs b/compiler/rustc_const_eval/src/const_eval/valtrees.rs
index f4da1188395..a73f778d4db 100644
--- a/compiler/rustc_const_eval/src/const_eval/valtrees.rs
+++ b/compiler/rustc_const_eval/src/const_eval/valtrees.rs
@@ -142,17 +142,16 @@ pub(crate) fn const_to_valtree_inner<'tcx>(
         | ty::Foreign(..)
         | ty::Infer(ty::FreshIntTy(_))
         | ty::Infer(ty::FreshFloatTy(_))
-        | ty::Projection(..)
+        // FIXME(oli-obk): we could look behind opaque types
+        | ty::Alias(..)
         | ty::Param(_)
         | ty::Bound(..)
         | ty::Placeholder(..)
-        // FIXME(oli-obk): we could look behind opaque types
-        | ty::Opaque(..)
         | ty::Infer(_)
         // FIXME(oli-obk): we can probably encode closures just like structs
         | ty::Closure(..)
         | ty::Generator(..)
-        | ty::GeneratorWitness(..) => Err(ValTreeCreationError::NonSupportedType),
+        | ty::GeneratorWitness(..) | ty::GeneratorWitnessMIR(..) => Err(ValTreeCreationError::NonSupportedType),
     }
 }
 
@@ -194,7 +193,7 @@ fn get_info_on_unsized_field<'tcx>(
 
     // Have to adjust type for ty::Str
     let unsized_inner_ty = match unsized_inner_ty.kind() {
-        ty::Str => tcx.mk_ty(ty::Uint(ty::UintTy::U8)),
+        ty::Str => tcx.types.u8,
         _ => unsized_inner_ty,
     };
 
@@ -217,7 +216,7 @@ fn create_pointee_place<'tcx>(
 
         let (unsized_inner_ty, num_elems) = get_info_on_unsized_field(ty, valtree, tcx);
         let unsized_inner_ty = match unsized_inner_ty.kind() {
-            ty::Str => tcx.mk_ty(ty::Uint(ty::UintTy::U8)),
+            ty::Str => tcx.types.u8,
             _ => unsized_inner_ty,
         };
         let unsized_inner_ty_size =
@@ -240,7 +239,7 @@ fn create_pointee_place<'tcx>(
         MPlaceTy::from_aligned_ptr_with_meta(
             ptr.into(),
             layout,
-            MemPlaceMeta::Meta(Scalar::from_machine_usize(num_elems as u64, &tcx)),
+            MemPlaceMeta::Meta(Scalar::from_target_usize(num_elems as u64, &tcx)),
         )
     } else {
         create_mplace_from_layout(ecx, ty)
@@ -307,15 +306,15 @@ pub fn valtree_to_const_value<'tcx>(
         | ty::Foreign(..)
         | ty::Infer(ty::FreshIntTy(_))
         | ty::Infer(ty::FreshFloatTy(_))
-        | ty::Projection(..)
+        | ty::Alias(..)
         | ty::Param(_)
         | ty::Bound(..)
         | ty::Placeholder(..)
-        | ty::Opaque(..)
         | ty::Infer(_)
         | ty::Closure(..)
         | ty::Generator(..)
         | ty::GeneratorWitness(..)
+        | ty::GeneratorWitnessMIR(..)
         | ty::FnPtr(_)
         | ty::RawPtr(_)
         | ty::Str
@@ -356,7 +355,7 @@ fn valtree_into_mplace<'tcx>(
             let imm = match inner_ty.kind() {
                 ty::Slice(_) | ty::Str => {
                     let len = valtree.unwrap_branch().len();
-                    let len_scalar = Scalar::from_machine_usize(len as u64, &tcx);
+                    let len_scalar = Scalar::from_target_usize(len as u64, &tcx);
 
                     Immediate::ScalarPair(
                         Scalar::from_maybe_pointer((*pointee_place).ptr, &tcx),
@@ -427,7 +426,7 @@ fn valtree_into_mplace<'tcx>(
                         place
                             .offset_with_meta(
                                 offset,
-                                MemPlaceMeta::Meta(Scalar::from_machine_usize(
+                                MemPlaceMeta::Meta(Scalar::from_target_usize(
                                     num_elems as u64,
                                     &tcx,
                                 )),
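
The `ty::Projection(..)` and `ty::Opaque(..)` arms above fold into a single `ty::Alias(..)`, since both kinds of type alias now live under one variant. The shape of the change in miniature (toy enums, not the real `TyKind`):

    enum AliasKind { Projection, Opaque }
    enum TyKind { Alias(AliasKind), Str, Other }

    fn supported_in_valtree(ty: &TyKind) -> bool {
        match ty {
            // One arm now covers what used to be two.
            TyKind::Alias(_) => false,
            TyKind::Str | TyKind::Other => true,
        }
    }

    fn main() {
        assert!(!supported_in_valtree(&TyKind::Alias(AliasKind::Opaque)));
        assert!(supported_in_valtree(&TyKind::Str));
    }
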
diff --git a/compiler/rustc_const_eval/src/errors.rs b/compiler/rustc_const_eval/src/errors.rs
index 4b055076742..f8b7cc6d7e1 100644
--- a/compiler/rustc_const_eval/src/errors.rs
+++ b/compiler/rustc_const_eval/src/errors.rs
@@ -9,12 +9,12 @@ pub(crate) struct UnstableInStable {
     #[primary_span]
     pub span: Span,
     #[suggestion(
-        unstable_sugg,
+        const_eval_unstable_sugg,
         code = "#[rustc_const_unstable(feature = \"...\", issue = \"...\")]\n",
         applicability = "has-placeholders"
     )]
     #[suggestion(
-        bypass_sugg,
+        const_eval_bypass_sugg,
         code = "#[rustc_allow_const_fn_unstable({gate})]\n",
         applicability = "has-placeholders"
     )]
@@ -35,15 +35,15 @@ pub(crate) struct StaticAccessErr {
     #[primary_span]
     pub span: Span,
     pub kind: ConstContext,
-    #[note(teach_note)]
-    #[help(teach_help)]
+    #[note(const_eval_teach_note)]
+    #[help(const_eval_teach_help)]
     pub teach: Option<()>,
 }
 
 #[derive(Diagnostic)]
 #[diag(const_eval_raw_ptr_to_int)]
 #[note]
-#[note(note2)]
+#[note(const_eval_note2)]
 pub(crate) struct RawPtrToIntErr {
     #[primary_span]
     pub span: Span,
@@ -118,7 +118,7 @@ pub(crate) struct UnallowedMutableRefs {
     #[primary_span]
     pub span: Span,
     pub kind: ConstContext,
-    #[note(teach_note)]
+    #[note(const_eval_teach_note)]
     pub teach: Option<()>,
 }
 
@@ -128,7 +128,7 @@ pub(crate) struct UnallowedMutableRefsRaw {
     #[primary_span]
     pub span: Span,
     pub kind: ConstContext,
-    #[note(teach_note)]
+    #[note(const_eval_teach_note)]
     pub teach: Option<()>,
 }
 #[derive(Diagnostic)]
@@ -163,7 +163,7 @@ pub(crate) struct UnallowedHeapAllocations {
     #[label]
     pub span: Span,
     pub kind: ConstContext,
-    #[note(teach_note)]
+    #[note(const_eval_teach_note)]
     pub teach: Option<()>,
 }
 
@@ -184,7 +184,7 @@ pub(crate) struct InteriorMutableDataRefer {
     #[help]
     pub opt_help: Option<()>,
     pub kind: ConstContext,
-    #[note(teach_note)]
+    #[note(const_eval_teach_note)]
     pub teach: Option<()>,
 }
 
diff --git a/compiler/rustc_const_eval/src/interpret/cast.rs b/compiler/rustc_const_eval/src/interpret/cast.rs
index 269ae15d497..c14152a916a 100644
--- a/compiler/rustc_const_eval/src/interpret/cast.rs
+++ b/compiler/rustc_const_eval/src/interpret/cast.rs
@@ -1,5 +1,4 @@
 use std::assert_matches::assert_matches;
-use std::convert::TryFrom;
 
 use rustc_apfloat::ieee::{Double, Single};
 use rustc_apfloat::{Float, FloatConvert};
@@ -68,12 +67,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             }
 
             Pointer(PointerCast::ReifyFnPointer) => {
+                // All reifications must be monomorphic, bail out otherwise.
+                ensure_monomorphic_enough(*self.tcx, src.layout.ty)?;
+
                 // The src operand does not matter, just its type
                 match *src.layout.ty.kind() {
                     ty::FnDef(def_id, substs) => {
-                        // All reifications must be monomorphic, bail out otherwise.
-                        ensure_monomorphic_enough(*self.tcx, src.layout.ty)?;
-
                         let instance = ty::Instance::resolve_for_fn_ptr(
                             *self.tcx,
                             self.param_env,
@@ -101,12 +100,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             }
 
             Pointer(PointerCast::ClosureFnPointer(_)) => {
+                // All reifications must be monomorphic, bail out otherwise.
+                ensure_monomorphic_enough(*self.tcx, src.layout.ty)?;
+
                 // The src operand does not matter, just its type
                 match *src.layout.ty.kind() {
                     ty::Closure(def_id, substs) => {
-                        // All reifications must be monomorphic, bail out otherwise.
-                        ensure_monomorphic_enough(*self.tcx, src.layout.ty)?;
-
                         let instance = ty::Instance::resolve_closure(
                             *self.tcx,
                             def_id,
@@ -127,7 +126,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                     let vtable = self.get_vtable_ptr(src.layout.ty, data.principal())?;
                     let vtable = Scalar::from_maybe_pointer(vtable, self);
                     let data = self.read_immediate(src)?.to_scalar();
-                    let _assert_pointer_sized = data.to_pointer(self)?;
+                    let _assert_pointer_like = data.to_pointer(self)?;
                     let val = Immediate::ScalarPair(data, vtable);
                     self.write_immediate(val, dest)?;
                 } else {
@@ -232,7 +231,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         // First cast to usize.
         let scalar = src.to_scalar();
         let addr = self.cast_from_int_like(scalar, src.layout, self.tcx.types.usize)?;
-        let addr = addr.to_machine_usize(self)?;
+        let addr = addr.to_target_usize(self)?;
 
         // Then turn address into pointer.
         let ptr = M::ptr_from_addr_cast(&self, addr)?;
@@ -313,6 +312,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         }
     }
 
+    /// `src` is a *pointer to* a `source_ty`, and in `dest` we should store a pointer to the same
+    /// data at type `cast_ty`.
     fn unsize_into_ptr(
         &mut self,
         src: &OpTy<'tcx, M::Provenance>,
@@ -329,11 +330,14 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             (&ty::Array(_, length), &ty::Slice(_)) => {
                 let ptr = self.read_scalar(src)?;
                 // u64 cast is from usize to u64, which is always good
-                let val =
-                    Immediate::new_slice(ptr, length.eval_usize(*self.tcx, self.param_env), self);
+                let val = Immediate::new_slice(
+                    ptr,
+                    length.eval_target_usize(*self.tcx, self.param_env),
+                    self,
+                );
                 self.write_immediate(val, dest)
             }
-            (&ty::Dynamic(ref data_a, ..), &ty::Dynamic(ref data_b, ..)) => {
+            (ty::Dynamic(data_a, _, ty::Dyn), ty::Dynamic(data_b, _, ty::Dyn)) => {
                 let val = self.read_immediate(src)?;
                 if data_a.principal() == data_b.principal() {
                     // A NOP cast that doesn't actually change anything, should be allowed even with mismatching vtables.
@@ -348,16 +352,24 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 let new_vptr = self.get_vtable_ptr(ty, data_b.principal())?;
                 self.write_immediate(Immediate::new_dyn_trait(old_data, new_vptr, self), dest)
             }
-            (_, &ty::Dynamic(ref data, _, ty::Dyn)) => {
+            (_, &ty::Dynamic(data, _, ty::Dyn)) => {
                 // Initial cast from sized to dyn trait
                 let vtable = self.get_vtable_ptr(src_pointee_ty, data.principal())?;
                 let ptr = self.read_scalar(src)?;
                 let val = Immediate::new_dyn_trait(ptr, vtable, &*self.tcx);
                 self.write_immediate(val, dest)
             }
-
             _ => {
-                span_bug!(self.cur_span(), "invalid unsizing {:?} -> {:?}", src.layout.ty, cast_ty)
+                // Do not ICE if we are not monomorphic enough.
+                ensure_monomorphic_enough(*self.tcx, src.layout.ty)?;
+                ensure_monomorphic_enough(*self.tcx, cast_ty)?;
+
+                span_bug!(
+                    self.cur_span(),
+                    "invalid pointer unsizing {:?} -> {:?}",
+                    src.layout.ty,
+                    cast_ty
+                )
             }
         }
     }
@@ -395,12 +407,18 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 }
                 Ok(())
             }
-            _ => span_bug!(
-                self.cur_span(),
-                "unsize_into: invalid conversion: {:?} -> {:?}",
-                src.layout,
-                dest.layout
-            ),
+            _ => {
+                // Do not ICE if we are not monomorphic enough.
+                ensure_monomorphic_enough(*self.tcx, src.layout.ty)?;
+                ensure_monomorphic_enough(*self.tcx, cast_ty.ty)?;
+
+                span_bug!(
+                    self.cur_span(),
+                    "unsize_into: invalid conversion: {:?} -> {:?}",
+                    src.layout,
+                    dest.layout
+                )
+            }
         }
     }
 }
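
For context, `unsize_into_ptr` above is the interpreter-side counterpart of ordinary unsizing coercions: array-to-slice attaches a length as metadata, sized-to-dyn attaches a vtable pointer. A runnable surface-Rust illustration of the two wide-pointer shapes it builds:

    use std::mem::size_of;

    trait Speak {
        fn speak(&self) -> &'static str;
    }
    struct Dog;
    impl Speak for Dog {
        fn speak(&self) -> &'static str { "woof" }
    }

    fn main() {
        // &[i32; 3] -> &[i32]: the wide pointer gains a length.
        let arr: &[i32; 3] = &[1, 2, 3];
        let slice: &[i32] = arr;
        assert_eq!(slice.len(), 3);

        // &Dog -> &dyn Speak: the wide pointer gains a vtable pointer.
        let dyn_ref: &dyn Speak = &Dog;
        assert_eq!(dyn_ref.speak(), "woof");

        // Either way the result is two words: (data, metadata).
        assert_eq!(size_of::<&[i32]>(), 2 * size_of::<usize>());
        assert_eq!(size_of::<&dyn Speak>(), 2 * size_of::<usize>());
    }
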
diff --git a/compiler/rustc_const_eval/src/interpret/discriminant.rs b/compiler/rustc_const_eval/src/interpret/discriminant.rs
new file mode 100644
index 00000000000..557e721249d
--- /dev/null
+++ b/compiler/rustc_const_eval/src/interpret/discriminant.rs
@@ -0,0 +1,238 @@
+//! Functions for reading and writing discriminants of multi-variant layouts (enums and generators).
+
+use rustc_middle::ty::layout::{LayoutOf, PrimitiveExt};
+use rustc_middle::{mir, ty};
+use rustc_target::abi::{self, TagEncoding};
+use rustc_target::abi::{VariantIdx, Variants};
+
+use super::{ImmTy, InterpCx, InterpResult, Machine, OpTy, PlaceTy, Scalar};
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+    /// Writes the discriminant of the given variant.
+    #[instrument(skip(self), level = "trace")]
+    pub fn write_discriminant(
+        &mut self,
+        variant_index: VariantIdx,
+        dest: &PlaceTy<'tcx, M::Provenance>,
+    ) -> InterpResult<'tcx> {
+        // Layout computation excludes uninhabited variants from consideration,
+        // therefore there's no way to represent those variants in the given layout.
+        // Essentially, uninhabited variants do not have a tag that corresponds to their
+        // discriminant, so we cannot do anything here.
+        // When evaluating we will always error before even getting here, but ConstProp 'executes'
+        // dead code, so we cannot ICE here.
+        if dest.layout.for_variant(self, variant_index).abi.is_uninhabited() {
+            throw_ub!(UninhabitedEnumVariantWritten)
+        }
+
+        match dest.layout.variants {
+            abi::Variants::Single { index } => {
+                assert_eq!(index, variant_index);
+            }
+            abi::Variants::Multiple {
+                tag_encoding: TagEncoding::Direct,
+                tag: tag_layout,
+                tag_field,
+                ..
+            } => {
+                // No need to validate the discriminant here because the
+                // `TyAndLayout::for_variant()` call earlier already checks that the variant is valid.
+
+                let discr_val =
+                    dest.layout.ty.discriminant_for_variant(*self.tcx, variant_index).unwrap().val;
+
+                // Raw discriminants for enums are isize or bigger during
+                // their computation, but the in-memory tag is the smallest possible
+                // representation.
+                let size = tag_layout.size(self);
+                let tag_val = size.truncate(discr_val);
+
+                let tag_dest = self.place_field(dest, tag_field)?;
+                self.write_scalar(Scalar::from_uint(tag_val, size), &tag_dest)?;
+            }
+            abi::Variants::Multiple {
+                tag_encoding:
+                    TagEncoding::Niche { untagged_variant, ref niche_variants, niche_start },
+                tag: tag_layout,
+                tag_field,
+                ..
+            } => {
+                // No need to validate the discriminant here because the
+                // `TyAndLayout::for_variant()` call earlier already checks that the variant is valid.
+
+                if variant_index != untagged_variant {
+                    let variants_start = niche_variants.start().as_u32();
+                    let variant_index_relative = variant_index
+                        .as_u32()
+                        .checked_sub(variants_start)
+                        .expect("overflow computing relative variant idx");
+                    // We need to use machine arithmetic when taking into account `niche_start`:
+                    // tag_val = variant_index_relative + niche_start_val
+                    let tag_layout = self.layout_of(tag_layout.primitive().to_int_ty(*self.tcx))?;
+                    let niche_start_val = ImmTy::from_uint(niche_start, tag_layout);
+                    let variant_index_relative_val =
+                        ImmTy::from_uint(variant_index_relative, tag_layout);
+                    let tag_val = self.binary_op(
+                        mir::BinOp::Add,
+                        &variant_index_relative_val,
+                        &niche_start_val,
+                    )?;
+                    // Write result.
+                    let niche_dest = self.place_field(dest, tag_field)?;
+                    self.write_immediate(*tag_val, &niche_dest)?;
+                }
+            }
+        }
+
+        Ok(())
+    }
+
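
A standalone sketch of the niche arithmetic performed above, assuming the names from this diff; `wrapping_add` stands in for the interpreter's `binary_op(mir::BinOp::Add, ..)` machine arithmetic, and the real code additionally truncates to the tag size:

// Hypothetical helper mirroring the tag computation in `write_discriminant`:
// tag = (variant_index - variants_start) + niche_start, with wrapping.
fn niche_tag(variant_index: u32, variants_start: u32, niche_start: u128) -> u128 {
    let variant_index_relative = variant_index
        .checked_sub(variants_start)
        .expect("overflow computing relative variant idx");
    niche_start.wrapping_add(u128::from(variant_index_relative))
}

fn main() {
    // E.g. for Option<&T>: niche_start = 0 and None (variant index 0) is the
    // tagged variant, so None is encoded as the all-zero pointer value.
    assert_eq!(niche_tag(0, 0, 0), 0);
}
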
+    /// Read discriminant, return the runtime value as well as the variant index.
+    /// Can also legally be called on non-enums (e.g. through the discriminant_value intrinsic)!
+    #[instrument(skip(self), level = "trace")]
+    pub fn read_discriminant(
+        &self,
+        op: &OpTy<'tcx, M::Provenance>,
+    ) -> InterpResult<'tcx, (Scalar<M::Provenance>, VariantIdx)> {
+        trace!("read_discriminant_value {:#?}", op.layout);
+        // Get type and layout of the discriminant.
+        let discr_layout = self.layout_of(op.layout.ty.discriminant_ty(*self.tcx))?;
+        trace!("discriminant type: {:?}", discr_layout.ty);
+
+        // We use "discriminant" to refer to the value associated with a particular enum variant.
+        // This is not to be confused with its "variant index", which just determines its position in the
+        // declared list of variants -- they can differ when discriminants are explicitly assigned.
+        // We use "tag" to refer to how the discriminant is encoded in memory, which can be either
+        // straightforward (`TagEncoding::Direct`) or with a niche (`TagEncoding::Niche`).
+        let (tag_scalar_layout, tag_encoding, tag_field) = match op.layout.variants {
+            Variants::Single { index } => {
+                let discr = match op.layout.ty.discriminant_for_variant(*self.tcx, index) {
+                    Some(discr) => {
+                        // This type actually has discriminants.
+                        assert_eq!(discr.ty, discr_layout.ty);
+                        Scalar::from_uint(discr.val, discr_layout.size)
+                    }
+                    None => {
+                        // On a type without actual discriminants, the variant index is 0.
+                        assert_eq!(index.as_u32(), 0);
+                        Scalar::from_uint(index.as_u32(), discr_layout.size)
+                    }
+                };
+                return Ok((discr, index));
+            }
+            Variants::Multiple { tag, ref tag_encoding, tag_field, .. } => {
+                (tag, tag_encoding, tag_field)
+            }
+        };
+
+        // There are *three* layouts that come into play here:
+        // - The discriminant has a type for typechecking. This is `discr_layout`, and is used for
+        //   the `Scalar` we return.
+        // - The tag (encoded discriminant) has layout `tag_layout`. This is always an integer type,
+        //   and used to interpret the value we read from the tag field.
+        //   For the return value, a cast to `discr_layout` is performed.
+        // - The field storing the tag has a layout, which is very similar to `tag_layout` but
+        //   may be a pointer. This is `tag_val.layout`; we just use it for sanity checks.
+
+        // Get layout for tag.
+        let tag_layout = self.layout_of(tag_scalar_layout.primitive().to_int_ty(*self.tcx))?;
+
+        // Read tag and sanity-check `tag_layout`.
+        let tag_val = self.read_immediate(&self.operand_field(op, tag_field)?)?;
+        assert_eq!(tag_layout.size, tag_val.layout.size);
+        assert_eq!(tag_layout.abi.is_signed(), tag_val.layout.abi.is_signed());
+        trace!("tag value: {}", tag_val);
+
+        // Figure out which discriminant and variant this corresponds to.
+        Ok(match *tag_encoding {
+            TagEncoding::Direct => {
+                let scalar = tag_val.to_scalar();
+                // Generate a specific error if `tag_val` is not an integer.
+                // (`tag_bits` itself is only used for error messages below.)
+                let tag_bits = scalar
+                    .try_to_int()
+                    .map_err(|dbg_val| err_ub!(InvalidTag(dbg_val)))?
+                    .assert_bits(tag_layout.size);
+                // Cast bits from tag layout to discriminant layout.
+                // After the checks we did above, this cannot fail, as
+                // discriminants are int-like.
+                let discr_val =
+                    self.cast_from_int_like(scalar, tag_val.layout, discr_layout.ty).unwrap();
+                let discr_bits = discr_val.assert_bits(discr_layout.size);
+                // Convert discriminant to variant index, and catch invalid discriminants.
+                let index = match *op.layout.ty.kind() {
+                    ty::Adt(adt, _) => {
+                        adt.discriminants(*self.tcx).find(|(_, var)| var.val == discr_bits)
+                    }
+                    ty::Generator(def_id, substs, _) => {
+                        let substs = substs.as_generator();
+                        substs
+                            .discriminants(def_id, *self.tcx)
+                            .find(|(_, var)| var.val == discr_bits)
+                    }
+                    _ => span_bug!(self.cur_span(), "tagged layout for non-adt non-generator"),
+                }
+                .ok_or_else(|| err_ub!(InvalidTag(Scalar::from_uint(tag_bits, tag_layout.size))))?;
+                // Return the cast value, and the index.
+                (discr_val, index.0)
+            }
+            TagEncoding::Niche { untagged_variant, ref niche_variants, niche_start } => {
+                let tag_val = tag_val.to_scalar();
+                // Compute the variant this niche value/"tag" corresponds to. With niche layout,
+                // discriminant (encoded in niche/tag) and variant index are the same.
+                let variants_start = niche_variants.start().as_u32();
+                let variants_end = niche_variants.end().as_u32();
+                let variant = match tag_val.try_to_int() {
+                    Err(dbg_val) => {
+                        // So this is a pointer then, and casting to an int failed.
+                        // Can only happen during CTFE.
+                        // If the niche is just 0 and the ptr is not null, we know this is
+                        // okay. Everything else we conservatively reject.
+                        let ptr_valid = niche_start == 0
+                            && variants_start == variants_end
+                            && !self.scalar_may_be_null(tag_val)?;
+                        if !ptr_valid {
+                            throw_ub!(InvalidTag(dbg_val))
+                        }
+                        untagged_variant
+                    }
+                    Ok(tag_bits) => {
+                        let tag_bits = tag_bits.assert_bits(tag_layout.size);
+                        // We need to use machine arithmetic to get the relative variant idx:
+                        // variant_index_relative = tag_val - niche_start_val
+                        let tag_val = ImmTy::from_uint(tag_bits, tag_layout);
+                        let niche_start_val = ImmTy::from_uint(niche_start, tag_layout);
+                        let variant_index_relative_val =
+                            self.binary_op(mir::BinOp::Sub, &tag_val, &niche_start_val)?;
+                        let variant_index_relative =
+                            variant_index_relative_val.to_scalar().assert_bits(tag_val.layout.size);
+                        // Check if this is in the range that indicates an actual discriminant.
+                        if variant_index_relative <= u128::from(variants_end - variants_start) {
+                            let variant_index_relative = u32::try_from(variant_index_relative)
+                                .expect("we checked that this fits into a u32");
+                            // Then computing the absolute variant idx should not overflow any more.
+                            let variant_index = variants_start
+                                .checked_add(variant_index_relative)
+                                .expect("overflow computing absolute variant idx");
+                            let variants_len = op
+                                .layout
+                                .ty
+                                .ty_adt_def()
+                                .expect("tagged layout for non adt")
+                                .variants()
+                                .len();
+                            assert!(usize::try_from(variant_index).unwrap() < variants_len);
+                            VariantIdx::from_u32(variant_index)
+                        } else {
+                            untagged_variant
+                        }
+                    }
+                };
+                // Compute the size of the scalar we need to return.
+                // No need to cast, because the variant index directly serves as discriminant and is
+                // encoded in the tag.
+                (Scalar::from_uint(variant.as_u32(), discr_layout.size), variant)
+            }
+        })
+    }
+}
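
To complement the encoding direction, here is a minimal standalone sketch of the niche decoding performed in `read_discriminant` (a hypothetical helper; the real code truncates to the tag size and uses machine arithmetic via `binary_op`):

fn variant_from_niche_tag(
    tag: u128,
    niche_start: u128,
    variants_start: u32,
    variants_end: u32,
    untagged_variant: u32,
) -> u32 {
    // variant_index_relative = tag - niche_start (machine arithmetic).
    let relative = tag.wrapping_sub(niche_start);
    if relative <= u128::from(variants_end - variants_start) {
        // In the niche range: the tag encodes a tagged variant directly.
        variants_start + relative as u32
    } else {
        // Any other bit pattern means the untagged variant.
        untagged_variant
    }
}
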
diff --git a/compiler/rustc_const_eval/src/interpret/eval_context.rs b/compiler/rustc_const_eval/src/interpret/eval_context.rs
index f7d64f6d4f4..39c74191258 100644
--- a/compiler/rustc_const_eval/src/interpret/eval_context.rs
+++ b/compiler/rustc_const_eval/src/interpret/eval_context.rs
@@ -2,10 +2,12 @@ use std::cell::Cell;
 use std::fmt;
 use std::mem;
 
+use either::{Either, Left, Right};
+
 use rustc_hir::{self as hir, def_id::DefId, definitions::DefPathData};
 use rustc_index::vec::IndexVec;
 use rustc_middle::mir;
-use rustc_middle::mir::interpret::{InterpError, InvalidProgramInfo};
+use rustc_middle::mir::interpret::{ErrorHandled, InterpError};
 use rustc_middle::ty::layout::{
     self, FnAbiError, FnAbiOfHelpers, FnAbiRequest, LayoutError, LayoutOf, LayoutOfHelpers,
     TyAndLayout,
@@ -15,7 +17,7 @@ use rustc_middle::ty::{
 };
 use rustc_mir_dataflow::storage::always_storage_live_locals;
 use rustc_session::Limit;
-use rustc_span::{Pos, Span};
+use rustc_span::Span;
 use rustc_target::abi::{call::FnAbi, Align, HasDataLayout, Size, TargetDataLayout};
 
 use super::{
@@ -23,7 +25,7 @@ use super::{
     MemPlaceMeta, Memory, MemoryKind, Operand, Place, PlaceTy, PointerArithmetic, Provenance,
     Scalar, StackPopJump,
 };
-use crate::transform::validate::equal_up_to_regions;
+use crate::util;
 
 pub struct InterpCx<'mir, 'tcx, M: Machine<'mir, 'tcx>> {
     /// Stores the `Machine` instance.
@@ -121,13 +123,12 @@ pub struct Frame<'mir, 'tcx, Prov: Provenance = AllocId, Extra = ()> {
     ////////////////////////////////////////////////////////////////////////////////
     // Current position within the function
     ////////////////////////////////////////////////////////////////////////////////
-    /// If this is `Err`, we are not currently executing any particular statement in
+    /// If this is `Right`, we are not currently executing any particular statement in
     /// this frame (can happen e.g. during frame initialization, and during unwinding on
     /// frames without cleanup code).
-    /// We basically abuse `Result` as `Either`.
     ///
     /// Needs to be public because ConstProp does unspeakable things to it.
-    pub loc: Result<mir::Location, Span>,
+    pub loc: Either<mir::Location, Span>,
 }
 
 /// What we store about a frame in an interpreter backtrace.
@@ -195,7 +196,7 @@ impl<'tcx, Prov: Provenance + 'static> LocalState<'tcx, Prov> {
         }
     }
 
-    /// Overwrite the local.  If the local can be overwritten in place, return a reference
+    /// Overwrite the local. If the local can be overwritten in place, return a reference
     /// to do so; otherwise return the `MemPlace` to consult instead.
     ///
     /// Note: This may only be invoked from the `Machine::access_local_mut` hook and not from
@@ -227,27 +228,35 @@ impl<'mir, 'tcx, Prov: Provenance> Frame<'mir, 'tcx, Prov> {
 impl<'mir, 'tcx, Prov: Provenance, Extra> Frame<'mir, 'tcx, Prov, Extra> {
     /// Get the current location within the Frame.
     ///
-    /// If this is `Err`, we are not currently executing any particular statement in
+    /// If this is `Right`, we are not currently executing any particular statement in
     /// this frame (can happen e.g. during frame initialization, and during unwinding on
     /// frames without cleanup code).
-    /// We basically abuse `Result` as `Either`.
     ///
     /// Used by priroda.
-    pub fn current_loc(&self) -> Result<mir::Location, Span> {
+    pub fn current_loc(&self) -> Either<mir::Location, Span> {
         self.loc
     }
 
     /// Return the `SourceInfo` of the current instruction.
     pub fn current_source_info(&self) -> Option<&mir::SourceInfo> {
-        self.loc.ok().map(|loc| self.body.source_info(loc))
+        self.loc.left().map(|loc| self.body.source_info(loc))
     }
 
     pub fn current_span(&self) -> Span {
         match self.loc {
-            Ok(loc) => self.body.source_info(loc).span,
-            Err(span) => span,
+            Left(loc) => self.body.source_info(loc).span,
+            Right(span) => span,
         }
     }
+
+    pub fn lint_root(&self) -> Option<hir::HirId> {
+        self.current_source_info().and_then(|source_info| {
+            match &self.body.source_scopes[source_info.scope].local_data {
+                mir::ClearCrossCrate::Set(data) => Some(data.lint_root),
+                mir::ClearCrossCrate::Clear => None,
+            }
+        })
+    }
 }
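
With the `Result`-as-`Either` abuse gone, the convention reads directly off the type. A sketch, assuming the `either` crate, with `u32` standing in for `mir::Location` and `&str` for `Span`:

use either::{Either, Left, Right};

fn describe(loc: Either<u32, &str>) -> String {
    match loc {
        // Left: we are executing the statement/terminator at this location.
        Left(statement_index) => format!("executing statement {statement_index}"),
        // Right: no current statement (frame preamble or unwinding without
        // cleanup); the span is kept only for error reporting.
        Right(span) => format!("no current statement, span {span}"),
    }
}
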
 
 impl<'tcx> fmt::Display for FrameInfo<'tcx> {
@@ -256,25 +265,13 @@ impl<'tcx> fmt::Display for FrameInfo<'tcx> {
             if tcx.def_key(self.instance.def_id()).disambiguated_data.data
                 == DefPathData::ClosureExpr
             {
-                write!(f, "inside closure")?;
+                write!(f, "inside closure")
             } else {
                 // Note: this triggers a `good_path_bug` state, which means that if we ever get here
                 // we must emit a diagnostic. We should never display a `FrameInfo` unless we
                 // actually want to emit a warning or error to the user.
-                write!(f, "inside `{}`", self.instance)?;
+                write!(f, "inside `{}`", self.instance)
             }
-            if !self.span.is_dummy() {
-                let sm = tcx.sess.source_map();
-                let lo = sm.lookup_char_pos(self.span.lo());
-                write!(
-                    f,
-                    " at {}:{}:{}",
-                    sm.filename_for_diagnostics(&lo.file.name),
-                    lo.line,
-                    lo.col.to_usize() + 1
-                )?;
-            }
-            Ok(())
         })
     }
 }
@@ -354,8 +351,8 @@ pub(super) fn mir_assign_valid_types<'tcx>(
     // Type-changing assignments can happen when subtyping is used. While
     // all normal lifetimes are erased, higher-ranked types with their
     // late-bound lifetimes are still around and can lead to type
-    // differences. So we compare ignoring lifetimes.
-    if equal_up_to_regions(tcx, param_env, src.ty, dest.ty) {
+    // differences.
+    if util::is_subtype(tcx, param_env, src.ty, dest.ty) {
         // Make sure the layout is equal, too -- just to be safe. Miri really
         // needs layout equality. For performance reason we skip this check when
         // the types are equal. Equal types *can* have different layouts when
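
An illustration of why `is_subtype` (rather than exact type equality) is the right check here: higher-ranked types differ from their instantiations only in late-bound lifetimes, yet MIR may assign one to the other.

fn main() {
    // A `for<'a> fn(&'a u32) -> &'a u32` value...
    let general: for<'a> fn(&'a u32) -> &'a u32 = |x| x;
    // ...is a subtype of its 'static instantiation, so this assignment
    // type-checks even though the two types are not equal.
    let specific: fn(&'static u32) -> &'static u32 = general;
    assert_eq!(*specific(&7), 7);
}
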
@@ -492,7 +489,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 
     /// Call this on things you got out of the MIR (so it is as generic as the current
     /// stack frame), to bring it into the proper environment for this interpreter.
-    pub(super) fn subst_from_current_frame_and_normalize_erasing_regions<T: TypeFoldable<'tcx>>(
+    pub(super) fn subst_from_current_frame_and_normalize_erasing_regions<
+        T: TypeFoldable<TyCtxt<'tcx>>,
+    >(
         &self,
         value: T,
     ) -> Result<T, InterpError<'tcx>> {
@@ -501,7 +500,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 
     /// Call this on things you got out of the MIR (so it is as generic as the provided
     /// stack frame), to bring it into the proper environment for this interpreter.
-    pub(super) fn subst_from_frame_and_normalize_erasing_regions<T: TypeFoldable<'tcx>>(
+    pub(super) fn subst_from_frame_and_normalize_erasing_regions<T: TypeFoldable<TyCtxt<'tcx>>>(
         &self,
         frame: &Frame<'mir, 'tcx, M::Provenance, M::FrameExtra>,
         value: T,
@@ -509,14 +508,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         frame
             .instance
             .try_subst_mir_and_normalize_erasing_regions(*self.tcx, self.param_env, value)
-            .map_err(|e| {
-                self.tcx.sess.delay_span_bug(
-                    self.cur_span(),
-                    format!("failed to normalize {}", e.get_type_for_failure()).as_str(),
-                );
-
-                InterpError::InvalidProgram(InvalidProgramInfo::TooGeneric)
-            })
+            .map_err(|_| err_inval!(TooGeneric))
     }
 
     /// The `substs` are assumed to already be in our interpreter "universe" (param_env).
@@ -572,7 +564,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         metadata: &MemPlaceMeta<M::Provenance>,
         layout: &TyAndLayout<'tcx>,
     ) -> InterpResult<'tcx, Option<(Size, Align)>> {
-        if !layout.is_unsized() {
+        if layout.is_sized() {
             return Ok(Some((layout.size, layout.align.abi)));
         }
         match layout.ty.kind() {
@@ -595,7 +587,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 );
 
                 // Recurse to get the size of the dynamically sized field (must be
-                // the last field).  Can't have foreign types here, how would we
+                // the last field). Can't have foreign types here; how would we
                 // adjust alignment and size for them?
                 let field = layout.field(self, layout.fields.count() - 1);
                 let Some((unsized_size, mut unsized_align)) = self.size_and_align_of(metadata, &field)? else {
@@ -635,14 +627,14 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 }
                 Ok(Some((size, align)))
             }
-            ty::Dynamic(..) => {
+            ty::Dynamic(_, _, ty::Dyn) => {
                 let vtable = metadata.unwrap_meta().to_pointer(self)?;
                 // Read size and align from vtable (already checks size).
                 Ok(Some(self.get_vtable_size_and_align(vtable)?))
             }
 
             ty::Slice(_) | ty::Str => {
-                let len = metadata.unwrap_meta().to_machine_usize(self)?;
+                let len = metadata.unwrap_meta().to_target_usize(self)?;
                 let elem = layout.field(self, 0);
 
                 // Make sure the slice is not too big.
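
What `size_and_align_of` computes is observable through `mem::size_of_val`: for slices the size comes from the length metadata, for trait objects it is read from the vtable (illustration only):

use std::mem;

fn main() {
    let s: &[u16] = &[1, 2, 3];
    // Slice: len (fat-pointer metadata) * element size.
    assert_eq!(mem::size_of_val(s), 3 * mem::size_of::<u16>());
    // Trait object: size comes from the vtable.
    let d: &dyn std::fmt::Debug = &0u64;
    assert_eq!(mem::size_of_val(d), mem::size_of::<u64>());
}
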
@@ -676,10 +668,14 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         return_to_block: StackPopCleanup,
     ) -> InterpResult<'tcx> {
         trace!("body: {:#?}", body);
+        // Clobber the previous return place contents; nobody is supposed to be able to see them anymore.
+        // This also checks that the place is dereferenceable, but not that it is aligned. We rely on
+        // all constructed places being sufficiently aligned (in particular we rely on `deref_operand`
+        // checking alignment).
+        self.write_uninit(return_place)?;
         // first push a stack frame so we have access to the local substs
         let pre_frame = Frame {
             body,
-            loc: Err(body.span), // Span used for errors caused during preamble.
+            loc: Right(body.span), // Span used for errors caused during preamble.
             return_to_block,
             return_place: return_place.clone(),
             // empty local array, we fill it in below, after we are inside the stack frame and
@@ -696,12 +692,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         for ct in &body.required_consts {
             let span = ct.span;
             let ct = self.subst_from_current_frame_and_normalize_erasing_regions(ct.literal)?;
-            self.const_to_op(&ct, None).map_err(|err| {
-                // If there was an error, set the span of the current frame to this constant.
-                // Avoiding doing this when evaluation succeeds.
-                self.frame_mut().loc = Err(span);
-                err
-            })?;
+            self.eval_mir_constant(&ct, Some(span), None)?;
         }
 
         // Most locals are initially dead.
@@ -718,7 +709,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         // done
         self.frame_mut().locals = locals;
         M::after_stack_push(self)?;
-        self.frame_mut().loc = Ok(mir::Location::START);
+        self.frame_mut().loc = Left(mir::Location::START);
 
         let span = info_span!("frame", "{}", instance);
         self.frame_mut().tracing_span.enter(span);
@@ -729,7 +720,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
     /// Jump to the given block.
     #[inline]
     pub fn go_to_block(&mut self, target: mir::BasicBlock) {
-        self.frame_mut().loc = Ok(mir::Location { block: target, statement_index: 0 });
+        self.frame_mut().loc = Left(mir::Location { block: target, statement_index: 0 });
     }
 
     /// *Return* to the given `target` basic block.
@@ -755,8 +746,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
     /// unwinding, and doing so is UB.
     pub fn unwind_to_block(&mut self, target: StackPopUnwind) -> InterpResult<'tcx> {
         self.frame_mut().loc = match target {
-            StackPopUnwind::Cleanup(block) => Ok(mir::Location { block, statement_index: 0 }),
-            StackPopUnwind::Skip => Err(self.frame_mut().body.span),
+            StackPopUnwind::Cleanup(block) => Left(mir::Location { block, statement_index: 0 }),
+            StackPopUnwind::Skip => Right(self.frame_mut().body.span),
             StackPopUnwind::NotAllowed => {
                 throw_ub_format!("unwinding past a stack frame that does not allow unwinding")
             }
@@ -788,8 +779,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         assert_eq!(
             unwinding,
             match self.frame().loc {
-                Ok(loc) => self.body().basic_blocks[loc.block].is_cleanup,
-                Err(_) => true,
+                Left(loc) => self.body().basic_blocks[loc.block].is_cleanup,
+                Right(_) => true,
             }
         );
         if unwinding && self.frame_idx() == 0 {
@@ -912,9 +903,32 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         Ok(())
     }
 
-    pub fn eval_to_allocation(
+    /// Call a query that can return `ErrorHandled`. If `span` is `Some`, point to that span when an error occurs.
+    pub fn ctfe_query<T>(
+        &self,
+        span: Option<Span>,
+        query: impl FnOnce(TyCtxtAt<'tcx>) -> Result<T, ErrorHandled>,
+    ) -> InterpResult<'tcx, T> {
+        // Use a precise span for better cycle errors.
+        query(self.tcx.at(span.unwrap_or_else(|| self.cur_span()))).map_err(|err| {
+            match err {
+                ErrorHandled::Reported(err) => {
+                    if let Some(span) = span {
+                        // To make it easier to figure out where this error comes from, also add a note at the current location.
+                        self.tcx.sess.span_note_without_error(span, "erroneous constant used");
+                    }
+                    err_inval!(AlreadyReported(err))
+                }
+                ErrorHandled::TooGeneric => err_inval!(TooGeneric),
+            }
+            .into()
+        })
+    }
+
+    pub fn eval_global(
         &self,
         gid: GlobalId<'tcx>,
+        span: Option<Span>,
     ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
         // For statics we pick `ParamEnv::reveal_all`, because statics don't have generics
         // and thus don't care about the parameter environment. While we could just use
@@ -927,8 +941,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             self.param_env
         };
         let param_env = param_env.with_const();
-        // Use a precise span for better cycle errors.
-        let val = self.tcx.at(self.cur_span()).eval_to_allocation_raw(param_env.and(gid))?;
+        let val = self.ctfe_query(span, |tcx| tcx.eval_to_allocation_raw(param_env.and(gid)))?;
         self.raw_const_to_mplace(val)
     }
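
Reduced to a standalone sketch, the error mapping that `ctfe_query` centralizes looks like this (hypothetical, simplified stand-ins for the compiler types; the real helper also emits an "erroneous constant used" note when a span is supplied):

#[allow(dead_code)]
enum ErrorHandled { Reported(u64), TooGeneric }
#[allow(dead_code)]
enum InterpError { AlreadyReported(u64), TooGeneric }

fn map_ctfe_error(err: ErrorHandled) -> InterpError {
    match err {
        // A diagnostic was already emitted; just propagate that fact.
        ErrorHandled::Reported(guar) => InterpError::AlreadyReported(guar),
        // Still polymorphic: evaluation must wait for monomorphization.
        ErrorHandled::TooGeneric => InterpError::TooGeneric,
    }
}
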
 
@@ -945,12 +958,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         // This deliberately does *not* honor `requires_caller_location` since it is used for much
         // more than just panics.
         for frame in stack.iter().rev() {
-            let lint_root = frame.current_source_info().and_then(|source_info| {
-                match &frame.body.source_scopes[source_info.scope].local_data {
-                    mir::ClearCrossCrate::Set(data) => Some(data.lint_root),
-                    mir::ClearCrossCrate::Clear => None,
-                }
-            });
+            let lint_root = frame.lint_root();
             let span = frame.current_span();
 
             frames.push(FrameInfo { span, instance: frame.instance, lint_root });
diff --git a/compiler/rustc_const_eval/src/interpret/intern.rs b/compiler/rustc_const_eval/src/interpret/intern.rs
index 6809a42dc45..b220d21f68b 100644
--- a/compiler/rustc_const_eval/src/interpret/intern.rs
+++ b/compiler/rustc_const_eval/src/interpret/intern.rs
@@ -30,15 +30,15 @@ use super::{
 use crate::const_eval;
 
 pub trait CompileTimeMachine<'mir, 'tcx, T> = Machine<
-    'mir,
-    'tcx,
-    MemoryKind = T,
-    Provenance = AllocId,
-    ExtraFnVal = !,
-    FrameExtra = (),
-    AllocExtra = (),
-    MemoryMap = FxIndexMap<AllocId, (MemoryKind<T>, Allocation)>,
->;
+        'mir,
+        'tcx,
+        MemoryKind = T,
+        Provenance = AllocId,
+        ExtraFnVal = !,
+        FrameExtra = (),
+        AllocExtra = (),
+        MemoryMap = FxIndexMap<AllocId, (MemoryKind<T>, Allocation)>,
+    >;
 
 struct InternVisitor<'rt, 'mir, 'tcx, M: CompileTimeMachine<'mir, 'tcx, const_eval::MemoryKind>> {
     /// The ectx from which we intern.
@@ -59,7 +59,7 @@ struct InternVisitor<'rt, 'mir, 'tcx, M: CompileTimeMachine<'mir, 'tcx, const_ev
 
 #[derive(Copy, Clone, Debug, PartialEq, Hash, Eq)]
 enum InternMode {
-    /// A static and its current mutability.  Below shared references inside a `static mut`,
+    /// A static and its current mutability. Below shared references inside a `static mut`,
     /// this is *immutable*, and below mutable references inside an `UnsafeCell`, this
     /// is *mutable*.
     Static(hir::Mutability),
@@ -134,8 +134,8 @@ fn intern_shallow<'rt, 'mir, 'tcx, M: CompileTimeMachine<'mir, 'tcx, const_eval:
         alloc.mutability = Mutability::Not;
     };
     // link the alloc id to the actual allocation
-    leftover_allocations.extend(alloc.provenance().iter().map(|&(_, alloc_id)| alloc_id));
-    let alloc = tcx.intern_const_alloc(alloc);
+    leftover_allocations.extend(alloc.provenance().ptrs().iter().map(|&(_, alloc_id)| alloc_id));
+    let alloc = tcx.mk_const_alloc(alloc);
     tcx.set_alloc_id_memory(alloc_id, alloc);
     None
 }
@@ -242,7 +242,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: CompileTimeMachine<'mir, 'tcx, const_eval::Memory
             let mplace = self.ecx.ref_to_mplace(&value)?;
             assert_eq!(mplace.layout.ty, referenced_ty);
             // Handle trait object vtables.
-            if let ty::Dynamic(..) =
+            if let ty::Dynamic(_, _, ty::Dyn) =
                 tcx.struct_tail_erasing_lifetimes(referenced_ty, self.ecx.param_env).kind()
             {
                 let ptr = mplace.meta.unwrap_meta().to_pointer(&tcx)?;
@@ -296,7 +296,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: CompileTimeMachine<'mir, 'tcx, const_eval::Memory
                         }
                     }
                     InternMode::Const => {
-                        // Ignore `UnsafeCell`, everything is immutable.  Validity does some sanity
+                        // Ignore `UnsafeCell`, everything is immutable. Validity does some sanity
                         // checking for mutable references that we encounter -- they must all be
                         // ZST.
                         InternMode::Const
@@ -330,7 +330,7 @@ pub enum InternKind {
 
 /// Intern `ret` and everything it references.
 ///
-/// This *cannot raise an interpreter error*.  Doing so is left to validation, which
+/// This *cannot raise an interpreter error*. Doing so is left to validation, which
 /// tracks where in the value we are and thus can show much better error messages.
 #[instrument(level = "debug", skip(ecx))]
 pub fn intern_const_alloc_recursive<
@@ -379,7 +379,7 @@ pub fn intern_const_alloc_recursive<
             inside_unsafe_cell: false,
         }
         .visit_value(&mplace);
-        // We deliberately *ignore* interpreter errors here.  When there is a problem, the remaining
+        // We deliberately *ignore* interpreter errors here. When there is a problem, the remaining
         // references are "leftover"-interned, and later validation will show a proper error
         // and point at the right part of the value causing the problem.
         match res {
@@ -437,9 +437,9 @@ pub fn intern_const_alloc_recursive<
                     alloc.mutability = Mutability::Not;
                 }
             }
-            let alloc = tcx.intern_const_alloc(alloc);
+            let alloc = tcx.mk_const_alloc(alloc);
             tcx.set_alloc_id_memory(alloc_id, alloc);
-            for &(_, alloc_id) in alloc.inner().provenance().iter() {
+            for &(_, alloc_id) in alloc.inner().provenance().ptrs().iter() {
                 if leftover_allocations.insert(alloc_id) {
                     todo.push(alloc_id);
                 }
@@ -454,7 +454,7 @@ pub fn intern_const_alloc_recursive<
             return Err(reported);
         } else if ecx.tcx.try_get_global_alloc(alloc_id).is_none() {
             // We have hit an `AllocId` that is neither in local or global memory and isn't
-            // marked as dangling by local memory.  That should be impossible.
+            // marked as dangling by local memory. That should be impossible.
             span_bug!(ecx.tcx.span, "encountered unknown alloc id {:?}", alloc_id);
         }
     }
@@ -479,6 +479,6 @@ impl<'mir, 'tcx: 'mir, M: super::intern::CompileTimeMachine<'mir, 'tcx, !>>
         f(self, &dest.into())?;
         let mut alloc = self.memory.alloc_map.remove(&dest.ptr.provenance.unwrap()).unwrap().1;
         alloc.mutability = Mutability::Not;
-        Ok(self.tcx.intern_const_alloc(alloc))
+        Ok(self.tcx.mk_const_alloc(alloc))
     }
 }
diff --git a/compiler/rustc_const_eval/src/interpret/intrinsics.rs b/compiler/rustc_const_eval/src/interpret/intrinsics.rs
index b92a6878847..a29cdade023 100644
--- a/compiler/rustc_const_eval/src/interpret/intrinsics.rs
+++ b/compiler/rustc_const_eval/src/interpret/intrinsics.rs
@@ -2,8 +2,6 @@
 //! looking at their MIR. Intrinsics/functions supported here are shared by CTFE
 //! and miri.
 
-use std::convert::TryFrom;
-
 use rustc_hir::def_id::DefId;
 use rustc_middle::mir::{
     self,
@@ -13,7 +11,7 @@ use rustc_middle::mir::{
     BinOp, NonDivergingIntrinsic,
 };
 use rustc_middle::ty;
-use rustc_middle::ty::layout::LayoutOf as _;
+use rustc_middle::ty::layout::{LayoutOf as _, ValidityRequirement};
 use rustc_middle::ty::subst::SubstsRef;
 use rustc_middle::ty::{Ty, TyCtxt};
 use rustc_span::symbol::{sym, Symbol};
@@ -47,7 +45,7 @@ fn numeric_intrinsic<Prov>(name: Symbol, bits: u128, kind: Primitive) -> Scalar<
 pub(crate) fn alloc_type_name<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> ConstAllocation<'tcx> {
     let path = crate::util::type_name(tcx, ty);
     let alloc = Allocation::from_bytes_byte_aligned_immutable(path.into_bytes());
-    tcx.intern_const_alloc(alloc)
+    tcx.mk_const_alloc(alloc)
 }
 
 /// The logic for all nullary intrinsics is implemented here. These intrinsics don't get evaluated
@@ -73,7 +71,7 @@ pub(crate) fn eval_nullary_intrinsic<'tcx>(
         sym::pref_align_of => {
             // Correctly handles non-monomorphic calls, so there is no need for ensure_monomorphic_enough.
             let layout = tcx.layout_of(param_env.and(tp_ty)).map_err(|e| err_inval!(Layout(e)))?;
-            ConstValue::from_machine_usize(layout.align.pref.bytes(), &tcx)
+            ConstValue::from_target_usize(layout.align.pref.bytes(), &tcx)
         }
         sym::type_id => {
             ensure_monomorphic_enough(tcx, tp_ty)?;
@@ -81,14 +79,10 @@ pub(crate) fn eval_nullary_intrinsic<'tcx>(
         }
         sym::variant_count => match tp_ty.kind() {
             // Correctly handles non-monomorphic calls, so there is no need for ensure_monomorphic_enough.
-            ty::Adt(ref adt, _) => {
-                ConstValue::from_machine_usize(adt.variants().len() as u64, &tcx)
+            ty::Adt(adt, _) => ConstValue::from_target_usize(adt.variants().len() as u64, &tcx),
+            ty::Alias(..) | ty::Param(_) | ty::Placeholder(_) | ty::Infer(_) => {
+                throw_inval!(TooGeneric)
             }
-            ty::Projection(_)
-            | ty::Opaque(_, _)
-            | ty::Param(_)
-            | ty::Placeholder(_)
-            | ty::Infer(_) => throw_inval!(TooGeneric),
             ty::Bound(_, _) => bug!("bound ty during ctfe"),
             ty::Bool
             | ty::Char
@@ -107,9 +101,10 @@ pub(crate) fn eval_nullary_intrinsic<'tcx>(
             | ty::Closure(_, _)
             | ty::Generator(_, _, _)
             | ty::GeneratorWitness(_)
+            | ty::GeneratorWitnessMIR(_, _)
             | ty::Never
             | ty::Tuple(_)
-            | ty::Error(_) => ConstValue::from_machine_usize(0u64, &tcx),
+            | ty::Error(_) => ConstValue::from_target_usize(0u64, &tcx),
         },
         other => bug!("`{}` is not a zero arg intrinsic", other),
     })
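
The surface API backed by this nullary intrinsic is the unstable `std::mem::variant_count` (nightly-only illustration):

#![feature(variant_count)]
use std::mem;

#[allow(dead_code)]
enum Tri { A, B, C }

fn main() {
    assert_eq!(mem::variant_count::<Tri>(), 3);
    assert_eq!(mem::variant_count::<Option<u8>>(), 2);
}
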
@@ -161,7 +156,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                     _ => bug!(),
                 };
 
-                self.write_scalar(Scalar::from_machine_usize(result, self), dest)?;
+                self.write_scalar(Scalar::from_target_usize(result, self), dest)?;
             }
 
             sym::pref_align_of
@@ -177,8 +172,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                     sym::type_name => self.tcx.mk_static_str(),
                     _ => bug!(),
                 };
-                let val =
-                    self.tcx.const_eval_global_id(self.param_env, gid, Some(self.tcx.span))?;
+                let val = self.ctfe_query(None, |tcx| {
+                    tcx.const_eval_global_id(self.param_env, gid, Some(tcx.span))
+                })?;
                 let val = self.const_val_to_op(val, ty, Some(dest.layout))?;
                 self.copy_op(&val, dest, /*allow_transmute*/ false)?;
             }
@@ -214,19 +210,6 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 let out_val = numeric_intrinsic(intrinsic_name, bits, kind);
                 self.write_scalar(out_val, dest)?;
             }
-            sym::add_with_overflow | sym::sub_with_overflow | sym::mul_with_overflow => {
-                let lhs = self.read_immediate(&args[0])?;
-                let rhs = self.read_immediate(&args[1])?;
-                let bin_op = match intrinsic_name {
-                    sym::add_with_overflow => BinOp::Add,
-                    sym::sub_with_overflow => BinOp::Sub,
-                    sym::mul_with_overflow => BinOp::Mul,
-                    _ => bug!(),
-                };
-                self.binop_with_overflow(
-                    bin_op, /*force_overflow_checks*/ true, &lhs, &rhs, dest,
-                )?;
-            }
             sym::saturating_add | sym::saturating_sub => {
                 let l = self.read_immediate(&args[0])?;
                 let r = self.read_immediate(&args[1])?;
@@ -242,6 +225,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 let discr_val = self.read_discriminant(&place.into())?.0;
                 self.write_scalar(discr_val, dest)?;
             }
+            sym::exact_div => {
+                let l = self.read_immediate(&args[0])?;
+                let r = self.read_immediate(&args[1])?;
+                self.exact_div(&l, &r, dest)?;
+            }
             sym::unchecked_shl
             | sym::unchecked_shr
             | sym::unchecked_add
@@ -301,7 +289,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             }
             sym::offset => {
                 let ptr = self.read_pointer(&args[0])?;
-                let offset_count = self.read_scalar(&args[1])?.to_machine_isize(self)?;
+                let offset_count = self.read_target_isize(&args[1])?;
                 let pointee_ty = substs.type_at(0);
 
                 let offset_ptr = self.ptr_offset_inbounds(ptr, pointee_ty, offset_count)?;
@@ -309,7 +297,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             }
             sym::arith_offset => {
                 let ptr = self.read_pointer(&args[0])?;
-                let offset_count = self.read_scalar(&args[1])?.to_machine_isize(self)?;
+                let offset_count = self.read_target_isize(&args[1])?;
                 let pointee_ty = substs.type_at(0);
 
                 let pointee_size = i64::try_from(self.layout_of(pointee_ty)?.size.bytes()).unwrap();
@@ -375,7 +363,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                         // The signed form of the intrinsic allows this. If we interpret the
                         // difference as isize, we'll get the proper signed difference. If that
                         // seems *positive*, they were more than isize::MAX apart.
-                        let dist = val.to_machine_isize(self)?;
+                        let dist = val.to_target_isize(self)?;
                         if dist >= 0 {
                             throw_ub_format!(
                                 "`{}` called when first pointer is too far before second",
@@ -385,7 +373,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                         dist
                     } else {
                         // b >= a
-                        let dist = val.to_machine_isize(self)?;
+                        let dist = val.to_target_isize(self)?;
                         // If converting to isize produced a *negative* result, we had an overflow
                         // because they were more than isize::MAX apart.
                         if dist < 0 {
@@ -410,10 +398,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 
                 // Perform division by size to compute return value.
                 let ret_layout = if intrinsic_name == sym::ptr_offset_from_unsigned {
-                    assert!(0 <= dist && dist <= self.machine_isize_max());
+                    assert!(0 <= dist && dist <= self.target_isize_max());
                     usize_layout
                 } else {
-                    assert!(self.machine_isize_min() <= dist && dist <= self.machine_isize_max());
+                    assert!(self.target_isize_min() <= dist && dist <= self.target_isize_max());
                     isize_layout
                 };
                 let pointee_layout = self.layout_of(substs.type_at(0))?;
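
The sign conventions these checks enforce, seen at the surface through the stable `ptr::offset_from` (which lowers to this intrinsic):

fn main() {
    let a = [0u32; 8];
    let p = a.as_ptr();
    let q = unsafe { p.add(5) };
    // The result is in units of T (u32 here), as an isize.
    assert_eq!(unsafe { q.offset_from(p) }, 5);
    // The signed form allows a negative distance; the unsigned form
    // (`ptr_offset_from_unsigned`) instead requires the first pointer
    // not to be before the second.
    assert_eq!(unsafe { p.offset_from(q) }, -5);
}
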
@@ -426,50 +414,40 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             sym::transmute => {
                 self.copy_op(&args[0], dest, /*allow_transmute*/ true)?;
             }
-            sym::assert_inhabited | sym::assert_zero_valid | sym::assert_uninit_valid => {
+            sym::assert_inhabited
+            | sym::assert_zero_valid
+            | sym::assert_mem_uninitialized_valid => {
                 let ty = instance.substs.type_at(0);
-                let layout = self.layout_of(ty)?;
-
-                // For *all* intrinsics we first check `is_uninhabited` to give a more specific
-                // error message.
-                if layout.abi.is_uninhabited() {
-                    // The run-time intrinsic panics just to get a good backtrace; here we abort
-                    // since there is no problem showing a backtrace even for aborts.
-                    M::abort(
-                        self,
-                        format!(
+                let requirement = ValidityRequirement::from_intrinsic(intrinsic_name).unwrap();
+
+                let should_panic = !self
+                    .tcx
+                    .check_validity_requirement((requirement, self.param_env.and(ty)))
+                    .map_err(|_| err_inval!(TooGeneric))?;
+
+                if should_panic {
+                    let layout = self.layout_of(ty)?;
+
+                    let msg = match requirement {
+                        // For *all* intrinsics we first check `is_uninhabited` to give a more specific
+                        // error message.
+                        _ if layout.abi.is_uninhabited() => format!(
                             "aborted execution: attempted to instantiate uninhabited type `{}`",
                             ty
                         ),
-                    )?;
-                }
-
-                if intrinsic_name == sym::assert_zero_valid {
-                    let should_panic = !self.tcx.permits_zero_init(layout);
-
-                    if should_panic {
-                        M::abort(
-                            self,
-                            format!(
-                                "aborted execution: attempted to zero-initialize type `{}`, which is invalid",
-                                ty
-                            ),
-                        )?;
-                    }
-                }
+                        ValidityRequirement::Inhabited => bug!("handled earlier"),
+                        ValidityRequirement::Zero => format!(
+                            "aborted execution: attempted to zero-initialize type `{}`, which is invalid",
+                            ty
+                        ),
+                        ValidityRequirement::UninitMitigated0x01Fill => format!(
+                            "aborted execution: attempted to leave type `{}` uninitialized, which is invalid",
+                            ty
+                        ),
+                        ValidityRequirement::Uninit => bug!("assert_uninit_valid doesn't exist"),
+                    };
 
-                if intrinsic_name == sym::assert_uninit_valid {
-                    let should_panic = !self.tcx.permits_uninit_init(layout);
-
-                    if should_panic {
-                        M::abort(
-                            self,
-                            format!(
-                                "aborted execution: attempted to leave type `{}` uninitialized, which is invalid",
-                                ty
-                            ),
-                        )?;
-                    }
+                    M::abort(self, msg)?;
                 }
             }
             sym::simd_insert => {
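
What the new validity-requirement check guards, seen from the surface: `mem::zeroed` triggers `assert_zero_valid` (illustration only):

use std::mem;

fn main() {
    // Zero is a valid bit pattern for u32, so this passes assert_zero_valid.
    let x: u32 = unsafe { mem::zeroed() };
    assert_eq!(x, 0);
    // By contrast, `let r: &u32 = unsafe { mem::zeroed() };` would abort:
    // references have no valid all-zero bit pattern.
}
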
@@ -480,7 +458,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 assert_eq!(input_len, dest_len, "Return vector length must match input length");
                 assert!(
                     index < dest_len,
-                    "Index `{}` must be in bounds of vector with length {}`",
+                    "Index `{}` must be in bounds of vector with length {}",
                     index,
                     dest_len
                 );
@@ -500,7 +478,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 let (input, input_len) = self.operand_to_simd(&args[0])?;
                 assert!(
                     index < input_len,
-                    "index `{}` must be in bounds of vector with length `{}`",
+                    "index `{}` must be in bounds of vector with length {}",
                     index,
                     input_len
                 );
@@ -522,12 +500,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             sym::vtable_size => {
                 let ptr = self.read_pointer(&args[0])?;
                 let (size, _align) = self.get_vtable_size_and_align(ptr)?;
-                self.write_scalar(Scalar::from_machine_usize(size.bytes(), self), dest)?;
+                self.write_scalar(Scalar::from_target_usize(size.bytes(), self), dest)?;
             }
             sym::vtable_align => {
                 let ptr = self.read_pointer(&args[0])?;
                 let (_size, align) = self.get_vtable_size_and_align(ptr)?;
-                self.write_scalar(Scalar::from_machine_usize(align.bytes(), self), dest)?;
+                self.write_scalar(Scalar::from_target_usize(align.bytes(), self), dest)?;
             }
 
             _ => return Ok(false),
@@ -666,10 +644,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         count: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,
         nonoverlapping: bool,
     ) -> InterpResult<'tcx> {
-        let count = self.read_scalar(&count)?.to_machine_usize(self)?;
+        let count = self.read_target_usize(&count)?;
         let layout = self.layout_of(src.layout.ty.builtin_deref(true).unwrap().ty)?;
         let (size, align) = (layout.size, layout.align.abi);
-        // `checked_mul` enforces a too small bound (the correct one would probably be machine_isize_max),
+        // `checked_mul` enforces a bound that is too small (the correct one would probably be target_isize_max),
         // but no actual allocation can be big enough for the difference to be noticeable.
         let size = size.checked_mul(count, self).ok_or_else(|| {
             err_ub_format!(
@@ -694,9 +672,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 
         let dst = self.read_pointer(&dst)?;
         let byte = self.read_scalar(&byte)?.to_u8()?;
-        let count = self.read_scalar(&count)?.to_machine_usize(self)?;
+        let count = self.read_target_usize(&count)?;
 
-        // `checked_mul` enforces a too small bound (the correct one would probably be machine_isize_max),
+        // `checked_mul` enforces a bound that is too small (the correct one would probably be target_isize_max),
         // but no actual allocation can be big enough for the difference to be noticeable.
         let len = layout
             .size
@@ -713,7 +691,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         rhs: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,
     ) -> InterpResult<'tcx, Scalar<M::Provenance>> {
         let layout = self.layout_of(lhs.layout.ty.builtin_deref(true).unwrap().ty)?;
-        assert!(!layout.is_unsized());
+        assert!(layout.is_sized());
 
         let get_bytes = |this: &InterpCx<'mir, 'tcx, M>,
                          op: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,
diff --git a/compiler/rustc_const_eval/src/interpret/intrinsics/caller_location.rs b/compiler/rustc_const_eval/src/interpret/intrinsics/caller_location.rs
index 0e3867557ad..cf52299b7ba 100644
--- a/compiler/rustc_const_eval/src/interpret/intrinsics/caller_location.rs
+++ b/compiler/rustc_const_eval/src/interpret/intrinsics/caller_location.rs
@@ -1,5 +1,3 @@
-use std::convert::TryFrom;
-
 use rustc_ast::Mutability;
 use rustc_hir::lang_items::LangItem;
 use rustc_middle::mir::TerminatorKind;
@@ -19,8 +17,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             debug!("find_closest_untracked_caller_location: checking frame {:?}", frame.instance);
 
             // Assert that the frame we look at is actually executing code currently
-            // (`loc` is `Err` when we are unwinding and the frame does not require cleanup).
-            let loc = frame.loc.unwrap();
+            // (`loc` is `Right` when we are unwinding and the frame does not require cleanup).
+            let loc = frame.loc.left().unwrap();
 
             // This could be a non-`Call` terminator (such as `Drop`), or not a terminator at all
             // (such as `box`). Use the normal span by default.
@@ -80,13 +78,16 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         col: u32,
     ) -> MPlaceTy<'tcx, M::Provenance> {
         let loc_details = &self.tcx.sess.opts.unstable_opts.location_detail;
+        // This can fail if rustc runs out of memory right here. Trying to emit an error would be
+        // pointless, since that would require allocating more memory than these short strings.
         let file = if loc_details.file {
             self.allocate_str(filename.as_str(), MemoryKind::CallerLocation, Mutability::Not)
+                .unwrap()
         } else {
             // FIXME: This creates a new allocation each time. It might be preferable to
             // perform this allocation only once, and re-use the `MPlaceTy`.
             // See https://github.com/rust-lang/rust/pull/89920#discussion_r730012398
-            self.allocate_str("<redacted>", MemoryKind::CallerLocation, Mutability::Not)
+            self.allocate_str("<redacted>", MemoryKind::CallerLocation, Mutability::Not).unwrap()
         };
         let line = if loc_details.line { Scalar::from_u32(line) } else { Scalar::from_u32(0) };
         let col = if loc_details.column { Scalar::from_u32(col) } else { Scalar::from_u32(0) };
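
The runtime feature served by `alloc_caller_location`: `#[track_caller]` functions observe the (file, line, column) triple assembled here, subject to the same `-Zlocation-detail` redaction:

#[track_caller]
fn whereami() -> &'static std::panic::Location<'static> {
    std::panic::Location::caller()
}

fn main() {
    let loc = whereami();
    println!("{}:{}:{}", loc.file(), loc.line(), loc.column());
}
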
@@ -94,11 +95,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         // Allocate memory for `CallerLocation` struct.
         let loc_ty = self
             .tcx
-            .bound_type_of(self.tcx.require_lang_item(LangItem::PanicLocation, None))
-            .subst(*self.tcx, self.tcx.mk_substs([self.tcx.lifetimes.re_erased.into()].iter()));
+            .type_of(self.tcx.require_lang_item(LangItem::PanicLocation, None))
+            .subst(*self.tcx, self.tcx.mk_substs(&[self.tcx.lifetimes.re_erased.into()]));
         let loc_layout = self.layout_of(loc_ty).unwrap();
-        // This can fail if rustc runs out of memory right here. Trying to emit an error would be
-        // pointless, since that would require allocating more memory than a Location.
         let location = self.allocate(loc_layout, MemoryKind::CallerLocation).unwrap();
 
         // Initialize fields.
diff --git a/compiler/rustc_const_eval/src/interpret/machine.rs b/compiler/rustc_const_eval/src/interpret/machine.rs
index 351152eba01..92fa59aec6e 100644
--- a/compiler/rustc_const_eval/src/interpret/machine.rs
+++ b/compiler/rustc_const_eval/src/interpret/machine.rs
@@ -10,12 +10,14 @@ use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece};
 use rustc_middle::mir;
 use rustc_middle::ty::{self, Ty, TyCtxt};
 use rustc_span::def_id::DefId;
-use rustc_target::abi::Size;
+use rustc_target::abi::{Align, Size};
 use rustc_target::spec::abi::Abi as CallAbi;
 
+use crate::const_eval::CheckAlignment;
+
 use super::{
-    AllocId, AllocRange, Allocation, ConstAllocation, Frame, ImmTy, InterpCx, InterpResult,
-    MemoryKind, OpTy, Operand, PlaceTy, Pointer, Provenance, Scalar, StackPopUnwind,
+    AllocBytes, AllocId, AllocRange, Allocation, ConstAllocation, Frame, ImmTy, InterpCx,
+    InterpResult, MemoryKind, OpTy, Operand, PlaceTy, Pointer, Provenance, Scalar, StackPopUnwind,
 };
 
 /// Data returned by Machine::stack_pop,
@@ -103,10 +105,16 @@ pub trait Machine<'mir, 'tcx>: Sized {
     /// Extra data stored in every allocation.
     type AllocExtra: Debug + Clone + 'static;
 
+    /// Type for the bytes of the allocation.
+    type Bytes: AllocBytes + 'static;
+
     /// Memory's allocation map
     type MemoryMap: AllocMap<
             AllocId,
-            (MemoryKind<Self::MemoryKind>, Allocation<Self::Provenance, Self::AllocExtra>),
+            (
+                MemoryKind<Self::MemoryKind>,
+                Allocation<Self::Provenance, Self::AllocExtra, Self::Bytes>,
+            ),
         > + Default
         + Clone;
 
@@ -122,7 +130,7 @@ pub trait Machine<'mir, 'tcx>: Sized {
     const PANIC_ON_ALLOC_FAIL: bool;
 
     /// Whether memory accesses should be alignment-checked.
-    fn enforce_alignment(ecx: &InterpCx<'mir, 'tcx, Self>) -> bool;
+    fn enforce_alignment(ecx: &InterpCx<'mir, 'tcx, Self>) -> CheckAlignment;
 
     /// Whether, when checking alignment, we should look at the actual address and thus support
     /// custom alignment logic based on whatever the integer address happens to be.
@@ -130,6 +138,13 @@ pub trait Machine<'mir, 'tcx>: Sized {
     /// If this returns true, Provenance::OFFSET_IS_ADDR must be true.
     fn use_addr_for_alignment_check(ecx: &InterpCx<'mir, 'tcx, Self>) -> bool;
 
+    fn alignment_check_failed(
+        ecx: &InterpCx<'mir, 'tcx, Self>,
+        has: Align,
+        required: Align,
+        check: CheckAlignment,
+    ) -> InterpResult<'tcx, ()>;
+
     /// Whether to enforce the validity invariant
     fn enforce_validity(ecx: &InterpCx<'mir, 'tcx, Self>) -> bool;
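
A sketch of how the split between `enforce_alignment` and `alignment_check_failed` might be driven; the `CheckAlignment` variants are assumptions based on the const_eval module, and the types here are simplified stand-ins:

#[derive(Clone, Copy)]
enum CheckAlignment { No, Error, FutureIncompat }

fn check_align(has: u64, required: u64, mode: CheckAlignment) -> Result<(), String> {
    match mode {
        // The machine opted out of alignment checks entirely.
        CheckAlignment::No => Ok(()),
        _ if has >= required => Ok(()),
        // How to fail (hard error vs. future-incompat lint) is the
        // machine's decision, made in `alignment_check_failed`.
        _ => Err(format!("required alignment {required}, but found {has}")),
    }
}
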
 
@@ -138,8 +153,9 @@ pub trait Machine<'mir, 'tcx>: Sized {
         true
     }
 
-    /// Whether CheckedBinOp MIR statements should actually check for overflow.
-    fn checked_binop_checks_overflow(_ecx: &InterpCx<'mir, 'tcx, Self>) -> bool;
+    /// Whether Assert(OverflowNeg) and Assert(Overflow) MIR terminators should actually
+    /// check for overflow.
+    fn ignore_checkable_overflow_assertions(_ecx: &InterpCx<'mir, 'tcx, Self>) -> bool;
 
     /// Entry point for obtaining the MIR of anything that should get evaluated.
     /// So not just functions and shims, but also const/static initializers, anonymous
@@ -171,7 +187,7 @@ pub trait Machine<'mir, 'tcx>: Sized {
         unwind: StackPopUnwind,
     ) -> InterpResult<'tcx, Option<(&'mir mir::Body<'tcx>, ty::Instance<'tcx>)>>;
 
-    /// Execute `fn_val`.  It is the hook's responsibility to advance the instruction
+    /// Execute `fn_val`. It is the hook's responsibility to advance the instruction
     /// pointer as appropriate.
     fn call_extra_fn(
         ecx: &mut InterpCx<'mir, 'tcx, Self>,
@@ -235,12 +251,18 @@ pub trait Machine<'mir, 'tcx>: Sized {
     }
 
     /// Called before a basic block terminator is executed.
-    /// You can use this to detect endlessly running programs.
     #[inline]
     fn before_terminator(_ecx: &mut InterpCx<'mir, 'tcx, Self>) -> InterpResult<'tcx> {
         Ok(())
     }
 
+    /// Called when the interpreter encounters a `StatementKind::ConstEvalCounter` instruction.
+    /// You can use this to detect long or endlessly running programs.
+    #[inline]
+    fn increment_const_eval_counter(_ecx: &mut InterpCx<'mir, 'tcx, Self>) -> InterpResult<'tcx> {
+        Ok(())
+    }
+
     /// Called before a global allocation is accessed.
     /// `def_id` is `Some` if this is the "lazy" allocation of a static.
     #[inline]
@@ -276,7 +298,7 @@ pub trait Machine<'mir, 'tcx>: Sized {
     fn adjust_alloc_base_pointer(
         ecx: &InterpCx<'mir, 'tcx, Self>,
         ptr: Pointer,
-    ) -> Pointer<Self::Provenance>;
+    ) -> InterpResult<'tcx, Pointer<Self::Provenance>>;
 
     /// "Int-to-pointer cast"
     fn ptr_from_addr_cast(
@@ -322,7 +344,7 @@ pub trait Machine<'mir, 'tcx>: Sized {
         id: AllocId,
         alloc: Cow<'b, Allocation>,
         kind: Option<MemoryKind<Self::MemoryKind>>,
-    ) -> InterpResult<'tcx, Cow<'b, Allocation<Self::Provenance, Self::AllocExtra>>>;
+    ) -> InterpResult<'tcx, Cow<'b, Allocation<Self::Provenance, Self::AllocExtra, Self::Bytes>>>;
 
     fn eval_inline_asm(
         _ecx: &mut InterpCx<'mir, 'tcx, Self>,
@@ -373,9 +395,21 @@ pub trait Machine<'mir, 'tcx>: Sized {
         Ok(())
     }
 
-    /// Executes a retagging operation.
+    /// Executes a retagging operation for a single pointer.
+    /// Returns the possibly adjusted pointer.
+    #[inline]
+    fn retag_ptr_value(
+        _ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        _kind: mir::RetagKind,
+        val: &ImmTy<'tcx, Self::Provenance>,
+    ) -> InterpResult<'tcx, ImmTy<'tcx, Self::Provenance>> {
+        Ok(val.clone())
+    }
+
+    /// Executes a retagging operation on a compound value.
+    /// Replaces all pointers stored in the given place.
     #[inline]
-    fn retag(
+    fn retag_place_contents(
         _ecx: &mut InterpCx<'mir, 'tcx, Self>,
         _kind: mir::RetagKind,
         _place: &PlaceTy<'tcx, Self::Provenance>,
@@ -417,8 +451,8 @@ pub trait Machine<'mir, 'tcx>: Sized {
     }
 }
 
-// A lot of the flexibility above is just needed for `Miri`, but all "compile-time" machines
-// (CTFE and ConstProp) use the same instance.  Here, we share that code.
+/// A lot of the flexibility above is just needed for `Miri`, but all "compile-time" machines
+/// (CTFE and ConstProp) use the same instance. Here, we share that code.
 pub macro compile_time_machine(<$mir: lifetime, $tcx: lifetime>) {
     type Provenance = AllocId;
     type ProvenanceExtra = ();
@@ -431,6 +465,7 @@ pub macro compile_time_machine(<$mir: lifetime, $tcx: lifetime>) {
 
     type AllocExtra = ();
     type FrameExtra = ();
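+    // Compile-time machines store the allocation bytes as a plain boxed slice.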
+    type Bytes = Box<[u8]>;
 
     #[inline(always)]
     fn use_addr_for_alignment_check(_ecx: &InterpCx<$mir, $tcx, Self>) -> bool {
@@ -439,8 +474,8 @@ pub macro compile_time_machine(<$mir: lifetime, $tcx: lifetime>) {
     }
 
     #[inline(always)]
-    fn checked_binop_checks_overflow(_ecx: &InterpCx<$mir, $tcx, Self>) -> bool {
-        true
+    fn ignore_checkable_overflow_assertions(_ecx: &InterpCx<$mir, $tcx, Self>) -> bool {
+        false
     }
 
     #[inline(always)]
@@ -478,8 +513,8 @@ pub macro compile_time_machine(<$mir: lifetime, $tcx: lifetime>) {
     fn adjust_alloc_base_pointer(
         _ecx: &InterpCx<$mir, $tcx, Self>,
         ptr: Pointer<AllocId>,
-    ) -> Pointer<AllocId> {
-        ptr
+    ) -> InterpResult<$tcx, Pointer<AllocId>> {
+        Ok(ptr)
     }
 
     #[inline(always)]
@@ -490,7 +525,7 @@ pub macro compile_time_machine(<$mir: lifetime, $tcx: lifetime>) {
         // Allow these casts, but make the pointer not dereferenceable.
         // (I.e., they behave like transmutation.)
         // This is correct because no pointers can ever be exposed in compile-time evaluation.
-        Ok(Pointer::from_addr(addr))
+        Ok(Pointer::from_addr_invalid(addr))
     }
 
     #[inline(always)]
diff --git a/compiler/rustc_const_eval/src/interpret/memory.rs b/compiler/rustc_const_eval/src/interpret/memory.rs
index e5e015c1e18..a3764a7d142 100644
--- a/compiler/rustc_const_eval/src/interpret/memory.rs
+++ b/compiler/rustc_const_eval/src/interpret/memory.rs
@@ -18,9 +18,12 @@ use rustc_middle::mir::display_allocation;
 use rustc_middle::ty::{self, Instance, ParamEnv, Ty, TyCtxt};
 use rustc_target::abi::{Align, HasDataLayout, Size};
 
+use crate::const_eval::CheckAlignment;
+
 use super::{
-    alloc_range, AllocId, AllocMap, AllocRange, Allocation, CheckInAllocMsg, GlobalAlloc, InterpCx,
-    InterpResult, Machine, MayLeak, Pointer, PointerArithmetic, Provenance, Scalar,
+    alloc_range, AllocBytes, AllocId, AllocMap, AllocRange, Allocation, CheckInAllocMsg,
+    GlobalAlloc, InterpCx, InterpResult, Machine, MayLeak, Pointer, PointerArithmetic, Provenance,
+    Scalar,
 };
 
 #[derive(Debug, PartialEq, Copy, Clone)]
@@ -112,16 +115,16 @@ pub struct Memory<'mir, 'tcx, M: Machine<'mir, 'tcx>> {
 /// A reference to some allocation that was already bounds-checked for the given region
 /// and had the on-access machine hooks run.
 #[derive(Copy, Clone)]
-pub struct AllocRef<'a, 'tcx, Prov, Extra> {
-    alloc: &'a Allocation<Prov, Extra>,
+pub struct AllocRef<'a, 'tcx, Prov: Provenance, Extra, Bytes: AllocBytes = Box<[u8]>> {
+    alloc: &'a Allocation<Prov, Extra, Bytes>,
     range: AllocRange,
     tcx: TyCtxt<'tcx>,
     alloc_id: AllocId,
 }
 /// A reference to some allocation that was already bounds-checked for the given region
 /// and had the on-access machine hooks run.
-pub struct AllocRefMut<'a, 'tcx, Prov, Extra> {
-    alloc: &'a mut Allocation<Prov, Extra>,
+pub struct AllocRefMut<'a, 'tcx, Prov: Provenance, Extra, Bytes: AllocBytes = Box<[u8]>> {
+    alloc: &'a mut Allocation<Prov, Extra, Bytes>,
     range: AllocRange,
     tcx: TyCtxt<'tcx>,
     alloc_id: AllocId,
@@ -144,7 +147,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
 
 impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
     /// Call this to turn untagged "global" pointers (obtained via `tcx`) into
-    /// the machine pointer to the allocation.  Must never be used
+    /// the machine pointer to the allocation. Must never be used
     /// for any other pointers, nor for TLS statics.
     ///
     /// Using the resulting pointer represents a *direct* access to that memory
@@ -169,7 +172,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             _ => {}
         }
         // And we need to get the provenance.
-        Ok(M::adjust_alloc_base_pointer(self, ptr))
+        M::adjust_alloc_base_pointer(self, ptr)
     }
 
     pub fn create_fn_alloc_ptr(
@@ -198,8 +201,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         kind: MemoryKind<M::MemoryKind>,
     ) -> InterpResult<'tcx, Pointer<M::Provenance>> {
         let alloc = Allocation::uninit(size, align, M::PANIC_ON_ALLOC_FAIL)?;
-        // We can `unwrap` since `alloc` contains no pointers.
-        Ok(self.allocate_raw_ptr(alloc, kind).unwrap())
+        self.allocate_raw_ptr(alloc, kind)
     }
 
     pub fn allocate_bytes_ptr(
@@ -208,10 +210,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         align: Align,
         kind: MemoryKind<M::MemoryKind>,
         mutability: Mutability,
-    ) -> Pointer<M::Provenance> {
+    ) -> InterpResult<'tcx, Pointer<M::Provenance>> {
         let alloc = Allocation::from_bytes(bytes, align, mutability);
-        // We can `unwrap` since `alloc` contains no pointers.
-        self.allocate_raw_ptr(alloc, kind).unwrap()
+        self.allocate_raw_ptr(alloc, kind)
     }
 
     /// This can fail only if `alloc` contains provenance.
@@ -228,7 +229,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         );
         let alloc = M::adjust_allocation(self, id, Cow::Owned(alloc), Some(kind))?;
         self.memory.alloc_map.insert(id, (kind, alloc.into_owned()));
-        Ok(M::adjust_alloc_base_pointer(self, Pointer::from(id)))
+        M::adjust_alloc_base_pointer(self, Pointer::from(id))
     }
 
     pub fn reallocate_ptr(
@@ -302,9 +303,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             .into());
         };
 
-        debug!(?alloc);
-
-        if alloc.mutability == Mutability::Not {
+        if alloc.mutability.is_not() {
             throw_ub_format!("deallocating immutable allocation {alloc_id:?}");
         }
         if alloc_kind != kind {
@@ -351,11 +350,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         size: Size,
         align: Align,
     ) -> InterpResult<'tcx, Option<(AllocId, Size, M::ProvenanceExtra)>> {
-        let align = M::enforce_alignment(&self).then_some(align);
         self.check_and_deref_ptr(
             ptr,
             size,
             align,
+            M::enforce_alignment(self),
             CheckInAllocMsg::MemoryAccessTest,
             |alloc_id, offset, prov| {
                 let (size, align) = self.get_live_alloc_size_and_align(alloc_id)?;
@@ -375,10 +374,17 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         align: Align,
         msg: CheckInAllocMsg,
     ) -> InterpResult<'tcx> {
-        self.check_and_deref_ptr(ptr, size, Some(align), msg, |alloc_id, _, _| {
-            let (size, align) = self.get_live_alloc_size_and_align(alloc_id)?;
-            Ok((size, align, ()))
-        })?;
+        self.check_and_deref_ptr(
+            ptr,
+            size,
+            align,
+            CheckAlignment::Error,
+            msg,
+            |alloc_id, _, _| {
+                let (size, align) = self.get_live_alloc_size_and_align(alloc_id)?;
+                Ok((size, align, ()))
+            },
+        )?;
         Ok(())
     }
 
@@ -390,7 +396,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         &self,
         ptr: Pointer<Option<M::Provenance>>,
         size: Size,
-        align: Option<Align>,
+        align: Align,
+        check: CheckAlignment,
         msg: CheckInAllocMsg,
         alloc_size: impl FnOnce(
             AllocId,
@@ -398,19 +405,6 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             M::ProvenanceExtra,
         ) -> InterpResult<'tcx, (Size, Align, T)>,
     ) -> InterpResult<'tcx, Option<T>> {
-        fn check_offset_align<'tcx>(offset: u64, align: Align) -> InterpResult<'tcx> {
-            if offset % align.bytes() == 0 {
-                Ok(())
-            } else {
-                // The biggest power of two through which `offset` is divisible.
-                let offset_pow2 = 1 << offset.trailing_zeros();
-                throw_ub!(AlignmentCheckFailed {
-                    has: Align::from_bytes(offset_pow2).unwrap(),
-                    required: align,
-                })
-            }
-        }
-
         Ok(match self.ptr_try_get_alloc_id(ptr) {
             Err(addr) => {
                 // We couldn't get a proper allocation. This is only okay if the access size is 0,
@@ -419,8 +413,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                     throw_ub!(DanglingIntPointer(addr, msg));
                 }
                 // Must be aligned.
-                if let Some(align) = align {
-                    check_offset_align(addr, align)?;
+                if check.should_check() {
+                    self.check_offset_align(addr, align, check)?;
                 }
                 None
             }
@@ -432,7 +426,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                     throw_ub!(PointerOutOfBounds {
                         alloc_id,
                         alloc_size,
-                        ptr_offset: self.machine_usize_to_isize(offset.bytes()),
+                        ptr_offset: self.target_usize_to_isize(offset.bytes()),
                         ptr_size: size,
                         msg,
                     })
@@ -443,16 +437,16 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 }
                 // Test align. Check this last; if both bounds and alignment are violated
                 // we want the error to be about the bounds.
-                if let Some(align) = align {
+                if check.should_check() {
                     if M::use_addr_for_alignment_check(self) {
                         // `use_addr_for_alignment_check` can only be true if `OFFSET_IS_ADDR` is true.
-                        check_offset_align(ptr.addr().bytes(), align)?;
+                        self.check_offset_align(ptr.addr().bytes(), align, check)?;
                     } else {
                         // Check allocation alignment and offset alignment.
                         if alloc_align.bytes() < align.bytes() {
-                            throw_ub!(AlignmentCheckFailed { has: alloc_align, required: align });
+                            M::alignment_check_failed(self, alloc_align, align, check)?;
                         }
-                        check_offset_align(offset.bytes(), align)?;
+                        self.check_offset_align(offset.bytes(), align, check)?;
                     }
                 }
 
@@ -462,6 +456,21 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             }
         })
     }
+
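+    /// Checks whether `offset` is a multiple of `align`, deferring to the machine's
+    /// `alignment_check_failed` hook when it is not.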
+    fn check_offset_align(
+        &self,
+        offset: u64,
+        align: Align,
+        check: CheckAlignment,
+    ) -> InterpResult<'tcx> {
+        if offset % align.bytes() == 0 {
+            Ok(())
+        } else {
+            // The biggest power of two by which `offset` is divisible.
+            let offset_pow2 = 1 << offset.trailing_zeros();
+            M::alignment_check_failed(self, Align::from_bytes(offset_pow2).unwrap(), align, check)
+        }
+    }
 }
 
 /// Allocation accessors
@@ -475,7 +484,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         &self,
         id: AllocId,
         is_write: bool,
-    ) -> InterpResult<'tcx, Cow<'tcx, Allocation<M::Provenance, M::AllocExtra>>> {
+    ) -> InterpResult<'tcx, Cow<'tcx, Allocation<M::Provenance, M::AllocExtra, M::Bytes>>> {
         let (alloc, def_id) = match self.tcx.try_get_global_alloc(id) {
             Some(GlobalAlloc::Memory(mem)) => {
                 // Memory of a constant or promoted or anonymous memory referenced by a static.
@@ -503,8 +512,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                     throw_unsup!(ReadExternStatic(def_id));
                 }
 
-                // Use a precise span for better cycle errors.
-                (self.tcx.at(self.cur_span()).eval_static_initializer(def_id)?, Some(def_id))
+                // We don't give a span -- statics don't need that, they cannot be generic or associated.
+                let val = self.ctfe_query(None, |tcx| tcx.eval_static_initializer(def_id))?;
+                (val, Some(def_id))
             }
         };
         M::before_access_global(*self.tcx, &self.machine, id, alloc, def_id, is_write)?;
@@ -517,6 +527,17 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         )
     }
 
+    /// Get the base address for the bytes in an `Allocation` specified by the
+    /// `AllocId` passed in; error if no such allocation exists.
+    ///
+    /// It is up to the caller to take sufficient care when using this address:
+    /// there could be provenance or uninit memory in there, and other memory
+    /// accesses could invalidate the exposed pointer.
+    pub fn alloc_base_addr(&self, id: AllocId) -> InterpResult<'tcx, *const u8> {
+        let alloc = self.get_alloc_raw(id)?;
+        Ok(alloc.base_addr())
+    }
+
     /// Gives raw access to the `Allocation`, without bounds or alignment checks.
     /// The caller is responsible for calling the access hooks!
     ///
@@ -524,8 +545,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
     fn get_alloc_raw(
         &self,
         id: AllocId,
-    ) -> InterpResult<'tcx, &Allocation<M::Provenance, M::AllocExtra>> {
-        // The error type of the inner closure here is somewhat funny.  We have two
+    ) -> InterpResult<'tcx, &Allocation<M::Provenance, M::AllocExtra, M::Bytes>> {
+        // The error type of the inner closure here is somewhat funny. We have two
         // ways of "erroring": An actual error, or because we got a reference from
         // `get_global_alloc` that we can actually use directly without inserting anything anywhere.
         // So the error type is `InterpResult<'tcx, &Allocation<M::Provenance>>`.
@@ -560,12 +581,13 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         ptr: Pointer<Option<M::Provenance>>,
         size: Size,
         align: Align,
-    ) -> InterpResult<'tcx, Option<AllocRef<'a, 'tcx, M::Provenance, M::AllocExtra>>> {
-        let align = M::enforce_alignment(self).then_some(align);
+    ) -> InterpResult<'tcx, Option<AllocRef<'a, 'tcx, M::Provenance, M::AllocExtra, M::Bytes>>>
+    {
         let ptr_and_alloc = self.check_and_deref_ptr(
             ptr,
             size,
             align,
+            M::enforce_alignment(self),
             CheckInAllocMsg::MemoryAccessTest,
             |alloc_id, offset, prov| {
                 let alloc = self.get_alloc_raw(alloc_id)?;
@@ -603,7 +625,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
     fn get_alloc_raw_mut(
         &mut self,
         id: AllocId,
-    ) -> InterpResult<'tcx, (&mut Allocation<M::Provenance, M::AllocExtra>, &mut M)> {
+    ) -> InterpResult<'tcx, (&mut Allocation<M::Provenance, M::AllocExtra, M::Bytes>, &mut M)> {
         // We have "NLL problem case #3" here, which cannot be worked around without loss of
         // efficiency even for the common case where the key is in the map.
         // <https://rust-lang.github.io/rfcs/2094-nll.html#problem-case-3-conditional-control-flow-across-functions>
@@ -620,7 +642,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         }
 
         let (_kind, alloc) = self.memory.alloc_map.get_mut(id).unwrap();
-        if alloc.mutability == Mutability::Not {
+        if alloc.mutability.is_not() {
             throw_ub!(WriteToReadOnly(id))
         }
         Ok((alloc, &mut self.machine))
@@ -632,7 +654,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         ptr: Pointer<Option<M::Provenance>>,
         size: Size,
         align: Align,
-    ) -> InterpResult<'tcx, Option<AllocRefMut<'a, 'tcx, M::Provenance, M::AllocExtra>>> {
+    ) -> InterpResult<'tcx, Option<AllocRefMut<'a, 'tcx, M::Provenance, M::AllocExtra, M::Bytes>>>
+    {
         let parts = self.get_ptr_access(ptr, size, align)?;
         if let Some((alloc_id, offset, prov)) = parts {
             let tcx = *self.tcx;
@@ -681,9 +704,13 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 assert!(self.tcx.is_static(def_id));
                 assert!(!self.tcx.is_thread_local_static(def_id));
                 // Use size and align of the type.
-                let ty = self.tcx.type_of(def_id);
+                let ty = self
+                    .tcx
+                    .type_of(def_id)
+                    .no_bound_vars()
+                    .expect("statics should not have generic parameters");
                 let layout = self.tcx.layout_of(ParamEnv::empty().and(ty)).unwrap();
-                assert!(!layout.is_unsized());
+                assert!(layout.is_sized());
                 (layout.size, layout.align.abi, AllocKind::LiveData)
             }
             Some(GlobalAlloc::Memory(alloc)) => {
@@ -797,7 +824,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                     // This is a new allocation, so add the allocations it points to to `todo`.
                     if let Some((_, alloc)) = self.memory.alloc_map.get(id) {
                         todo.extend(
-                            alloc.provenance().values().filter_map(|prov| prov.get_alloc_id()),
+                            alloc.provenance().provenances().filter_map(|prov| prov.get_alloc_id()),
                         );
                     }
                 }
@@ -827,13 +854,14 @@ pub struct DumpAllocs<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> {
 impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> std::fmt::Debug for DumpAllocs<'a, 'mir, 'tcx, M> {
     fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         // Cannot be a closure because it is generic in `Prov`, `Extra`.
-        fn write_allocation_track_relocs<'tcx, Prov: Provenance, Extra>(
+        fn write_allocation_track_relocs<'tcx, Prov: Provenance, Extra, Bytes: AllocBytes>(
             fmt: &mut std::fmt::Formatter<'_>,
             tcx: TyCtxt<'tcx>,
             allocs_to_print: &mut VecDeque<AllocId>,
-            alloc: &Allocation<Prov, Extra>,
+            alloc: &Allocation<Prov, Extra, Bytes>,
         ) -> std::fmt::Result {
-            for alloc_id in alloc.provenance().values().filter_map(|prov| prov.get_alloc_id()) {
+            for alloc_id in alloc.provenance().provenances().filter_map(|prov| prov.get_alloc_id())
+            {
                 allocs_to_print.push_back(alloc_id);
             }
             write!(fmt, "{}", display_allocation(tcx, alloc))
@@ -851,7 +879,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> std::fmt::Debug for DumpAllocs<'a,
 
             write!(fmt, "{id:?}")?;
             match self.ecx.memory.alloc_map.get(id) {
-                Some(&(kind, ref alloc)) => {
+                Some((kind, alloc)) => {
                     // normal alloc
                     write!(fmt, " ({}, ", kind)?;
                     write_allocation_track_relocs(
@@ -898,7 +926,9 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> std::fmt::Debug for DumpAllocs<'a,
 }
 
 /// Reading and writing.
-impl<'tcx, 'a, Prov: Provenance, Extra> AllocRefMut<'a, 'tcx, Prov, Extra> {
+impl<'tcx, 'a, Prov: Provenance, Extra, Bytes: AllocBytes>
+    AllocRefMut<'a, 'tcx, Prov, Extra, Bytes>
+{
     /// `range` is relative to this allocation reference, not the base of the allocation.
     pub fn write_scalar(&mut self, range: AllocRange, val: Scalar<Prov>) -> InterpResult<'tcx> {
         let range = self.range.subrange(range);
@@ -923,7 +953,7 @@ impl<'tcx, 'a, Prov: Provenance, Extra> AllocRefMut<'a, 'tcx, Prov, Extra> {
     }
 }
 
-impl<'tcx, 'a, Prov: Provenance, Extra> AllocRef<'a, 'tcx, Prov, Extra> {
+impl<'tcx, 'a, Prov: Provenance, Extra, Bytes: AllocBytes> AllocRef<'a, 'tcx, Prov, Extra, Bytes> {
     /// `range` is relative to this allocation reference, not the base of the allocation.
     pub fn read_scalar(
         &self,
@@ -962,7 +992,7 @@ impl<'tcx, 'a, Prov: Provenance, Extra> AllocRef<'a, 'tcx, Prov, Extra> {
 
     /// Returns whether the allocation has provenance anywhere in the range of the `AllocRef`.
     pub(crate) fn has_provenance(&self) -> bool {
-        self.alloc.range_has_provenance(&self.tcx, self.range)
+        !self.alloc.provenance().range_empty(self.range, &self.tcx)
     }
 }
 
@@ -1060,7 +1090,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 
         // Source alloc preparations and access hooks.
         let Some((src_alloc_id, src_offset, src_prov)) = src_parts else {
-            // Zero-sized *source*, that means dst is also zero-sized and we have nothing to do.
+            // Zero-sized *source*, that means dest is also zero-sized and we have nothing to do.
             return Ok(());
         };
         let src_alloc = self.get_alloc_raw(src_alloc_id)?;
@@ -1079,22 +1109,18 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             return Ok(());
         };
 
-        // Checks provenance edges on the src, which needs to happen before
-        // `prepare_provenance_copy`.
-        if src_alloc.range_has_provenance(&tcx, alloc_range(src_range.start, Size::ZERO)) {
-            throw_unsup!(PartialPointerCopy(Pointer::new(src_alloc_id, src_range.start)));
-        }
-        if src_alloc.range_has_provenance(&tcx, alloc_range(src_range.end(), Size::ZERO)) {
-            throw_unsup!(PartialPointerCopy(Pointer::new(src_alloc_id, src_range.end())));
-        }
+        // Prepare getting source provenance.
         let src_bytes = src_alloc.get_bytes_unchecked(src_range).as_ptr(); // raw ptr, so we can also get a ptr to the destination allocation
         // first copy the provenance to a temporary buffer, because
         // `get_bytes_mut` will clear the provenance, which is correct,
         // since we don't want to keep any provenance at the target.
-        let provenance =
-            src_alloc.prepare_provenance_copy(self, src_range, dest_offset, num_copies);
+        // This will also error if copying partial provenance is not supported.
+        let provenance = src_alloc
+            .provenance()
+            .prepare_copy(src_range, dest_offset, num_copies, self)
+            .map_err(|e| e.to_interp_error(dest_alloc_id))?;
         // Prepare a copy of the initialization mask.
-        let compressed = src_alloc.compress_uninit_range(src_range);
+        let init = src_alloc.init_mask().prepare_copy(src_range);
 
         // Destination alloc preparations and access hooks.
         let (dest_alloc, extra) = self.get_alloc_raw_mut(dest_alloc_id)?;
@@ -1111,7 +1137,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             .map_err(|e| e.to_interp_error(dest_alloc_id))?
             .as_mut_ptr();
 
-        if compressed.no_bytes_init() {
+        if init.no_bytes_init() {
             // Fast path: If all bytes are `uninit` then there is nothing to copy. The target range
             // is marked as uninitialized but we otherwise omit changing the byte representation which may
             // be arbitrary for uninitialized bytes.
@@ -1160,13 +1186,13 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         }
 
         // now fill in all the "init" data
-        dest_alloc.mark_compressed_init_range(
-            &compressed,
+        dest_alloc.init_mask_apply_copy(
+            init,
             alloc_range(dest_offset, size), // just a single copy (i.e., not full `dest_range`)
             num_copies,
         );
         // copy the provenance to the destination
-        dest_alloc.mark_provenance_range(provenance);
+        dest_alloc.provenance_apply_copy(provenance);
 
         Ok(())
     }
diff --git a/compiler/rustc_const_eval/src/interpret/mod.rs b/compiler/rustc_const_eval/src/interpret/mod.rs
index 2e356f67bf3..86de4e4e32c 100644
--- a/compiler/rustc_const_eval/src/interpret/mod.rs
+++ b/compiler/rustc_const_eval/src/interpret/mod.rs
@@ -1,6 +1,7 @@
 //! An interpreter for MIR used in CTFE and by miri
 
 mod cast;
+mod discriminant;
 mod eval_context;
 mod intern;
 mod intrinsics;
diff --git a/compiler/rustc_const_eval/src/interpret/operand.rs b/compiler/rustc_const_eval/src/interpret/operand.rs
index dd00678aa0c..8d5192bca67 100644
--- a/compiler/rustc_const_eval/src/interpret/operand.rs
+++ b/compiler/rustc_const_eval/src/interpret/operand.rs
@@ -1,13 +1,15 @@
 //! Functions concerning immediate values and operands, and reading from operands.
 //! All high-level functions to read from memory work on operands as sources.
 
+use either::{Either, Left, Right};
+
 use rustc_hir::def::Namespace;
-use rustc_middle::ty::layout::{LayoutOf, PrimitiveExt, TyAndLayout};
+use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
 use rustc_middle::ty::print::{FmtPrinter, PrettyPrinter};
-use rustc_middle::ty::{ConstInt, Ty};
+use rustc_middle::ty::{ConstInt, Ty, ValTree};
 use rustc_middle::{mir, ty};
-use rustc_target::abi::{self, Abi, Align, HasDataLayout, Size, TagEncoding};
-use rustc_target::abi::{VariantIdx, Variants};
+use rustc_span::Span;
+use rustc_target::abi::{self, Abi, Align, HasDataLayout, Size};
 
 use super::{
     alloc_range, from_known_layout, mir_assign_valid_types, AllocId, ConstValue, Frame, GlobalId,
@@ -36,7 +38,7 @@ pub enum Immediate<Prov: Provenance = AllocId> {
 impl<Prov: Provenance> From<Scalar<Prov>> for Immediate<Prov> {
     #[inline(always)]
     fn from(val: Scalar<Prov>) -> Self {
-        Immediate::Scalar(val.into())
+        Immediate::Scalar(val)
     }
 }
 
@@ -50,7 +52,7 @@ impl<Prov: Provenance> Immediate<Prov> {
     }
 
     pub fn new_slice(val: Scalar<Prov>, len: u64, cx: &impl HasDataLayout) -> Self {
-        Immediate::ScalarPair(val.into(), Scalar::from_machine_usize(len, cx).into())
+        Immediate::ScalarPair(val, Scalar::from_target_usize(len, cx))
     }
 
     pub fn new_dyn_trait(
@@ -58,7 +60,7 @@ impl<Prov: Provenance> Immediate<Prov> {
         vtable: Pointer<Option<Prov>>,
         cx: &impl HasDataLayout,
     ) -> Self {
-        Immediate::ScalarPair(val.into(), Scalar::from_maybe_pointer(vtable, cx))
+        Immediate::ScalarPair(val, Scalar::from_maybe_pointer(vtable, cx))
     }
 
     #[inline]
@@ -253,16 +255,31 @@ impl<'tcx, Prov: Provenance> OpTy<'tcx, Prov> {
         }
     }
 
-    pub fn offset_with_meta(
+    /// Replace the layout of this operand. There's basically no sanity check that this makes
+    /// sense; you'd better know what you are doing! If this is an immediate, applying the wrong
+    /// layout can not only lead to invalid data, it can actually *shift the data around*, since
+    /// the offsets of a ScalarPair are entirely determined by the layout, not the data.
+    pub fn transmute(&self, layout: TyAndLayout<'tcx>) -> Self {
+        assert_eq!(
+            self.layout.size, layout.size,
+            "transmuting with a size change, that doesn't seem right"
+        );
+        OpTy { layout, ..*self }
+    }
+
+    /// Offset the operand in memory (if possible) and change its metadata.
+    ///
+    /// This can go wrong very easily if you give the wrong layout for the new place!
+    pub(super) fn offset_with_meta(
         &self,
         offset: Size,
         meta: MemPlaceMeta<Prov>,
         layout: TyAndLayout<'tcx>,
         cx: &impl HasDataLayout,
     ) -> InterpResult<'tcx, Self> {
-        match self.try_as_mplace() {
-            Ok(mplace) => Ok(mplace.offset_with_meta(offset, meta, layout, cx)?.into()),
-            Err(imm) => {
+        match self.as_mplace_or_imm() {
+            Left(mplace) => Ok(mplace.offset_with_meta(offset, meta, layout, cx)?.into()),
+            Right(imm) => {
                 assert!(
                     matches!(*imm, Immediate::Uninit),
                     "Scalar/ScalarPair cannot be offset into"
@@ -274,13 +291,16 @@ impl<'tcx, Prov: Provenance> OpTy<'tcx, Prov> {
         }
     }
 
+    /// Offset the operand in memory (if possible).
+    ///
+    /// This can go wrong very easily if you give the wrong layout for the new place!
     pub fn offset(
         &self,
         offset: Size,
         layout: TyAndLayout<'tcx>,
         cx: &impl HasDataLayout,
     ) -> InterpResult<'tcx, Self> {
-        assert!(!layout.is_unsized());
+        assert!(layout.is_sized());
         self.offset_with_meta(offset, MemPlaceMeta::None, layout, cx)
     }
 }
@@ -316,7 +336,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 assert_eq!(size, mplace.layout.size, "abi::Scalar size does not match layout size");
                 let scalar = alloc.read_scalar(
                     alloc_range(Size::ZERO, size),
-                    /*read_provenance*/ s.is_ptr(),
+                    /*read_provenance*/ matches!(s, abi::Pointer(_)),
                 )?;
                 Some(ImmTy { imm: scalar.into(), layout: mplace.layout })
             }
@@ -332,16 +352,13 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 assert!(b_offset.bytes() > 0); // in `operand_field` we use the offset to tell apart the fields
                 let a_val = alloc.read_scalar(
                     alloc_range(Size::ZERO, a_size),
-                    /*read_provenance*/ a.is_ptr(),
+                    /*read_provenance*/ matches!(a, abi::Pointer(_)),
                 )?;
                 let b_val = alloc.read_scalar(
                     alloc_range(b_offset, b_size),
-                    /*read_provenance*/ b.is_ptr(),
+                    /*read_provenance*/ matches!(b, abi::Pointer(_)),
                 )?;
-                Some(ImmTy {
-                    imm: Immediate::ScalarPair(a_val.into(), b_val.into()),
-                    layout: mplace.layout,
-                })
+                Some(ImmTy { imm: Immediate::ScalarPair(a_val, b_val), layout: mplace.layout })
             }
             _ => {
                 // Neither a scalar nor scalar pair.
@@ -352,8 +369,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 
     /// Try returning an immediate for the operand. If the layout does not permit loading this as an
     /// immediate, return where in memory we can find the data.
-    /// Note that for a given layout, this operation will either always fail or always
-    /// succeed!  Whether it succeeds depends on whether the layout can be represented
+    /// Note that for a given layout, this operation will either always return Left or always
+    /// return Right! Whether it returns Left depends on whether the layout can be represented
     /// in an `Immediate`, not on which data is stored there currently.
     ///
     /// This is an internal function that should not usually be used; call `read_immediate` instead.
@@ -361,22 +378,22 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
     pub fn read_immediate_raw(
         &self,
         src: &OpTy<'tcx, M::Provenance>,
-    ) -> InterpResult<'tcx, Result<ImmTy<'tcx, M::Provenance>, MPlaceTy<'tcx, M::Provenance>>> {
-        Ok(match src.try_as_mplace() {
-            Ok(ref mplace) => {
+    ) -> InterpResult<'tcx, Either<MPlaceTy<'tcx, M::Provenance>, ImmTy<'tcx, M::Provenance>>> {
+        Ok(match src.as_mplace_or_imm() {
+            Left(ref mplace) => {
                 if let Some(val) = self.read_immediate_from_mplace_raw(mplace)? {
-                    Ok(val)
+                    Right(val)
                 } else {
-                    Err(*mplace)
+                    Left(*mplace)
                 }
             }
-            Err(val) => Ok(val),
+            Right(val) => Right(val),
         })
     }
 
     /// Read an immediate from a place, asserting that that is possible with the given layout.
     ///
-    /// If this suceeds, the `ImmTy` is never `Uninit`.
+    /// If this succeeds, the `ImmTy` is never `Uninit`.
     #[inline(always)]
     pub fn read_immediate(
         &self,
@@ -389,7 +406,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         ) {
             span_bug!(self.cur_span(), "primitive read not possible for type: {:?}", op.layout.ty);
         }
-        let imm = self.read_immediate_raw(op)?.unwrap();
+        let imm = self.read_immediate_raw(op)?.right().unwrap();
         if matches!(*imm, Immediate::Uninit) {
             throw_ub!(InvalidUninitBytes(None));
         }
@@ -404,6 +421,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         Ok(self.read_immediate(op)?.to_scalar())
     }
 
+    // Pointer-sized reads are fairly common and need target layout access, so we wrap them in
+    // convenience functions.
+
     /// Read a pointer from a place.
     pub fn read_pointer(
         &self,
@@ -411,6 +431,14 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
     ) -> InterpResult<'tcx, Pointer<Option<M::Provenance>>> {
         self.read_scalar(op)?.to_pointer(self)
     }
+    /// Read a pointer-sized unsigned integer from a place.
+    pub fn read_target_usize(&self, op: &OpTy<'tcx, M::Provenance>) -> InterpResult<'tcx, u64> {
+        self.read_scalar(op)?.to_target_usize(self)
+    }
+    /// Read a pointer-sized signed integer from a place.
+    pub fn read_target_isize(&self, op: &OpTy<'tcx, M::Provenance>) -> InterpResult<'tcx, i64> {
+        self.read_scalar(op)?.to_target_isize(self)
+    }
 
     /// Turn the wide MPlace into a string (must already be dereferenced!)
     pub fn read_str(&self, mplace: &MPlaceTy<'tcx, M::Provenance>) -> InterpResult<'tcx, &str> {
@@ -431,9 +459,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         // Basically we just transmute this place into an array following simd_size_and_type.
         // This only works in memory, but repr(simd) types should never be immediates anyway.
         assert!(op.layout.ty.is_simd());
-        match op.try_as_mplace() {
-            Ok(mplace) => self.mplace_to_simd(&mplace),
-            Err(imm) => match *imm {
+        match op.as_mplace_or_imm() {
+            Left(mplace) => self.mplace_to_simd(&mplace),
+            Right(imm) => match *imm {
                 Immediate::Uninit => {
                     throw_ub!(InvalidUninitBytes(None))
                 }
@@ -477,7 +505,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         Ok(OpTy { op, layout: place.layout, align: Some(place.align) })
     }
 
-    /// Evaluate a place with the goal of reading from it.  This lets us sometimes
+    /// Evaluate a place with the goal of reading from it. This lets us sometimes
     /// avoid allocations.
     pub fn eval_place_to_op(
         &self,
@@ -522,19 +550,19 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         layout: Option<TyAndLayout<'tcx>>,
     ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
         use rustc_middle::mir::Operand::*;
-        let op = match *mir_op {
+        let op = match mir_op {
             // FIXME: do some more logic on `move` to invalidate the old location
-            Copy(place) | Move(place) => self.eval_place_to_op(place, layout)?,
+            &Copy(place) | &Move(place) => self.eval_place_to_op(place, layout)?,
 
-            Constant(ref constant) => {
-                let val =
+            Constant(constant) => {
+                let c =
                     self.subst_from_current_frame_and_normalize_erasing_regions(constant.literal)?;
 
                 // This can still fail:
                 // * During ConstProp, with `TooGeneric` or since the `required_consts` were not all
                 //   checked yet.
                 // * During CTFE, since promoteds in `const`/`static` initializer bodies can fail.
-                self.const_to_op(&val, layout)?
+                self.eval_mir_constant(&c, Some(constant.span), layout)?
             }
         };
         trace!("{:?}: {:?}", mir_op, *op);
@@ -549,9 +577,39 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         ops.iter().map(|op| self.eval_operand(op, None)).collect()
     }
 
-    pub fn const_to_op(
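+    /// Evaluates a `ty::Const` down to a `ValTree`, resolving and evaluating unevaluated
+    /// constants on the way; bails out with `TooGeneric` while the constant still depends
+    /// on generic parameters.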
+    fn eval_ty_constant(
+        &self,
+        val: ty::Const<'tcx>,
+        span: Option<Span>,
+    ) -> InterpResult<'tcx, ValTree<'tcx>> {
+        Ok(match val.kind() {
+            ty::ConstKind::Param(_) | ty::ConstKind::Placeholder(..) => {
+                throw_inval!(TooGeneric)
+            }
+            // FIXME(generic_const_exprs): `ConstKind::Expr` should be able to be evaluated
+            ty::ConstKind::Expr(_) => throw_inval!(TooGeneric),
+            ty::ConstKind::Error(reported) => {
+                throw_inval!(AlreadyReported(reported))
+            }
+            ty::ConstKind::Unevaluated(uv) => {
+                let instance = self.resolve(uv.def, uv.substs)?;
+                let cid = GlobalId { instance, promoted: None };
+                self.ctfe_query(span, |tcx| {
+                    tcx.eval_to_valtree(self.param_env.with_const().and(cid))
+                })?
+                .unwrap_or_else(|| bug!("unable to create ValTree for {uv:?}"))
+            }
+            ty::ConstKind::Bound(..) | ty::ConstKind::Infer(..) => {
+                span_bug!(self.cur_span(), "unexpected ConstKind in ctfe: {val:?}")
+            }
+            ty::ConstKind::Value(valtree) => valtree,
+        })
+    }
+
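+    /// Evaluates a MIR constant to an interpreter operand, normalizing it first and running
+    /// CTFE queries where needed; `span`, if given, is used for better error locations.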
+    pub fn eval_mir_constant(
         &self,
         val: &mir::ConstantKind<'tcx>,
+        span: Option<Span>,
         layout: Option<TyAndLayout<'tcx>>,
     ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
         // FIXME(const_prop): normalization needed b/c const prop lint in
@@ -563,44 +621,20 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         let val = self.tcx.normalize_erasing_regions(self.param_env, *val);
         match val {
             mir::ConstantKind::Ty(ct) => {
-                match ct.kind() {
-                    ty::ConstKind::Param(_) | ty::ConstKind::Placeholder(..) => {
-                        throw_inval!(TooGeneric)
-                    }
-                    ty::ConstKind::Error(reported) => {
-                        throw_inval!(AlreadyReported(reported))
-                    }
-                    ty::ConstKind::Unevaluated(uv) => {
-                        // NOTE: We evaluate to a `ValTree` here as a check to ensure
-                        // we're working with valid constants, even though we never need it.
-                        let instance = self.resolve(uv.def, uv.substs)?;
-                        let cid = GlobalId { instance, promoted: None };
-                        let _valtree = self
-                            .tcx
-                            .eval_to_valtree(self.param_env.and(cid))?
-                            .unwrap_or_else(|| bug!("unable to create ValTree for {uv:?}"));
-
-                        Ok(self.eval_to_allocation(cid)?.into())
-                    }
-                    ty::ConstKind::Bound(..) | ty::ConstKind::Infer(..) => {
-                        span_bug!(self.cur_span(), "unexpected ConstKind in ctfe: {ct:?}")
-                    }
-                    ty::ConstKind::Value(valtree) => {
-                        let ty = ct.ty();
-                        let const_val = self.tcx.valtree_to_const_val((ty, valtree));
-                        self.const_val_to_op(const_val, ty, layout)
-                    }
-                }
+                let ty = ct.ty();
+                let valtree = self.eval_ty_constant(ct, span)?;
+                let const_val = self.tcx.valtree_to_const_val((ty, valtree));
+                self.const_val_to_op(const_val, ty, layout)
             }
             mir::ConstantKind::Val(val, ty) => self.const_val_to_op(val, ty, layout),
             mir::ConstantKind::Unevaluated(uv, _) => {
                 let instance = self.resolve(uv.def, uv.substs)?;
-                Ok(self.eval_to_allocation(GlobalId { instance, promoted: uv.promoted })?.into())
+                Ok(self.eval_global(GlobalId { instance, promoted: uv.promoted }, span)?.into())
             }
         }
     }
 
-    pub(crate) fn const_val_to_op(
+    pub(super) fn const_val_to_op(
         &self,
         val_val: ConstValue<'tcx>,
         ty: Ty<'tcx>,
@@ -640,154 +674,6 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         };
         Ok(OpTy { op, layout, align: Some(layout.align.abi) })
     }
-
-    /// Read discriminant, return the runtime value as well as the variant index.
-    /// Can also legally be called on non-enums (e.g. through the discriminant_value intrinsic)!
-    pub fn read_discriminant(
-        &self,
-        op: &OpTy<'tcx, M::Provenance>,
-    ) -> InterpResult<'tcx, (Scalar<M::Provenance>, VariantIdx)> {
-        trace!("read_discriminant_value {:#?}", op.layout);
-        // Get type and layout of the discriminant.
-        let discr_layout = self.layout_of(op.layout.ty.discriminant_ty(*self.tcx))?;
-        trace!("discriminant type: {:?}", discr_layout.ty);
-
-        // We use "discriminant" to refer to the value associated with a particular enum variant.
-        // This is not to be confused with its "variant index", which is just determining its position in the
-        // declared list of variants -- they can differ with explicitly assigned discriminants.
-        // We use "tag" to refer to how the discriminant is encoded in memory, which can be either
-        // straight-forward (`TagEncoding::Direct`) or with a niche (`TagEncoding::Niche`).
-        let (tag_scalar_layout, tag_encoding, tag_field) = match op.layout.variants {
-            Variants::Single { index } => {
-                let discr = match op.layout.ty.discriminant_for_variant(*self.tcx, index) {
-                    Some(discr) => {
-                        // This type actually has discriminants.
-                        assert_eq!(discr.ty, discr_layout.ty);
-                        Scalar::from_uint(discr.val, discr_layout.size)
-                    }
-                    None => {
-                        // On a type without actual discriminants, variant is 0.
-                        assert_eq!(index.as_u32(), 0);
-                        Scalar::from_uint(index.as_u32(), discr_layout.size)
-                    }
-                };
-                return Ok((discr, index));
-            }
-            Variants::Multiple { tag, ref tag_encoding, tag_field, .. } => {
-                (tag, tag_encoding, tag_field)
-            }
-        };
-
-        // There are *three* layouts that come into play here:
-        // - The discriminant has a type for typechecking. This is `discr_layout`, and is used for
-        //   the `Scalar` we return.
-        // - The tag (encoded discriminant) has layout `tag_layout`. This is always an integer type,
-        //   and used to interpret the value we read from the tag field.
-        //   For the return value, a cast to `discr_layout` is performed.
-        // - The field storing the tag has a layout, which is very similar to `tag_layout` but
-        //   may be a pointer. This is `tag_val.layout`; we just use it for sanity checks.
-
-        // Get layout for tag.
-        let tag_layout = self.layout_of(tag_scalar_layout.primitive().to_int_ty(*self.tcx))?;
-
-        // Read tag and sanity-check `tag_layout`.
-        let tag_val = self.read_immediate(&self.operand_field(op, tag_field)?)?;
-        assert_eq!(tag_layout.size, tag_val.layout.size);
-        assert_eq!(tag_layout.abi.is_signed(), tag_val.layout.abi.is_signed());
-        trace!("tag value: {}", tag_val);
-
-        // Figure out which discriminant and variant this corresponds to.
-        Ok(match *tag_encoding {
-            TagEncoding::Direct => {
-                let scalar = tag_val.to_scalar();
-                // Generate a specific error if `tag_val` is not an integer.
-                // (`tag_bits` itself is only used for error messages below.)
-                let tag_bits = scalar
-                    .try_to_int()
-                    .map_err(|dbg_val| err_ub!(InvalidTag(dbg_val)))?
-                    .assert_bits(tag_layout.size);
-                // Cast bits from tag layout to discriminant layout.
-                // After the checks we did above, this cannot fail, as
-                // discriminants are int-like.
-                let discr_val =
-                    self.cast_from_int_like(scalar, tag_val.layout, discr_layout.ty).unwrap();
-                let discr_bits = discr_val.assert_bits(discr_layout.size);
-                // Convert discriminant to variant index, and catch invalid discriminants.
-                let index = match *op.layout.ty.kind() {
-                    ty::Adt(adt, _) => {
-                        adt.discriminants(*self.tcx).find(|(_, var)| var.val == discr_bits)
-                    }
-                    ty::Generator(def_id, substs, _) => {
-                        let substs = substs.as_generator();
-                        substs
-                            .discriminants(def_id, *self.tcx)
-                            .find(|(_, var)| var.val == discr_bits)
-                    }
-                    _ => span_bug!(self.cur_span(), "tagged layout for non-adt non-generator"),
-                }
-                .ok_or_else(|| err_ub!(InvalidTag(Scalar::from_uint(tag_bits, tag_layout.size))))?;
-                // Return the cast value, and the index.
-                (discr_val, index.0)
-            }
-            TagEncoding::Niche { untagged_variant, ref niche_variants, niche_start } => {
-                let tag_val = tag_val.to_scalar();
-                // Compute the variant this niche value/"tag" corresponds to. With niche layout,
-                // discriminant (encoded in niche/tag) and variant index are the same.
-                let variants_start = niche_variants.start().as_u32();
-                let variants_end = niche_variants.end().as_u32();
-                let variant = match tag_val.try_to_int() {
-                    Err(dbg_val) => {
-                        // So this is a pointer then, and casting to an int failed.
-                        // Can only happen during CTFE.
-                        // The niche must be just 0, and the ptr not null, then we know this is
-                        // okay. Everything else, we conservatively reject.
-                        let ptr_valid = niche_start == 0
-                            && variants_start == variants_end
-                            && !self.scalar_may_be_null(tag_val)?;
-                        if !ptr_valid {
-                            throw_ub!(InvalidTag(dbg_val))
-                        }
-                        untagged_variant
-                    }
-                    Ok(tag_bits) => {
-                        let tag_bits = tag_bits.assert_bits(tag_layout.size);
-                        // We need to use machine arithmetic to get the relative variant idx:
-                        // variant_index_relative = tag_val - niche_start_val
-                        let tag_val = ImmTy::from_uint(tag_bits, tag_layout);
-                        let niche_start_val = ImmTy::from_uint(niche_start, tag_layout);
-                        let variant_index_relative_val =
-                            self.binary_op(mir::BinOp::Sub, &tag_val, &niche_start_val)?;
-                        let variant_index_relative =
-                            variant_index_relative_val.to_scalar().assert_bits(tag_val.layout.size);
-                        // Check if this is in the range that indicates an actual discriminant.
-                        if variant_index_relative <= u128::from(variants_end - variants_start) {
-                            let variant_index_relative = u32::try_from(variant_index_relative)
-                                .expect("we checked that this fits into a u32");
-                            // Then computing the absolute variant idx should not overflow any more.
-                            let variant_index = variants_start
-                                .checked_add(variant_index_relative)
-                                .expect("overflow computing absolute variant idx");
-                            let variants_len = op
-                                .layout
-                                .ty
-                                .ty_adt_def()
-                                .expect("tagged layout for non adt")
-                                .variants()
-                                .len();
-                            assert!(usize::try_from(variant_index).unwrap() < variants_len);
-                            VariantIdx::from_u32(variant_index)
-                        } else {
-                            untagged_variant
-                        }
-                    }
-                };
-                // Compute the size of the scalar we need to return.
-                // No need to cast, because the variant index directly serves as discriminant and is
-                // encoded in the tag.
-                (Scalar::from_uint(variant.as_u32(), discr_layout.size), variant)
-            }
-        })
-    }
 }
 
 // Some nodes are used a lot. Make sure they don't unintentionally get bigger.
diff --git a/compiler/rustc_const_eval/src/interpret/operator.rs b/compiler/rustc_const_eval/src/interpret/operator.rs
index 1f1d0665139..4decfe863e6 100644
--- a/compiler/rustc_const_eval/src/interpret/operator.rs
+++ b/compiler/rustc_const_eval/src/interpret/operator.rs
@@ -1,5 +1,3 @@
-use std::convert::TryFrom;
-
 use rustc_apfloat::Float;
 use rustc_middle::mir;
 use rustc_middle::mir::interpret::{InterpResult, Scalar};
@@ -12,33 +10,25 @@ use super::{ImmTy, Immediate, InterpCx, Machine, PlaceTy};
 impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
     /// Applies the binary operation `op` to the two operands and writes a tuple of the result
     /// and a boolean signifying the potential overflow to the destination.
-    ///
-    /// `force_overflow_checks` indicates whether overflow checks should be done even when
-    /// `tcx.sess.overflow_checks()` is `false`.
     pub fn binop_with_overflow(
         &mut self,
         op: mir::BinOp,
-        force_overflow_checks: bool,
         left: &ImmTy<'tcx, M::Provenance>,
         right: &ImmTy<'tcx, M::Provenance>,
         dest: &PlaceTy<'tcx, M::Provenance>,
     ) -> InterpResult<'tcx> {
         let (val, overflowed, ty) = self.overflowing_binary_op(op, &left, &right)?;
         debug_assert_eq!(
-            self.tcx.intern_tup(&[ty, self.tcx.types.bool]),
+            self.tcx.mk_tup(&[ty, self.tcx.types.bool]),
             dest.layout.ty,
             "type mismatch for result of {:?}",
             op,
         );
-        // As per https://github.com/rust-lang/rust/pull/98738, we always return `false` in the 2nd
-        // component when overflow checking is disabled.
-        let overflowed =
-            overflowed && (force_overflow_checks || M::checked_binop_checks_overflow(self));
         // Write the result to `dest`.
         if let Abi::ScalarPair(..) = dest.layout.abi {
             // We can use the optimized path and avoid `place_field` (which might do
             // `force_allocation`).
-            let pair = Immediate::ScalarPair(val.into(), Scalar::from_bool(overflowed).into());
+            let pair = Immediate::ScalarPair(val, Scalar::from_bool(overflowed));
             self.write_immediate(pair, dest)?;
         } else {
             assert!(self.tcx.sess.opts.unstable_opts.randomize_layout);
diff --git a/compiler/rustc_const_eval/src/interpret/place.rs b/compiler/rustc_const_eval/src/interpret/place.rs
index b0625b5f412..3c463500a60 100644
--- a/compiler/rustc_const_eval/src/interpret/place.rs
+++ b/compiler/rustc_const_eval/src/interpret/place.rs
@@ -2,11 +2,13 @@
 //! into a place.
 //! All high-level functions to write to memory work on places as destinations.
 
+use either::{Either, Left, Right};
+
 use rustc_ast::Mutability;
 use rustc_middle::mir;
 use rustc_middle::ty;
-use rustc_middle::ty::layout::{LayoutOf, PrimitiveExt, TyAndLayout};
-use rustc_target::abi::{self, Abi, Align, HasDataLayout, Size, TagEncoding, VariantIdx};
+use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
+use rustc_target::abi::{self, Abi, Align, HasDataLayout, Size, VariantIdx};
 
 use super::{
     alloc_range, mir_assign_valid_types, AllocId, AllocRef, AllocRefMut, CheckInAllocMsg,
@@ -24,6 +26,7 @@ pub enum MemPlaceMeta<Prov: Provenance = AllocId> {
 }
 
 impl<Prov: Provenance> MemPlaceMeta<Prov> {
+    #[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980)
     pub fn unwrap_meta(self) -> Scalar<Prov> {
         match self {
             Self::Meta(s) => s,
@@ -139,18 +142,22 @@ impl<Prov: Provenance> MemPlace<Prov> {
         match self.meta {
             MemPlaceMeta::None => Immediate::from(Scalar::from_maybe_pointer(self.ptr, cx)),
             MemPlaceMeta::Meta(meta) => {
-                Immediate::ScalarPair(Scalar::from_maybe_pointer(self.ptr, cx).into(), meta.into())
+                Immediate::ScalarPair(Scalar::from_maybe_pointer(self.ptr, cx), meta)
             }
         }
     }
 
     #[inline]
-    pub fn offset_with_meta<'tcx>(
+    pub(super) fn offset_with_meta<'tcx>(
         self,
         offset: Size,
         meta: MemPlaceMeta<Prov>,
         cx: &impl HasDataLayout,
     ) -> InterpResult<'tcx, Self> {
+        debug_assert!(
+            !meta.has_meta() || self.meta.has_meta(),
+            "cannot use `offset_with_meta` to add metadata to a place"
+        );
         Ok(MemPlace { ptr: self.ptr.offset(offset, cx)?, meta })
     }
 }
@@ -176,12 +183,15 @@ impl<'tcx, Prov: Provenance> MPlaceTy<'tcx, Prov> {
     pub fn fake_alloc_zst(layout: TyAndLayout<'tcx>) -> Self {
         assert!(layout.is_zst());
         let align = layout.align.abi;
-        let ptr = Pointer::from_addr(align.bytes()); // no provenance, absolute address
+        let ptr = Pointer::from_addr_invalid(align.bytes()); // no provenance, absolute address
         MPlaceTy { mplace: MemPlace { ptr, meta: MemPlaceMeta::None }, layout, align }
     }
 
+    /// Offset the place in memory and change its metadata.
+    ///
+    /// This can go wrong very easily if you give the wrong layout for the new place!
     #[inline]
-    pub fn offset_with_meta(
+    pub(crate) fn offset_with_meta(
         &self,
         offset: Size,
         meta: MemPlaceMeta<Prov>,
@@ -195,13 +205,16 @@ impl<'tcx, Prov: Provenance> MPlaceTy<'tcx, Prov> {
         })
     }
 
+    /// Offset the place in memory.
+    ///
+    /// This can go wrong very easily if you give the wrong layout for the new place!
     pub fn offset(
         &self,
         offset: Size,
         layout: TyAndLayout<'tcx>,
         cx: &impl HasDataLayout,
     ) -> InterpResult<'tcx, Self> {
-        assert!(!layout.is_unsized());
+        assert!(layout.is_sized());
         self.offset_with_meta(offset, MemPlaceMeta::None, layout, cx)
     }
 
@@ -227,11 +240,11 @@ impl<'tcx, Prov: Provenance> MPlaceTy<'tcx, Prov> {
         if self.layout.is_unsized() {
             // We need to consult `meta` metadata
             match self.layout.ty.kind() {
-                ty::Slice(..) | ty::Str => self.mplace.meta.unwrap_meta().to_machine_usize(cx),
+                ty::Slice(..) | ty::Str => self.mplace.meta.unwrap_meta().to_target_usize(cx),
                 _ => bug!("len not supported on unsized type {:?}", self.layout.ty),
             }
         } else {
-            // Go through the layout.  There are lots of types that support a length,
+            // Go through the layout. There are lots of types that support a length,
             // e.g., SIMD types. (But not all repr(simd) types even have FieldsShape::Array!)
             match self.layout.fields {
                 abi::FieldsShape::Array { count, .. } => Ok(count),
@@ -239,49 +252,51 @@ impl<'tcx, Prov: Provenance> MPlaceTy<'tcx, Prov> {
             }
         }
     }
-
-    #[inline]
-    pub(super) fn vtable(&self) -> Scalar<Prov> {
-        match self.layout.ty.kind() {
-            ty::Dynamic(..) => self.mplace.meta.unwrap_meta(),
-            _ => bug!("vtable not supported on type {:?}", self.layout.ty),
-        }
-    }
 }
 
 // These are defined here because they produce a place.
 impl<'tcx, Prov: Provenance> OpTy<'tcx, Prov> {
     #[inline(always)]
-    pub fn try_as_mplace(&self) -> Result<MPlaceTy<'tcx, Prov>, ImmTy<'tcx, Prov>> {
+    pub fn as_mplace_or_imm(&self) -> Either<MPlaceTy<'tcx, Prov>, ImmTy<'tcx, Prov>> {
         match **self {
             Operand::Indirect(mplace) => {
-                Ok(MPlaceTy { mplace, layout: self.layout, align: self.align.unwrap() })
+                Left(MPlaceTy { mplace, layout: self.layout, align: self.align.unwrap() })
             }
-            Operand::Immediate(imm) => Err(ImmTy::from_immediate(imm, self.layout)),
+            Operand::Immediate(imm) => Right(ImmTy::from_immediate(imm, self.layout)),
         }
     }
 
     #[inline(always)]
     #[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980)
     pub fn assert_mem_place(&self) -> MPlaceTy<'tcx, Prov> {
-        self.try_as_mplace().unwrap()
+        self.as_mplace_or_imm().left().unwrap_or_else(|| {
+            bug!(
+                "OpTy of type {} was immediate when it was expected to be an MPlace",
+                self.layout.ty
+            )
+        })
     }
 }
 
 impl<'tcx, Prov: Provenance> PlaceTy<'tcx, Prov> {
     /// A place is either an mplace or some local.
     #[inline]
-    pub fn try_as_mplace(&self) -> Result<MPlaceTy<'tcx, Prov>, (usize, mir::Local)> {
+    pub fn as_mplace_or_local(&self) -> Either<MPlaceTy<'tcx, Prov>, (usize, mir::Local)> {
         match **self {
-            Place::Ptr(mplace) => Ok(MPlaceTy { mplace, layout: self.layout, align: self.align }),
-            Place::Local { frame, local } => Err((frame, local)),
+            Place::Ptr(mplace) => Left(MPlaceTy { mplace, layout: self.layout, align: self.align }),
+            Place::Local { frame, local } => Right((frame, local)),
         }
     }
 
     #[inline(always)]
     #[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980)
     pub fn assert_mem_place(&self) -> MPlaceTy<'tcx, Prov> {
-        self.try_as_mplace().unwrap()
+        self.as_mplace_or_local().left().unwrap_or_else(|| {
+            bug!(
+                "PlaceTy of type {} was a local when it was expected to be an MPlace",
+                self.layout.ty
+            )
+        })
     }
 }
 
@@ -292,7 +307,7 @@ where
     M: Machine<'mir, 'tcx, Provenance = Prov>,
 {
     /// Take a value, which represents a (thin or wide) reference, and make it a place.
-    /// Alignment is just based on the type.  This is the inverse of `MemPlace::to_ref()`.
+    /// Alignment is just based on the type. This is the inverse of `MemPlace::to_ref()`.
     ///
     /// Only call this if you are sure the place is "valid" (aligned and inbounds), or do not
     /// want to ever use the place for memory access!
@@ -316,8 +331,7 @@ where
         Ok(MPlaceTy { mplace, layout, align })
     }
 
-    /// Take an operand, representing a pointer, and dereference it to a place -- that
-    /// will always be a MemPlace.  Lives in `place.rs` because it creates a place.
+    /// Take an operand, representing a pointer, and dereference it to a place.
     #[instrument(skip(self), level = "debug")]
     pub fn deref_operand(
         &self,
@@ -331,7 +345,7 @@ where
         }
 
         let mplace = self.ref_to_mplace(&val)?;
-        self.check_mplace_access(mplace, CheckInAllocMsg::DerefTest)?;
+        self.check_mplace(mplace)?;
         Ok(mplace)
     }
 
@@ -339,8 +353,9 @@ where
     pub(super) fn get_place_alloc(
         &self,
         place: &MPlaceTy<'tcx, M::Provenance>,
-    ) -> InterpResult<'tcx, Option<AllocRef<'_, 'tcx, M::Provenance, M::AllocExtra>>> {
-        assert!(!place.layout.is_unsized());
+    ) -> InterpResult<'tcx, Option<AllocRef<'_, 'tcx, M::Provenance, M::AllocExtra, M::Bytes>>>
+    {
+        assert!(place.layout.is_sized());
         assert!(!place.meta.has_meta());
         let size = place.layout.size;
         self.get_ptr_alloc(place.ptr, size, place.align)
@@ -350,25 +365,22 @@ where
     pub(super) fn get_place_alloc_mut(
         &mut self,
         place: &MPlaceTy<'tcx, M::Provenance>,
-    ) -> InterpResult<'tcx, Option<AllocRefMut<'_, 'tcx, M::Provenance, M::AllocExtra>>> {
-        assert!(!place.layout.is_unsized());
+    ) -> InterpResult<'tcx, Option<AllocRefMut<'_, 'tcx, M::Provenance, M::AllocExtra, M::Bytes>>>
+    {
+        assert!(place.layout.is_sized());
         assert!(!place.meta.has_meta());
         let size = place.layout.size;
         self.get_ptr_alloc_mut(place.ptr, size, place.align)
     }
 
     /// Check if this mplace is dereferenceable and sufficiently aligned.
-    fn check_mplace_access(
-        &self,
-        mplace: MPlaceTy<'tcx, M::Provenance>,
-        msg: CheckInAllocMsg,
-    ) -> InterpResult<'tcx> {
+    pub fn check_mplace(&self, mplace: MPlaceTy<'tcx, M::Provenance>) -> InterpResult<'tcx> {
         let (size, align) = self
             .size_and_align_of_mplace(&mplace)?
             .unwrap_or((mplace.layout.size, mplace.layout.align.abi));
         assert!(mplace.align <= align, "dynamic alignment less strict than static one?");
-        let align = M::enforce_alignment(self).then_some(align);
-        self.check_ptr_access_align(mplace.ptr, size, align.unwrap_or(Align::ONE), msg)?;
+        let align = if M::enforce_alignment(self).should_check() { align } else { Align::ONE };
+        self.check_ptr_access_align(mplace.ptr, size, align, CheckInAllocMsg::DerefTest)?;
         Ok(())
     }
 
@@ -485,7 +497,7 @@ where
         src: Immediate<M::Provenance>,
         dest: &PlaceTy<'tcx, M::Provenance>,
     ) -> InterpResult<'tcx> {
-        assert!(!dest.layout.is_unsized(), "Cannot write unsized data");
+        assert!(dest.layout.is_sized(), "Cannot write unsized data");
         trace!("write_immediate: {:?} <- {:?}: {}", *dest, src, dest.layout.ty);
 
         // See if we can avoid an allocation. This is the counterpart to `read_immediate_raw`,
@@ -569,9 +581,9 @@ where
     }
 
     pub fn write_uninit(&mut self, dest: &PlaceTy<'tcx, M::Provenance>) -> InterpResult<'tcx> {
-        let mplace = match dest.try_as_mplace() {
-            Ok(mplace) => mplace,
-            Err((frame, local)) => {
+        let mplace = match dest.as_mplace_or_local() {
+            Left(mplace) => mplace,
+            Right((frame, local)) => {
                 match M::access_local_mut(self, frame, local)? {
                     Operand::Immediate(local) => {
                         *local = Immediate::Uninit;
@@ -639,7 +651,7 @@ where
         // Let us see if the layout is simple so we take a shortcut,
         // avoid force_allocation.
         let src = match self.read_immediate_raw(src)? {
-            Ok(src_val) => {
+            Right(src_val) => {
                 // FIXME(const_prop): Const-prop can possibly evaluate an
                 // unsized copy operation when it thinks that the type is
                 // actually sized, due to a trivially false where-clause
@@ -669,7 +681,7 @@ where
                     )
                 };
             }
-            Err(mplace) => mplace,
+            Left(mplace) => mplace,
         };
         // Slow path, this does not fit into an immediate. Just memcpy.
         trace!("copy_op: {:?} <- {:?}: {}", *dest, src, dest.layout.ty);
@@ -706,7 +718,7 @@ where
                     &mut Operand::Immediate(local_val) => {
                         // We need to make an allocation.
 
-                        // We need the layout of the local.  We can NOT use the layout we got,
+                        // We need the layout of the local. We can NOT use the layout we got,
                         // that might e.g., be an inner field of a struct with `Scalar` layout,
                         // that has different alignment than the outer field.
                         let local_layout =
@@ -746,7 +758,7 @@ where
         layout: TyAndLayout<'tcx>,
         kind: MemoryKind<M::MemoryKind>,
     ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
-        assert!(!layout.is_unsized());
+        assert!(layout.is_sized());
         let ptr = self.allocate_ptr(layout.size, layout.align.abi, kind)?;
         Ok(MPlaceTy::from_aligned_ptr(ptr.into(), layout))
     }
@@ -757,9 +769,9 @@ where
         str: &str,
         kind: MemoryKind<M::MemoryKind>,
         mutbl: Mutability,
-    ) -> MPlaceTy<'tcx, M::Provenance> {
-        let ptr = self.allocate_bytes_ptr(str.as_bytes(), Align::ONE, kind, mutbl);
-        let meta = Scalar::from_machine_usize(u64::try_from(str.len()).unwrap(), self);
+    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
+        let ptr = self.allocate_bytes_ptr(str.as_bytes(), Align::ONE, kind, mutbl)?;
+        let meta = Scalar::from_target_usize(u64::try_from(str.len()).unwrap(), self);
         let mplace = MemPlace { ptr: ptr.into(), meta: MemPlaceMeta::Meta(meta) };
 
         let ty = self.tcx.mk_ref(
@@ -767,95 +779,35 @@ where
             ty::TypeAndMut { ty: self.tcx.types.str_, mutbl },
         );
         let layout = self.layout_of(ty).unwrap();
-        MPlaceTy { mplace, layout, align: layout.align.abi }
+        Ok(MPlaceTy { mplace, layout, align: layout.align.abi })
     }
 
-    /// Writes the discriminant of the given variant.
-    #[instrument(skip(self), level = "debug")]
-    pub fn write_discriminant(
+    /// Writes the aggregate to the destination.
+    #[instrument(skip(self), level = "trace")]
+    pub fn write_aggregate(
         &mut self,
-        variant_index: VariantIdx,
+        kind: &mir::AggregateKind<'tcx>,
+        operands: &[mir::Operand<'tcx>],
         dest: &PlaceTy<'tcx, M::Provenance>,
     ) -> InterpResult<'tcx> {
-        // This must be an enum or generator.
-        match dest.layout.ty.kind() {
-            ty::Adt(adt, _) => assert!(adt.is_enum()),
-            ty::Generator(..) => {}
-            _ => span_bug!(
-                self.cur_span(),
-                "write_discriminant called on non-variant-type (neither enum nor generator)"
-            ),
-        }
-        // Layout computation excludes uninhabited variants from consideration
-        // therefore there's no way to represent those variants in the given layout.
-        // Essentially, uninhabited variants do not have a tag that corresponds to their
-        // discriminant, so we cannot do anything here.
-        // When evaluating we will always error before even getting here, but ConstProp 'executes'
-        // dead code, so we cannot ICE here.
-        if dest.layout.for_variant(self, variant_index).abi.is_uninhabited() {
-            throw_ub!(UninhabitedEnumVariantWritten)
-        }
-
-        match dest.layout.variants {
-            abi::Variants::Single { index } => {
-                assert_eq!(index, variant_index);
-            }
-            abi::Variants::Multiple {
-                tag_encoding: TagEncoding::Direct,
-                tag: tag_layout,
-                tag_field,
-                ..
-            } => {
-                // No need to validate that the discriminant here because the
-                // `TyAndLayout::for_variant()` call earlier already checks the variant is valid.
-
-                let discr_val =
-                    dest.layout.ty.discriminant_for_variant(*self.tcx, variant_index).unwrap().val;
-
-                // raw discriminants for enums are isize or bigger during
-                // their computation, but the in-memory tag is the smallest possible
-                // representation
-                let size = tag_layout.size(self);
-                let tag_val = size.truncate(discr_val);
-
-                let tag_dest = self.place_field(dest, tag_field)?;
-                self.write_scalar(Scalar::from_uint(tag_val, size), &tag_dest)?;
-            }
-            abi::Variants::Multiple {
-                tag_encoding:
-                    TagEncoding::Niche { untagged_variant, ref niche_variants, niche_start },
-                tag: tag_layout,
-                tag_field,
-                ..
-            } => {
-                // No need to validate that the discriminant here because the
-                // `TyAndLayout::for_variant()` call earlier already checks the variant is valid.
-
-                if variant_index != untagged_variant {
-                    let variants_start = niche_variants.start().as_u32();
-                    let variant_index_relative = variant_index
-                        .as_u32()
-                        .checked_sub(variants_start)
-                        .expect("overflow computing relative variant idx");
-                    // We need to use machine arithmetic when taking into account `niche_start`:
-                    // tag_val = variant_index_relative + niche_start_val
-                    let tag_layout = self.layout_of(tag_layout.primitive().to_int_ty(*self.tcx))?;
-                    let niche_start_val = ImmTy::from_uint(niche_start, tag_layout);
-                    let variant_index_relative_val =
-                        ImmTy::from_uint(variant_index_relative, tag_layout);
-                    let tag_val = self.binary_op(
-                        mir::BinOp::Add,
-                        &variant_index_relative_val,
-                        &niche_start_val,
-                    )?;
-                    // Write result.
-                    let niche_dest = self.place_field(dest, tag_field)?;
-                    self.write_immediate(*tag_val, &niche_dest)?;
-                }
+        self.write_uninit(&dest)?;
+        let (variant_index, variant_dest, active_field_index) = match *kind {
+            mir::AggregateKind::Adt(_, variant_index, _, _, active_field_index) => {
+                let variant_dest = self.place_downcast(&dest, variant_index)?;
+                (variant_index, variant_dest, active_field_index)
             }
+            _ => (VariantIdx::from_u32(0), dest.clone(), None),
+        };
+        if active_field_index.is_some() {
+            assert_eq!(operands.len(), 1);
         }
-
-        Ok(())
+        for (field_index, operand) in operands.iter().enumerate() {
+            let field_index = active_field_index.unwrap_or(field_index);
+            let field_dest = self.place_field(&variant_dest, field_index)?;
+            let op = self.eval_operand(operand, Some(field_dest.layout))?;
+            self.copy_op(&op, &field_dest, /*allow_transmute*/ false)?;
+        }
+        self.write_discriminant(variant_index, &dest)
     }
 
     pub fn raw_const_to_mplace(
@@ -870,11 +822,16 @@ where
     }
 
     /// Turn a place with a `dyn Trait` type into a place with the actual dynamic type.
+    /// Also returns the vtable.
     pub(super) fn unpack_dyn_trait(
         &self,
         mplace: &MPlaceTy<'tcx, M::Provenance>,
-    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
-        let vtable = mplace.vtable().to_pointer(self)?; // also sanity checks the type
+    ) -> InterpResult<'tcx, (MPlaceTy<'tcx, M::Provenance>, Pointer<Option<M::Provenance>>)> {
+        assert!(
+            matches!(mplace.layout.ty.kind(), ty::Dynamic(_, _, ty::Dyn)),
+            "`unpack_dyn_trait` only makes sense on `dyn*` types"
+        );
+        let vtable = mplace.meta.unwrap_meta().to_pointer(self)?;
         let (ty, _) = self.get_ptr_vtable(vtable)?;
         let layout = self.layout_of(ty)?;
 
@@ -883,7 +840,26 @@ where
             layout,
             align: layout.align.abi,
         };
-        Ok(mplace)
+        Ok((mplace, vtable))
+    }
+
+    /// Turn an operand with a `dyn* Trait` type into an operand with the actual dynamic type.
+    /// Also returns the vtable.
+    pub(super) fn unpack_dyn_star(
+        &self,
+        op: &OpTy<'tcx, M::Provenance>,
+    ) -> InterpResult<'tcx, (OpTy<'tcx, M::Provenance>, Pointer<Option<M::Provenance>>)> {
+        assert!(
+            matches!(op.layout.ty.kind(), ty::Dynamic(_, _, ty::DynStar)),
+            "`unpack_dyn_star` only makes sense on `dyn*` types"
+        );
+        let data = self.operand_field(&op, 0)?;
+        let vtable = self.operand_field(&op, 1)?;
+        let vtable = self.read_pointer(&vtable)?;
+        let (ty, _) = self.get_ptr_vtable(vtable)?;
+        let layout = self.layout_of(ty)?;
+        let data = data.transmute(layout);
+        Ok((data, vtable))
     }
 }
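// The old `try_as_mplace` returned a `Result` even though `Err` carried no
// error meaning; the diff swaps it for `either::Either`. A small sketch of
// the new pattern, assuming only the `either` crate rather than the real
// MPlaceTy/ImmTy types:
use either::{Either, Left, Right};

fn as_place_or_value(indirect: bool) -> Either<&'static str, u32> {
    if indirect { Left("a place in memory") } else { Right(42) }
}

fn main() {
    // `.left()` is how the new `assert_mem_place` extracts the place case
    // before bailing out with `bug!` on the immediate case.
    assert_eq!(as_place_or_value(true).left(), Some("a place in memory"));
    assert_eq!(as_place_or_value(false).right(), Some(42));
}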
 
diff --git a/compiler/rustc_const_eval/src/interpret/projection.rs b/compiler/rustc_const_eval/src/interpret/projection.rs
index 6b2e2bb8aca..91da930db4f 100644
--- a/compiler/rustc_const_eval/src/interpret/projection.rs
+++ b/compiler/rustc_const_eval/src/interpret/projection.rs
@@ -7,6 +7,8 @@
 //! but we still need to do bounds checking and adjust the layout. To not duplicate that with MPlaceTy, we actually
 //! implement the logic on OpTy, and MPlaceTy calls that.
 
+use either::{Left, Right};
+
 use rustc_middle::mir;
 use rustc_middle::ty;
 use rustc_middle::ty::layout::LayoutOf;
@@ -84,13 +86,13 @@ where
         base: &OpTy<'tcx, M::Provenance>,
         field: usize,
     ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
-        let base = match base.try_as_mplace() {
-            Ok(ref mplace) => {
+        let base = match base.as_mplace_or_imm() {
+            Left(ref mplace) => {
                 // We can reuse the mplace field computation logic for indirect operands.
                 let field = self.mplace_field(mplace, field)?;
                 return Ok(field.into());
             }
-            Err(value) => value,
+            Right(value) => value,
         };
 
         let field_layout = base.layout.field(self, field);
@@ -204,8 +206,8 @@ where
         }
     }
 
-    // Iterates over all fields of an array. Much more efficient than doing the
-    // same by repeatedly calling `operand_index`.
+    /// Iterates over all fields of an array. Much more efficient than doing the
+    /// same by repeatedly calling `operand_index`.
     pub fn operand_array_fields<'a>(
         &self,
         base: &'a OpTy<'tcx, Prov>,
@@ -317,7 +319,7 @@ where
             // implement this.
             ty::Array(inner, _) => (MemPlaceMeta::None, self.tcx.mk_array(*inner, inner_len)),
             ty::Slice(..) => {
-                let len = Scalar::from_machine_usize(inner_len, self);
+                let len = Scalar::from_target_usize(inner_len, self);
                 (MemPlaceMeta::Meta(len), base.layout.ty)
             }
             _ => {
@@ -361,7 +363,7 @@ where
             Index(local) => {
                 let layout = self.layout_of(self.tcx.types.usize)?;
                 let n = self.local_to_op(self.frame(), local, Some(layout))?;
-                let n = self.read_scalar(&n)?.to_machine_usize(self)?;
+                let n = self.read_target_usize(&n)?;
                 self.place_index(base, n)?
             }
             ConstantIndex { offset, min_length, from_end } => {
@@ -390,7 +392,7 @@ where
             Index(local) => {
                 let layout = self.layout_of(self.tcx.types.usize)?;
                 let n = self.local_to_op(self.frame(), local, Some(layout))?;
-                let n = self.read_scalar(&n)?.to_machine_usize(self)?;
+                let n = self.read_target_usize(&n)?;
                 self.operand_index(base, n)?
             }
             ConstantIndex { offset, min_length, from_end } => {
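// The machine->target renames (`to_target_usize`, `read_target_usize`)
// reflect that the interpreted program's usize width is a property of the
// compilation target, not the host. A hedged sketch with a hypothetical
// helper, not the real interpreter API:
fn to_target_usize(raw: u128, target_ptr_bits: u32) -> Option<u64> {
    let max = (1u128 << target_ptr_bits) - 1;
    // Reject values that do not fit the target's pointer width.
    (raw <= max).then(|| raw as u64)
}

fn main() {
    assert_eq!(to_target_usize(70_000, 32), Some(70_000));
    assert_eq!(to_target_usize(1 << 40, 32), None); // too big for a 32-bit target
}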
diff --git a/compiler/rustc_const_eval/src/interpret/step.rs b/compiler/rustc_const_eval/src/interpret/step.rs
index c6e04cbfb6b..9a366364e76 100644
--- a/compiler/rustc_const_eval/src/interpret/step.rs
+++ b/compiler/rustc_const_eval/src/interpret/step.rs
@@ -2,11 +2,13 @@
 //!
 //! The main entry point is the `step` method.
 
+use either::Either;
+
 use rustc_middle::mir;
 use rustc_middle::mir::interpret::{InterpResult, Scalar};
 use rustc_middle::ty::layout::LayoutOf;
 
-use super::{InterpCx, Machine};
+use super::{ImmTy, InterpCx, Machine};
 
 /// Classify whether an operator is "left-homogeneous", i.e., the LHS has the
 /// same type as the result.
@@ -30,11 +32,6 @@ fn binop_right_homogeneous(op: mir::BinOp) -> bool {
 }
 
 impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
-    pub fn run(&mut self) -> InterpResult<'tcx> {
-        while self.step()? {}
-        Ok(())
-    }
-
     /// Returns `true` as long as there are more things to do.
     ///
     /// This is used by [priroda](https://github.com/oli-obk/priroda)
@@ -46,7 +43,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             return Ok(false);
         }
 
-        let Ok(loc) = self.frame().loc else {
+        let Either::Left(loc) = self.frame().loc else {
             // We are unwinding and this fn has no cleanup code.
             // Just go on unwinding.
             trace!("unwinding: skipping frame");
@@ -61,7 +58,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             // Make sure we are not updating `statement_index` of the wrong frame.
             assert_eq!(old_frames, self.frame_idx());
             // Advance the program counter.
-            self.frame_mut().loc.as_mut().unwrap().statement_index += 1;
+            self.frame_mut().loc.as_mut().left().unwrap().statement_index += 1;
             return Ok(true);
         }
 
@@ -111,13 +108,13 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             // Stacked Borrows.
             Retag(kind, place) => {
                 let dest = self.eval_place(**place)?;
-                M::retag(self, *kind, &dest)?;
+                M::retag_place_contents(self, *kind, &dest)?;
             }
 
-            Intrinsic(box ref intrinsic) => self.emulate_nondiverging_intrinsic(intrinsic)?,
+            Intrinsic(box intrinsic) => self.emulate_nondiverging_intrinsic(intrinsic)?,
 
             // Statements we do not track.
-            AscribeUserType(..) => {}
+            PlaceMention(..) | AscribeUserType(..) => {}
 
             // Currently, Miri discards Coverage statements. Coverage statements are only injected
             // via an optional compile time MIR pass and have no side effects. Since Coverage
@@ -132,6 +129,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             // FIXME(#73156): Handle source code coverage in const eval
             Coverage(..) => {}
 
+            ConstEvalCounter => {
+                M::increment_const_eval_counter(self)?;
+            }
+
             // Defined to do nothing. These are added by optimization passes, to avoid changing the
             // size of MIR constantly.
             Nop => {}
@@ -166,8 +167,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 self.copy_op(&op, &dest, /*allow_transmute*/ false)?;
             }
 
-            CopyForDeref(ref place) => {
-                let op = self.eval_place_to_op(*place, Some(dest.layout))?;
+            CopyForDeref(place) => {
+                let op = self.eval_place_to_op(place, Some(dest.layout))?;
                 self.copy_op(&op, &dest, /* allow_transmute*/ false)?;
             }
 
@@ -184,9 +185,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 let left = self.read_immediate(&self.eval_operand(left, None)?)?;
                 let layout = binop_right_homogeneous(bin_op).then_some(left.layout);
                 let right = self.read_immediate(&self.eval_operand(right, layout)?)?;
-                self.binop_with_overflow(
-                    bin_op, /*force_overflow_checks*/ false, &left, &right, &dest,
-                )?;
+                self.binop_with_overflow(bin_op, &left, &right, &dest)?;
             }
 
             UnaryOp(un_op, ref operand) => {
@@ -198,18 +197,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             }
 
             Aggregate(box ref kind, ref operands) => {
-                assert!(matches!(kind, mir::AggregateKind::Array(..)));
-
-                for (field_index, operand) in operands.iter().enumerate() {
-                    let op = self.eval_operand(operand, None)?;
-                    let field_dest = self.place_field(&dest, field_index)?;
-                    self.copy_op(&op, &field_dest, /*allow_transmute*/ false)?;
-                }
+                self.write_aggregate(kind, operands, &dest)?;
             }
 
             Repeat(ref operand, _) => {
                 let src = self.eval_operand(operand, None)?;
-                assert!(!src.layout.is_unsized());
+                assert!(src.layout.is_sized());
                 let dest = self.force_allocation(&dest)?;
                 let length = dest.len(self)?;
 
@@ -247,13 +240,44 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 let src = self.eval_place(place)?;
                 let op = self.place_to_op(&src)?;
                 let len = op.len(self)?;
-                self.write_scalar(Scalar::from_machine_usize(len, self), &dest)?;
+                self.write_scalar(Scalar::from_target_usize(len, self), &dest)?;
+            }
+
+            Ref(_, borrow_kind, place) => {
+                let src = self.eval_place(place)?;
+                let place = self.force_allocation(&src)?;
+                let val = ImmTy::from_immediate(place.to_ref(self), dest.layout);
+                // A fresh reference was created, make sure it gets retagged.
+                let val = M::retag_ptr_value(
+                    self,
+                    if borrow_kind.allows_two_phase_borrow() {
+                        mir::RetagKind::TwoPhase
+                    } else {
+                        mir::RetagKind::Default
+                    },
+                    &val,
+                )?;
+                self.write_immediate(*val, &dest)?;
             }
 
-            AddressOf(_, place) | Ref(_, _, place) => {
+            AddressOf(_, place) => {
+                // Figure out whether this is an addr_of of an already raw place.
+                let place_base_raw = if place.has_deref() {
+                    let ty = self.frame().body.local_decls[place.local].ty;
+                    ty.is_unsafe_ptr()
+                } else {
+                    // Not a deref, and thus not raw.
+                    false
+                };
+
                 let src = self.eval_place(place)?;
                 let place = self.force_allocation(&src)?;
-                self.write_immediate(place.to_ref(self), &dest)?;
+                let mut val = ImmTy::from_immediate(place.to_ref(self), dest.layout);
+                if !place_base_raw {
+                    // If this was not already raw, it needs retagging.
+                    val = M::retag_ptr_value(self, mir::RetagKind::Raw, &val)?;
+                }
+                self.write_immediate(*val, &dest)?;
             }
 
             NullaryOp(null_op, ty) => {
@@ -271,7 +295,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                     mir::NullOp::SizeOf => layout.size.bytes(),
                     mir::NullOp::AlignOf => layout.align.abi.bytes(),
                 };
-                self.write_scalar(Scalar::from_machine_usize(val, self), &dest)?;
+                self.write_scalar(Scalar::from_target_usize(val, self), &dest)?;
             }
 
             ShallowInitBox(ref operand, _) => {
@@ -305,7 +329,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 
         self.eval_terminator(terminator)?;
         if !self.stack().is_empty() {
-            if let Ok(loc) = self.frame().loc {
+            if let Either::Left(loc) = self.frame().loc {
                 info!("// executing {:?}", loc.block);
             }
         }
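// Sketch of the retag decision the rewritten Ref/AddressOf arms encode: a
// fresh reference is always retagged (two-phase when the borrow kind allows
// it), while address-of skips retagging only if the base place was already
// a raw pointer. Hypothetical standalone types, not the MIR ones:
#[derive(Debug, PartialEq)]
enum Retag { Default, TwoPhase, Raw }

fn retag_for_ref(allows_two_phase_borrow: bool) -> Retag {
    if allows_two_phase_borrow { Retag::TwoPhase } else { Retag::Default }
}

fn retag_for_address_of(place_base_raw: bool) -> Option<Retag> {
    // An addr_of on an already-raw base needs no fresh tag.
    if place_base_raw { None } else { Some(Retag::Raw) }
}

fn main() {
    assert_eq!(retag_for_ref(true), Retag::TwoPhase);
    assert_eq!(retag_for_ref(false), Retag::Default);
    assert_eq!(retag_for_address_of(true), None);
    assert_eq!(retag_for_address_of(false), Some(Retag::Raw));
}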
diff --git a/compiler/rustc_const_eval/src/interpret/terminator.rs b/compiler/rustc_const_eval/src/interpret/terminator.rs
index 57e40e168fa..685a5599cde 100644
--- a/compiler/rustc_const_eval/src/interpret/terminator.rs
+++ b/compiler/rustc_const_eval/src/interpret/terminator.rs
@@ -29,10 +29,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 
             Goto { target } => self.go_to_block(target),
 
-            SwitchInt { ref discr, ref targets, switch_ty } => {
+            SwitchInt { ref discr, ref targets } => {
                 let discr = self.read_immediate(&self.eval_operand(discr, None)?)?;
                 trace!("SwitchInt({:?})", *discr);
-                assert_eq!(discr.layout.ty, switch_ty);
 
                 // Branch to the `otherwise` case by default, if no match is found.
                 let mut target_block = targets.otherwise();
@@ -74,7 +73,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 let fn_sig =
                     self.tcx.normalize_erasing_late_bound_regions(self.param_env, fn_sig_binder);
                 let extra_args = &args[fn_sig.inputs().len()..];
-                let extra_args = self.tcx.mk_type_list(extra_args.iter().map(|arg| arg.layout.ty));
+                let extra_args =
+                    self.tcx.mk_type_list_from_iter(extra_args.iter().map(|arg| arg.layout.ty));
 
                 let (fn_val, fn_abi, with_caller_location) = match *func.layout.ty.kind() {
                     ty::FnPtr(_sig) => {
@@ -120,17 +120,32 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             }
 
             Drop { place, target, unwind } => {
+                let frame = self.frame();
+                let ty = place.ty(&frame.body.local_decls, *self.tcx).ty;
+                let ty = self.subst_from_frame_and_normalize_erasing_regions(frame, ty)?;
+                let instance = Instance::resolve_drop_in_place(*self.tcx, ty);
+                if let ty::InstanceDef::DropGlue(_, None) = instance.def {
+                    // This is the branch we enter if and only if the dropped type has no drop glue
+                    // whatsoever. This can happen as a result of monomorphizing a drop of a
+                    // generic. In order to make sure that generic and non-generic code behaves
+                    // roughly the same (and in keeping with MIR semantics) we do nothing here.
+                    self.go_to_block(target);
+                    return Ok(());
+                }
                 let place = self.eval_place(place)?;
-                let ty = place.layout.ty;
                 trace!("TerminatorKind::drop: {:?}, type {}", place, ty);
-
-                let instance = Instance::resolve_drop_in_place(*self.tcx, ty);
                 self.drop_in_place(&place, instance, target, unwind)?;
             }
 
             Assert { ref cond, expected, ref msg, target, cleanup } => {
+                let ignored = M::ignore_checkable_overflow_assertions(self)
+                    && match msg {
+                        mir::AssertKind::OverflowNeg(..) => true,
+                        mir::AssertKind::Overflow(op, ..) => op.is_checkable(),
+                        _ => false,
+                    };
                 let cond_val = self.read_scalar(&self.eval_operand(cond, None)?)?.to_bool()?;
-                if expected == cond_val {
+                if ignored || expected == cond_val {
                     self.go_to_block(target);
                 } else {
                     M::assert_panic(self, msg, cleanup)?;
@@ -156,11 +171,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             Unreachable => throw_ub!(Unreachable),
 
             // These should never occur for MIR we actually run.
-            DropAndReplace { .. }
-            | FalseEdge { .. }
-            | FalseUnwind { .. }
-            | Yield { .. }
-            | GeneratorDrop => span_bug!(
+            FalseEdge { .. } | FalseUnwind { .. } | Yield { .. } | GeneratorDrop => span_bug!(
                 terminator.source_info.span,
                 "{:#?} should have been eliminated by MIR pass",
                 terminator.kind
@@ -438,7 +449,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                     // they go to.
 
                     // For where they come from: If the ABI is RustCall, we untuple the
-                    // last incoming argument.  These two iterators do not have the same type,
+                    // last incoming argument. These two iterators do not have the same type,
                     // so to keep the code paths uniform we accept an allocation
                     // (for RustCall ABI only).
                     let caller_args: Cow<'_, [OpTy<'tcx, M::Provenance>]> =
@@ -473,7 +484,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                         .filter(|arg_and_abi| !matches!(arg_and_abi.1.mode, PassMode::Ignore));
 
                     // Now we have to spread them out across the callee's locals,
-                    // taking into account the `spread_arg`.  If we could write
+                    // taking into account the `spread_arg`. If we could write
                     // this is a single iterator (that handles `spread_arg`), then
                     // `pass_argument` would be the loop body. It takes care to
                     // not advance `caller_iter` for ZSTs.
@@ -533,7 +544,14 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 let receiver_place = loop {
                     match receiver.layout.ty.kind() {
                         ty::Ref(..) | ty::RawPtr(..) => break self.deref_operand(&receiver)?,
-                        ty::Dynamic(..) => break receiver.assert_mem_place(), // no immediate unsized values
+                        ty::Dynamic(.., ty::Dyn) => break receiver.assert_mem_place(), // no immediate unsized values
+                        ty::Dynamic(.., ty::DynStar) => {
+                            // Not clear how to handle this; so far we assume the receiver is always a pointer.
+                            span_bug!(
+                                self.cur_span(),
+                                "by-value calls on a `dyn*`... are those a thing?"
+                            );
+                        }
                         _ => {
                             // Not there yet, search for the only non-ZST field.
                             let mut non_zst_field = None;
@@ -559,39 +577,59 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                         }
                     }
                 };
-                // Obtain the underlying trait we are working on.
-                let receiver_tail = self
-                    .tcx
-                    .struct_tail_erasing_lifetimes(receiver_place.layout.ty, self.param_env);
-                let ty::Dynamic(data, ..) = receiver_tail.kind() else {
-                    span_bug!(self.cur_span(), "dynamic call on non-`dyn` type {}", receiver_tail)
-                };
 
-                // Get the required information from the vtable.
-                let vptr = receiver_place.meta.unwrap_meta().to_pointer(self)?;
-                let (dyn_ty, dyn_trait) = self.get_ptr_vtable(vptr)?;
-                if dyn_trait != data.principal() {
-                    throw_ub_format!(
-                        "`dyn` call on a pointer whose vtable does not match its type"
-                    );
-                }
+                // Obtain the underlying trait we are working on, and the adjusted receiver argument.
+                let (vptr, dyn_ty, adjusted_receiver) = if let ty::Dynamic(data, _, ty::DynStar) =
+                    receiver_place.layout.ty.kind()
+                {
+                    let (recv, vptr) = self.unpack_dyn_star(&receiver_place.into())?;
+                    let (dyn_ty, dyn_trait) = self.get_ptr_vtable(vptr)?;
+                    if dyn_trait != data.principal() {
+                        throw_ub_format!(
+                            "`dyn*` call on a pointer whose vtable does not match its type"
+                        );
+                    }
+                    let recv = recv.assert_mem_place(); // we passed an MPlaceTy to `unpack_dyn_star` so we definitely still have one
+
+                    (vptr, dyn_ty, recv.ptr)
+                } else {
+                    // Doesn't have to be a `dyn Trait`, but the unsized tail must be `dyn Trait`.
+                    // (For that reason we also cannot use `unpack_dyn_trait`.)
+                    let receiver_tail = self
+                        .tcx
+                        .struct_tail_erasing_lifetimes(receiver_place.layout.ty, self.param_env);
+                    let ty::Dynamic(data, _, ty::Dyn) = receiver_tail.kind() else {
+                        span_bug!(self.cur_span(), "dynamic call on non-`dyn` type {}", receiver_tail)
+                    };
+                    assert!(receiver_place.layout.is_unsized());
+
+                    // Get the required information from the vtable.
+                    let vptr = receiver_place.meta.unwrap_meta().to_pointer(self)?;
+                    let (dyn_ty, dyn_trait) = self.get_ptr_vtable(vptr)?;
+                    if dyn_trait != data.principal() {
+                        throw_ub_format!(
+                            "`dyn` call on a pointer whose vtable does not match its type"
+                        );
+                    }
+
+                    // It might be surprising that we use a pointer as the receiver even if this
+                    // is a by-val case; this works because by-val passing of an unsized `dyn
+                    // Trait` to a function is actually desugared to a pointer.
+                    (vptr, dyn_ty, receiver_place.ptr)
+                };
 
                 // Now determine the actual method to call. We can do that in two different ways and
                 // compare them to ensure everything fits.
                 let Some(ty::VtblEntry::Method(fn_inst)) = self.get_vtable_entries(vptr)?.get(idx).copied() else {
                     throw_ub_format!("`dyn` call trying to call something that is not a method")
                 };
+                trace!("Virtual call dispatches to {fn_inst:#?}");
                 if cfg!(debug_assertions) {
                     let tcx = *self.tcx;
 
                     let trait_def_id = tcx.trait_of_item(def_id).unwrap();
                     let virtual_trait_ref =
                         ty::TraitRef::from_method(tcx, trait_def_id, instance.substs);
-                    assert_eq!(
-                        receiver_tail,
-                        virtual_trait_ref.self_ty(),
-                        "mismatch in underlying dyn trait computation within Miri and MIR building",
-                    );
                     let existential_trait_ref =
                         ty::ExistentialTraitRef::erase_self_ty(tcx, virtual_trait_ref);
                     let concrete_trait_ref = existential_trait_ref.with_self_ty(tcx, dyn_ty);
@@ -606,17 +644,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                     assert_eq!(fn_inst, concrete_method);
                 }
 
-                // `*mut receiver_place.layout.ty` is almost the layout that we
-                // want for args[0]: We have to project to field 0 because we want
-                // a thin pointer.
-                assert!(receiver_place.layout.is_unsized());
-                let receiver_ptr_ty = self.tcx.mk_mut_ptr(receiver_place.layout.ty);
-                let this_receiver_ptr = self.layout_of(receiver_ptr_ty)?.field(self, 0);
-                // Adjust receiver argument.
-                args[0] = OpTy::from(ImmTy::from_immediate(
-                    Scalar::from_maybe_pointer(receiver_place.ptr, self).into(),
-                    this_receiver_ptr,
-                ));
+                // Adjust receiver argument. Layout can be any (thin) ptr.
+                args[0] = ImmTy::from_immediate(
+                    Scalar::from_maybe_pointer(adjusted_receiver, self).into(),
+                    self.layout_of(self.tcx.mk_mut_ptr(dyn_ty))?,
+                )
+                .into();
                 trace!("Patched receiver operand to {:#?}", args[0]);
                 // recurse with concrete function
                 self.eval_fn_call(
@@ -640,20 +673,29 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         unwind: Option<mir::BasicBlock>,
     ) -> InterpResult<'tcx> {
         trace!("drop_in_place: {:?},\n  {:?}, {:?}", *place, place.layout.ty, instance);
-        // We take the address of the object.  This may well be unaligned, which is fine
-        // for us here.  However, unaligned accesses will probably make the actual drop
+        // We take the address of the object. This may well be unaligned, which is fine
+        // for us here. However, unaligned accesses will probably make the actual drop
         // implementation fail -- a problem shared by rustc.
         let place = self.force_allocation(place)?;
 
-        let (instance, place) = match place.layout.ty.kind() {
-            ty::Dynamic(..) => {
+        let place = match place.layout.ty.kind() {
+            ty::Dynamic(_, _, ty::Dyn) => {
                 // Dropping a trait object. Need to find actual drop fn.
-                let place = self.unpack_dyn_trait(&place)?;
-                let instance = ty::Instance::resolve_drop_in_place(*self.tcx, place.layout.ty);
-                (instance, place)
+                self.unpack_dyn_trait(&place)?.0
+            }
+            ty::Dynamic(_, _, ty::DynStar) => {
+                // Dropping a `dyn*`. Need to find actual drop fn.
+                self.unpack_dyn_star(&place.into())?.0.assert_mem_place()
+            }
+            _ => {
+                debug_assert_eq!(
+                    instance,
+                    ty::Instance::resolve_drop_in_place(*self.tcx, place.layout.ty)
+                );
+                place
             }
-            _ => (instance, place),
         };
+        let instance = ty::Instance::resolve_drop_in_place(*self.tcx, place.layout.ty);
         let fn_abi = self.fn_abi_of_instance(instance, ty::List::empty())?;
 
         let arg = ImmTy::from_immediate(
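// The new Drop fast path bypasses the call entirely when the resolved
// instance is empty drop glue. A host-Rust analogue of that test using
// std::mem::needs_drop (an assumption for illustration; the interpreter
// itself checks for `InstanceDef::DropGlue(_, None)`):
fn drop_is_noop<T>() -> bool {
    !std::mem::needs_drop::<T>()
}

fn main() {
    assert!(drop_is_noop::<u32>()); // no drop glue at all
    assert!(!drop_is_noop::<String>()); // frees its buffer, so glue exists
}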
diff --git a/compiler/rustc_const_eval/src/interpret/traits.rs b/compiler/rustc_const_eval/src/interpret/traits.rs
index cab23b7241f..fa15d466ac1 100644
--- a/compiler/rustc_const_eval/src/interpret/traits.rs
+++ b/compiler/rustc_const_eval/src/interpret/traits.rs
@@ -53,7 +53,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
     ) -> InterpResult<'tcx, (Size, Align)> {
         let (ty, _trait_ref) = self.get_ptr_vtable(vtable)?;
         let layout = self.layout_of(ty)?;
-        assert!(!layout.is_unsized(), "there are no vtables for unsized types");
+        assert!(layout.is_sized(), "there are no vtables for unsized types");
         Ok((layout.size, layout.align.abi))
     }
 }
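// `get_vtable_size_and_align` reads back the size and alignment of the
// concrete (necessarily sized) type behind a vtable. A rough host analogue
// of the values a vtable records:
fn size_and_align<T: Sized>() -> (usize, usize) {
    (std::mem::size_of::<T>(), std::mem::align_of::<T>())
}

fn main() {
    // A `dyn Trait` vtable whose concrete type is u32 records (4, 4).
    assert_eq!(size_and_align::<u32>(), (4, 4));
}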
diff --git a/compiler/rustc_const_eval/src/interpret/util.rs b/compiler/rustc_const_eval/src/interpret/util.rs
index 2bc521d5bbe..bf2b4ee69ab 100644
--- a/compiler/rustc_const_eval/src/interpret/util.rs
+++ b/compiler/rustc_const_eval/src/interpret/util.rs
@@ -1,6 +1,7 @@
 use rustc_middle::mir::interpret::InterpResult;
-use rustc_middle::ty::{self, Ty, TyCtxt, TypeSuperVisitable, TypeVisitable, TypeVisitor};
-use std::convert::TryInto;
+use rustc_middle::ty::{
+    self, Ty, TyCtxt, TypeSuperVisitable, TypeVisitable, TypeVisitableExt, TypeVisitor,
+};
 use std::ops::ControlFlow;
 
 /// Checks whether a type contains generic parameters which require substitution.
@@ -10,7 +11,7 @@ use std::ops::ControlFlow;
 /// case these parameters are unused.
 pub(crate) fn ensure_monomorphic_enough<'tcx, T>(tcx: TyCtxt<'tcx>, ty: T) -> InterpResult<'tcx>
 where
-    T: TypeVisitable<'tcx>,
+    T: TypeVisitable<TyCtxt<'tcx>>,
 {
     debug!("ensure_monomorphic_enough: ty={:?}", ty);
     if !ty.needs_subst() {
@@ -22,12 +23,12 @@ where
         tcx: TyCtxt<'tcx>,
     }
 
-    impl<'tcx> TypeVisitor<'tcx> for UsedParamsNeedSubstVisitor<'tcx> {
+    impl<'tcx> TypeVisitor<TyCtxt<'tcx>> for UsedParamsNeedSubstVisitor<'tcx> {
         type BreakTy = FoundParam;
 
         fn visit_ty(&mut self, ty: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
             if !ty.needs_subst() {
-                return ControlFlow::CONTINUE;
+                return ControlFlow::Continue(());
             }
 
             match *ty.kind() {
@@ -41,16 +42,15 @@ where
                         let index = index
                             .try_into()
                             .expect("more generic parameters than can fit into a `u32`");
-                        let is_used = unused_params.contains(index).map_or(true, |unused| !unused);
                         // Only recurse when generic parameters in fns, closures and generators
                         // are used and require substitution.
                         // Just in case there are closures or generators within this subst,
                         // recurse.
-                        if is_used && subst.needs_subst() {
+                        if unused_params.is_used(index) && subst.needs_subst() {
                             return subst.visit_with(self);
                         }
                     }
-                    ControlFlow::CONTINUE
+                    ControlFlow::Continue(())
                 }
                 _ => ty.super_visit_with(self),
             }
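// The diff replaces the deprecated `ControlFlow::CONTINUE` constant with
// `ControlFlow::Continue(())`. A minimal visitor-style use of the same
// early-exit pattern, independent of the rustc TypeVisitor trait:
use std::ops::ControlFlow;

struct FoundParam;

fn visit(values: &[u32]) -> ControlFlow<FoundParam> {
    for &v in values {
        if v == 0 {
            // Early exit, playing the role of `BreakTy` above.
            return ControlFlow::Break(FoundParam);
        }
    }
    ControlFlow::Continue(())
}

fn main() {
    assert!(visit(&[1, 2, 0]).is_break());
    assert!(visit(&[1, 2, 3]).is_continue());
}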
diff --git a/compiler/rustc_const_eval/src/interpret/validity.rs b/compiler/rustc_const_eval/src/interpret/validity.rs
index 8aa56c275d9..f7881c50960 100644
--- a/compiler/rustc_const_eval/src/interpret/validity.rs
+++ b/compiler/rustc_const_eval/src/interpret/validity.rs
@@ -4,10 +4,11 @@
 //! That's useful because it means other passes (e.g. promotion) can rely on `const`s
 //! to be const-safe.
 
-use std::convert::TryFrom;
 use std::fmt::{Display, Write};
 use std::num::NonZeroUsize;
 
+use either::{Left, Right};
+
 use rustc_ast::Mutability;
 use rustc_data_structures::fx::FxHashSet;
 use rustc_hir as hir;
@@ -22,18 +23,18 @@ use std::hash::Hash;
 // for the validation errors
 use super::UndefinedBehaviorInfo::*;
 use super::{
-    CheckInAllocMsg, GlobalAlloc, ImmTy, Immediate, InterpCx, InterpResult, MPlaceTy, Machine,
-    MemPlaceMeta, OpTy, Scalar, ValueVisitor,
+    AllocId, CheckInAllocMsg, GlobalAlloc, ImmTy, Immediate, InterpCx, InterpResult, MPlaceTy,
+    Machine, MemPlaceMeta, OpTy, Pointer, Scalar, ValueVisitor,
 };
 
 macro_rules! throw_validation_failure {
-    ($where:expr, { $( $what_fmt:expr ),+ } $( expected { $( $expected_fmt:expr ),+ } )?) => {{
+    ($where:expr, { $( $what_fmt:tt )* } $( expected { $( $expected_fmt:tt )* } )?) => {{
         let mut msg = String::new();
         msg.push_str("encountered ");
-        write!(&mut msg, $($what_fmt),+).unwrap();
+        write!(&mut msg, $($what_fmt)*).unwrap();
         $(
             msg.push_str(", but expected ");
-            write!(&mut msg, $($expected_fmt),+).unwrap();
+            write!(&mut msg, $($expected_fmt)*).unwrap();
         )?
         let path = rustc_middle::ty::print::with_no_trimmed_paths!({
             let where_ = &$where;
@@ -81,7 +82,7 @@ macro_rules! throw_validation_failure {
 ///
 macro_rules! try_validation {
     ($e:expr, $where:expr,
-    $( $( $p:pat_param )|+ => { $( $what_fmt:expr ),+ } $( expected { $( $expected_fmt:expr ),+ } )? ),+ $(,)?
+    $( $( $p:pat_param )|+ => { $( $what_fmt:tt )* } $( expected { $( $expected_fmt:tt )* } )? ),+ $(,)?
     ) => {{
         match $e {
             Ok(x) => x,
@@ -92,7 +93,7 @@ macro_rules! try_validation {
                     InterpError::UndefinedBehavior($($p)|+) =>
                        throw_validation_failure!(
                             $where,
-                            { $( $what_fmt ),+ } $( expected { $( $expected_fmt ),+ } )?
+                            { $( $what_fmt )* } $( expected { $( $expected_fmt )* } )?
                         )
                 ),+,
                 #[allow(unreachable_patterns)]
@@ -174,7 +175,7 @@ fn write_path(out: &mut String, path: &[PathElem]) {
             TupleElem(idx) => write!(out, ".{}", idx),
             ArrayElem(idx) => write!(out, "[{}]", idx),
             // `.<deref>` does not match Rust syntax, but it is more readable for long paths -- and
-            // some of the other items here also are not Rust syntax.  Actually we can't
+            // some of the other items here also are not Rust syntax. Actually we can't
             // even use the usual syntax because we are just showing the projections,
             // not the root.
             Deref => write!(out, ".<deref>"),
@@ -239,10 +240,8 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
                 // FIXME this should be more descriptive i.e. CapturePlace instead of CapturedVar
                 // https://github.com/rust-lang/project-rfc-2229/issues/46
                 if let Some(local_def_id) = def_id.as_local() {
-                    let tables = self.ecx.tcx.typeck(local_def_id);
-                    if let Some(captured_place) =
-                        tables.closure_min_captures_flattened(local_def_id).nth(field)
-                    {
+                    let captures = self.ecx.tcx.closure_captures(local_def_id);
+                    if let Some(captured_place) = captures.get(field) {
                         // Sometimes the index is beyond the number of upvars (seen
                         // for a generator).
                         let var_hir_id = captured_place.get_root_variable();
@@ -334,7 +333,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
     ) -> InterpResult<'tcx> {
         let tail = self.ecx.tcx.struct_tail_erasing_lifetimes(pointee.ty, self.ecx.param_env);
         match tail.kind() {
-            ty::Dynamic(..) => {
+            ty::Dynamic(_, _, ty::Dyn) => {
                 let vtable = meta.unwrap_meta().to_pointer(self.ecx)?;
                 // Make sure it is a genuine vtable pointer.
                 let (_ty, _trait) = try_validation!(
@@ -347,7 +346,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
                 // FIXME: check if the type/trait match what ty::Dynamic says?
             }
             ty::Slice(..) | ty::Str => {
-                let _len = meta.unwrap_meta().to_machine_usize(self.ecx)?;
+                let _len = meta.unwrap_meta().to_target_usize(self.ecx)?;
                 // We do not check that `len * elem_size <= isize::MAX`:
                 // that is only required for references, and there it falls out of the
                 // "dereferenceable" check performed by Stacked Borrows.
@@ -398,12 +397,15 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
                 {
                     "an unaligned {kind} (required {} byte alignment but found {})",
                     required.bytes(),
-                    has.bytes()
+                    has.bytes(),
                 },
             DanglingIntPointer(0, _) =>
                 { "a null {kind}" },
             DanglingIntPointer(i, _) =>
-                { "a dangling {kind} (address {i:#x} is unallocated)" },
+                {
+                    "a dangling {kind} ({pointer} has no provenance)",
+                    pointer = Pointer::<Option<AllocId>>::from_addr_invalid(*i),
+                },
             PointerOutOfBounds { .. } =>
                 { "a dangling {kind} (going beyond the bounds of its allocation)" },
             // This cannot happen during const-eval (because interning already detects
@@ -418,7 +420,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
             )
         }
         // Recursive checking
-        if let Some(ref mut ref_tracking) = self.ref_tracking {
+        if let Some(ref_tracking) = self.ref_tracking.as_deref_mut() {
             // Proceed recursively even for ZST, no reason to skip them!
             // `!` is a ZST and we want to validate it.
             if let Ok((alloc_id, _offset, _prov)) = self.ecx.ptr_try_get_alloc_id(place.ptr) {
@@ -483,7 +485,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
     }
 
     /// Check if this is a value of primitive type, and if yes check the validity of the value
-    /// at that type.  Return `true` if the type is indeed primitive.
+    /// at that type. Return `true` if the type is indeed primitive.
     fn try_visit_primitive(
         &mut self,
         value: &OpTy<'tcx, M::Provenance>,
@@ -600,8 +602,8 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
             | ty::Placeholder(..)
             | ty::Bound(..)
             | ty::Param(..)
-            | ty::Opaque(..)
-            | ty::Projection(..)
+            | ty::Alias(..)
+            | ty::GeneratorWitnessMIR(..)
             | ty::GeneratorWitness(..) => bug!("Encountered invalid type {:?}", ty),
         }
     }
@@ -623,7 +625,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
                 // Can only happen during CTFE.
                 // We support 2 kinds of ranges here: full range, and excluding zero.
                 if start == 1 && end == max_value {
-                    // Only null is the niche.  So make sure the ptr is NOT null.
+                    // Only null is the niche. So make sure the ptr is NOT null.
                     if self.ecx.scalar_may_be_null(scalar)? {
                         throw_validation_failure!(self.path,
                             { "a potentially null pointer" }
@@ -759,7 +761,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
         // Recursively walk the value at its type.
         self.walk_value(op)?;
 
-        // *After* all of this, check the ABI.  We need to check the ABI to handle
+        // *After* all of this, check the ABI. We need to check the ABI to handle
         // types like `NonNull` where the `Scalar` info is more restrictive than what
         // the fields say (`rustc_layout_scalar_valid_range_start`).
         // But in most cases, this will just propagate what the fields say,
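// Sketch of why the ABI check matters, using std types rather than compiler
// internals: `NonZeroU8` wraps a plain `u8` field, so checking fields alone
// would accept 0; it is the scalar valid range 1..=255 recorded in the ABI
// that rejects it.
use std::num::NonZeroU8;

fn main() {
    // The safe constructor mirrors the validity rule the ABI encodes.
    assert!(NonZeroU8::new(0).is_none());
    assert!(NonZeroU8::new(1).is_some());
}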
@@ -783,18 +785,10 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
                 }
             }
             Abi::ScalarPair(a_layout, b_layout) => {
-                // There is no `rustc_layout_scalar_valid_range_start` for pairs, so
-                // we would validate these things as we descend into the fields,
-                // but that can miss bugs in layout computation. Layout computation
-                // is subtle due to enums having ScalarPair layout, where one field
-                // is the discriminant.
-                if cfg!(debug_assertions)
-                    && !a_layout.is_uninit_valid()
-                    && !b_layout.is_uninit_valid()
-                {
-                    // We can only proceed if *both* scalars need to be initialized.
-                    // FIXME: find a way to also check ScalarPair when one side can be uninit but
-                    // the other must be init.
+                // We can only proceed if *both* scalars need to be initialized.
+                // FIXME: find a way to also check ScalarPair when one side can be uninit but
+                // the other must be init.
+                if !a_layout.is_uninit_valid() && !b_layout.is_uninit_valid() {
                     let (a, b) =
                         self.read_immediate(op, "initiailized scalar value")?.to_scalar_pair();
                     self.visit_scalar(a, a_layout)?;
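// Sketch of the distinction above, using std types; whether a given pair is
// classified as `ScalarPair` is a layout detail and an assumption here.
// Both halves of `(NonZeroU32, u32)` must be initialized, so the fast path
// applies; a pair containing `MaybeUninit` admits uninit bytes and has to
// be checked field by field instead.
use std::mem::MaybeUninit;
use std::num::NonZeroU32;

fn main() {
    let both_init = (NonZeroU32::new(1).unwrap(), 2u32);
    let half_init = (3u32, MaybeUninit::<u32>::uninit());
    let _ = (both_init, half_init);
}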
@@ -852,9 +846,9 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
                     return Ok(());
                 }
                 // Now that we definitely have a non-ZST array, we know it lives in memory.
-                let mplace = match op.try_as_mplace() {
-                    Ok(mplace) => mplace,
-                    Err(imm) => match *imm {
+                let mplace = match op.as_mplace_or_imm() {
+                    Left(mplace) => mplace,
+                    Right(imm) => match *imm {
                         Immediate::Uninit =>
                             throw_validation_failure!(self.path, { "uninitialized bytes" }),
                         Immediate::Scalar(..) | Immediate::ScalarPair(..) =>
@@ -865,10 +859,10 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
                 // Optimization: we just check the entire range at once.
                 // NOTE: Keep this in sync with the handling of integer and float
                 // types above, in `visit_primitive`.
-                // In run-time mode, we accept pointers in here.  This is actually more
+                // In run-time mode, we accept pointers in here. This is actually more
                 // permissive than a per-element check would be, e.g., we accept
                 // a &[u8] that contains a pointer even though bytewise checking would
-                // reject it.  However, that's good: We don't inherently want
+                // reject it. However, that's good: We don't inherently want
                 // to reject those pointers, we just do not have the machinery to
                 // talk about parts of a pointer.
                 // We also accept uninit, for consistency with the slow path.
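// Sketch of the situation the comment describes: a `&[u8]` view over bytes
// that happen to hold a pointer. A bytewise check would have to reject the
// provenance-carrying bytes; the range-based fast path does not.
fn main() {
    let x = 42i32;
    let p = &x as *const i32;
    let len = std::mem::size_of::<*const i32>();
    let bytes: &[u8] =
        unsafe { std::slice::from_raw_parts(&p as *const _ as *const u8, len) };
    // Only the length is inspected here; reading the bytes themselves is a
    // separate (and subtle) question.
    assert_eq!(bytes.len(), len);
}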
diff --git a/compiler/rustc_const_eval/src/interpret/visitor.rs b/compiler/rustc_const_eval/src/interpret/visitor.rs
index aee1f93b1a3..7a14459399c 100644
--- a/compiler/rustc_const_eval/src/interpret/visitor.rs
+++ b/compiler/rustc_const_eval/src/interpret/visitor.rs
@@ -284,7 +284,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueMut<'mir, 'tcx, M>
         &self,
         ecx: &InterpCx<'mir, 'tcx, M>,
     ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
-        // We `force_allocation` here so that `from_op` below can work.
+        // No need for `force_allocation` since we are just going to read from this.
         ecx.place_to_op(self)
     }
 
@@ -324,7 +324,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueMut<'mir, 'tcx, M>
 
 macro_rules! make_value_visitor {
     ($visitor_trait:ident, $value_trait:ident, $($mutability:ident)?) => {
-        // How to traverse a value and what to do when we are at the leaves.
+        /// How to traverse a value and what to do when we are at the leaves.
         pub trait $visitor_trait<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>>: Sized {
             type V: $value_trait<'mir, 'tcx, M>;
 
@@ -421,15 +421,25 @@ macro_rules! make_value_visitor {
                 // Special treatment for special types, where the (static) layout is not sufficient.
                 match *ty.kind() {
                     // If it is a trait object, switch to the real type that was used to create it.
-                    ty::Dynamic(..) => {
+                    ty::Dynamic(_, _, ty::Dyn) => {
+                        // Dyn types. This is unsized, and the actual dynamic type of the data is given by the
+                        // vtable stored in the place metadata.
                         // unsized values are never immediate, so we can assert_mem_place
                         let op = v.to_op_for_read(self.ecx())?;
                         let dest = op.assert_mem_place();
-                        let inner_mplace = self.ecx().unpack_dyn_trait(&dest)?;
+                        let inner_mplace = self.ecx().unpack_dyn_trait(&dest)?.0;
                         trace!("walk_value: dyn object layout: {:#?}", inner_mplace.layout);
                         // recurse with the inner type
                         return self.visit_field(&v, 0, &$value_trait::from_op(&inner_mplace.into()));
                     },
+                    ty::Dynamic(_, _, ty::DynStar) => {
+                        // DynStar types. Very different from a dyn type (but strangely part of the
+                        // same variant in `TyKind`): These are pairs where the 2nd component is the
+                        // vtable, and the first component is the data (which must be ptr-sized).
+                        let op = v.to_op_for_proj(self.ecx())?;
+                        let data = self.ecx().unpack_dyn_star(&op)?.0;
+                        return self.visit_field(&v, 0, &$value_trait::from_op(&data));
+                    }
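// Conceptual sketch of the two representations handled above; these structs
// are illustrative stand-ins, not the compiler's actual types. A `&dyn Trait`
// is a wide *pointer* whose metadata is the vtable, while a `dyn* Trait`
// value is itself the pair, with a pointer-sized payload as its first half.
#[allow(dead_code)]
struct DynRefLike {
    data: *const (),   // points at the unsized object
    vtable: *const (), // metadata carried next to the data pointer
}
#[allow(dead_code)]
struct DynStarLike {
    data: *const (),   // the value itself; must be pointer-sized
    vtable: *const (), // second component: the vtable
}

fn main() {
    assert_eq!(
        std::mem::size_of::<DynRefLike>(),
        std::mem::size_of::<DynStarLike>()
    );
}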
                     // Slices do not need special handling here: they have `Array` field
                     // placement with length 0, so we enter the `Array` case below which
                     // indirectly uses the metadata to determine the actual length.
@@ -481,12 +491,12 @@ macro_rules! make_value_visitor {
                 };
 
                 // Visit the fields of this value.
-                match v.layout().fields {
+                match &v.layout().fields {
                     FieldsShape::Primitive => {}
-                    FieldsShape::Union(fields) => {
+                    &FieldsShape::Union(fields) => {
                         self.visit_union(v, fields)?;
                     }
-                    FieldsShape::Arbitrary { ref offsets, .. } => {
+                    FieldsShape::Arbitrary { offsets, .. } => {
                         // FIXME: We collect in a vec because otherwise there are lifetime
                         // errors: Projecting to a field needs access to `ecx`.
                         let fields: Vec<InterpResult<'tcx, Self::V>> =
diff --git a/compiler/rustc_const_eval/src/lib.rs b/compiler/rustc_const_eval/src/lib.rs
index 443c01fdb90..ed9efe568fb 100644
--- a/compiler/rustc_const_eval/src/lib.rs
+++ b/compiler/rustc_const_eval/src/lib.rs
@@ -6,7 +6,6 @@ Rust MIR: a lowered representation of Rust.
 
 #![feature(assert_matches)]
 #![feature(box_patterns)]
-#![feature(control_flow_enum)]
 #![feature(decl_macro)]
 #![feature(exact_size_is_empty)]
 #![feature(let_chains)]
@@ -20,6 +19,7 @@ Rust MIR: a lowered representation of Rust.
 #![feature(trusted_step)]
 #![feature(try_blocks)]
 #![feature(yeet_expr)]
+#![feature(if_let_guard)]
 #![feature(is_some_and)]
 #![recursion_limit = "256"]
 
@@ -34,9 +34,12 @@ pub mod interpret;
 pub mod transform;
 pub mod util;
 
+use rustc_errors::{DiagnosticMessage, SubdiagnosticMessage};
+use rustc_macros::fluent_messages;
 use rustc_middle::ty;
 use rustc_middle::ty::query::Providers;
-use rustc_target::abi::InitKind;
+
+fluent_messages! { "../locales/en-US.ftl" }
 
 pub fn provide(providers: &mut Providers) {
     const_eval::provide(providers);
@@ -58,7 +61,7 @@ pub fn provide(providers: &mut Providers) {
         let (param_env, value) = param_env_and_value.into_parts();
         const_eval::deref_mir_constant(tcx, param_env, value)
     };
-    providers.permits_uninit_init =
-        |tcx, ty| util::might_permit_raw_init(tcx, ty, InitKind::UninitMitigated0x01Fill);
-    providers.permits_zero_init = |tcx, ty| util::might_permit_raw_init(tcx, ty, InitKind::Zero);
+    providers.check_validity_requirement = |tcx, (init_kind, param_env_and_ty)| {
+        util::check_validity_requirement(tcx, init_kind, param_env_and_ty)
+    };
 }
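// Sketch of what this query ultimately powers on the library side: the
// validity assertions behind `mem::zeroed` (the consumer is suggested by the
// query's purpose; the exact call chain is an assumption here).
use std::mem;

fn main() {
    // An all-zero `usize` is a valid value, so this is fine:
    let ok: usize = unsafe { mem::zeroed() };
    assert_eq!(ok, 0);
    // A reference may never be zero; uncommenting this panics at runtime:
    // let _bad: &i32 = unsafe { mem::zeroed() };
}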
diff --git a/compiler/rustc_const_eval/src/transform/check_consts/check.rs b/compiler/rustc_const_eval/src/transform/check_consts/check.rs
index 5a8b3e30b9f..db55dbc2bfd 100644
--- a/compiler/rustc_const_eval/src/transform/check_consts/check.rs
+++ b/compiler/rustc_const_eval/src/transform/check_consts/check.rs
@@ -10,7 +10,7 @@ use rustc_middle::mir::visit::{MutatingUseContext, NonMutatingUseContext, PlaceC
 use rustc_middle::mir::*;
 use rustc_middle::ty::subst::{GenericArgKind, InternalSubsts};
 use rustc_middle::ty::{self, adjustment::PointerCast, Instance, InstanceDef, Ty, TyCtxt};
-use rustc_middle::ty::{Binder, TraitPredicate, TraitRef, TypeVisitable};
+use rustc_middle::ty::{Binder, TraitRef, TypeVisitableExt};
 use rustc_mir_dataflow::{self, Analysis};
 use rustc_span::{sym, Span, Symbol};
 use rustc_trait_selection::traits::error_reporting::TypeErrCtxtExt as _;
@@ -242,7 +242,7 @@ impl<'mir, 'tcx> Checker<'mir, 'tcx> {
             // impl trait is gone in MIR, so check the return type of a const fn by its signature
             // instead of the type of the return place.
             self.span = body.local_decls[RETURN_PLACE].source_info.span;
-            let return_ty = tcx.fn_sig(def_id).output();
+            let return_ty = self.ccx.fn_sig().output();
             self.check_local_or_return_ty(return_ty.skip_binder(), RETURN_PLACE);
         }
 
@@ -332,7 +332,7 @@ impl<'mir, 'tcx> Checker<'mir, 'tcx> {
 
     fn check_static(&mut self, def_id: DefId, span: Span) {
         if self.tcx.is_thread_local_static(def_id) {
-            self.tcx.sess.delay_span_bug(span, "tls access is checked in `Rvalue::ThreadLocalRef");
+            self.tcx.sess.delay_span_bug(span, "tls access is checked in `Rvalue::ThreadLocalRef`");
         }
         self.check_op_spanned(ops::StaticAccess, span)
     }
@@ -442,18 +442,24 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
 
         self.super_rvalue(rvalue, location);
 
-        match *rvalue {
+        match rvalue {
             Rvalue::ThreadLocalRef(_) => self.check_op(ops::ThreadLocalAccess),
 
             Rvalue::Use(_)
             | Rvalue::CopyForDeref(..)
             | Rvalue::Repeat(..)
             | Rvalue::Discriminant(..)
-            | Rvalue::Len(_)
-            | Rvalue::Aggregate(..) => {}
+            | Rvalue::Len(_) => {}
+
+            Rvalue::Aggregate(kind, ..) => {
+                if let AggregateKind::Generator(def_id, ..) = kind.as_ref()
+                    && let Some(generator_kind @ hir::GeneratorKind::Async(..)) = self.tcx.generator_kind(def_id)
+                {
+                    self.check_op(ops::Generator(generator_kind));
+                }
+            }
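// Sketch of what the new `Rvalue::Aggregate` arm rejects: constructing an
// async generator in a const context, gated behind `const_async_blocks`.
fn main() {
    // Without `#![feature(const_async_blocks)]`, uncommenting this fails
    // with an error that async blocks are not allowed in constants:
    // const C: () = { let _fut = async {}; };
}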
 
-            Rvalue::Ref(_, kind @ BorrowKind::Mut { .. }, ref place)
-            | Rvalue::Ref(_, kind @ BorrowKind::Unique, ref place) => {
+            Rvalue::Ref(_, kind @ (BorrowKind::Mut { .. } | BorrowKind::Unique), place) => {
                 let ty = place.ty(self.body, self.tcx).ty;
                 let is_allowed = match ty.kind() {
                     // Inside a `static mut`, `&mut [...]` is allowed.
@@ -467,7 +473,7 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
                     // that this is merely a ZST and it is already eligible for promotion.
                     // This may require an RFC?
                     /*
-                    ty::Array(_, len) if len.try_eval_usize(cx.tcx, cx.param_env) == Some(0)
+                    ty::Array(_, len) if len.try_eval_target_usize(cx.tcx, cx.param_env) == Some(0)
                         => true,
                     */
                     _ => false,
@@ -482,12 +488,12 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
                 }
             }
 
-            Rvalue::AddressOf(Mutability::Mut, ref place) => {
+            Rvalue::AddressOf(Mutability::Mut, place) => {
                 self.check_mut_borrow(place.local, hir::BorrowKind::Raw)
             }
 
-            Rvalue::Ref(_, BorrowKind::Shared | BorrowKind::Shallow, ref place)
-            | Rvalue::AddressOf(Mutability::Not, ref place) => {
+            Rvalue::Ref(_, BorrowKind::Shared | BorrowKind::Shallow, place)
+            | Rvalue::AddressOf(Mutability::Not, place) => {
                 let borrowed_place_has_mut_interior = qualifs::in_place::<HasMutInterior, _>(
                     &self.ccx,
                     &mut |local| self.qualifs.has_mut_interior(self.ccx, local, location),
@@ -555,7 +561,7 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
             Rvalue::NullaryOp(NullOp::SizeOf | NullOp::AlignOf, _) => {}
             Rvalue::ShallowInitBox(_, _) => {}
 
-            Rvalue::UnaryOp(_, ref operand) => {
+            Rvalue::UnaryOp(_, operand) => {
                 let ty = operand.ty(self.body, self.tcx);
                 if is_int_bool_or_char(ty) {
                     // Int, bool, and char operations are fine.
@@ -566,8 +572,8 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
                 }
             }
 
-            Rvalue::BinaryOp(op, box (ref lhs, ref rhs))
-            | Rvalue::CheckedBinaryOp(op, box (ref lhs, ref rhs)) => {
+            Rvalue::BinaryOp(op, box (lhs, rhs))
+            | Rvalue::CheckedBinaryOp(op, box (lhs, rhs)) => {
                 let lhs_ty = lhs.ty(self.body, self.tcx);
                 let rhs_ty = rhs.ty(self.body, self.tcx);
 
@@ -576,13 +582,16 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
                 } else if lhs_ty.is_fn_ptr() || lhs_ty.is_unsafe_ptr() {
                     assert_eq!(lhs_ty, rhs_ty);
                     assert!(
-                        op == BinOp::Eq
-                            || op == BinOp::Ne
-                            || op == BinOp::Le
-                            || op == BinOp::Lt
-                            || op == BinOp::Ge
-                            || op == BinOp::Gt
-                            || op == BinOp::Offset
+                        matches!(
+                            op,
+                            BinOp::Eq
+                            | BinOp::Ne
+                            | BinOp::Le
+                            | BinOp::Lt
+                            | BinOp::Ge
+                            | BinOp::Gt
+                            | BinOp::Offset
+                        )
                     );
 
                     self.check_op(ops::RawPtrComparison);
@@ -681,9 +690,11 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
             | StatementKind::StorageLive(_)
             | StatementKind::StorageDead(_)
             | StatementKind::Retag { .. }
+            | StatementKind::PlaceMention(..)
             | StatementKind::AscribeUserType(..)
             | StatementKind::Coverage(..)
             | StatementKind::Intrinsic(..)
+            | StatementKind::ConstEvalCounter
             | StatementKind::Nop => {}
         }
     }
@@ -721,18 +732,16 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
                             substs,
                             span: *fn_span,
                             from_hir_call: *from_hir_call,
+                            feature: Some(sym::const_trait_impl),
                         });
                         return;
                     }
 
                     let trait_ref = TraitRef::from_method(tcx, trait_id, substs);
-                    let poly_trait_pred = Binder::dummy(TraitPredicate {
-                        trait_ref,
-                        constness: ty::BoundConstness::ConstIfConst,
-                        polarity: ty::ImplPolarity::Positive,
-                    });
+                    let poly_trait_pred =
+                        Binder::dummy(trait_ref).with_constness(ty::BoundConstness::ConstIfConst);
                     let obligation =
-                        Obligation::new(ObligationCause::dummy(), param_env, poly_trait_pred);
+                        Obligation::new(tcx, ObligationCause::dummy(), param_env, poly_trait_pred);
 
                     let implsrc = {
                         let infcx = tcx.infer_ctxt().build();
@@ -747,16 +756,12 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
                         let ocx = ObligationCtxt::new(&infcx);
 
                         let predicates = tcx.predicates_of(callee).instantiate(tcx, substs);
-                        let hir_id = tcx
-                            .hir()
-                            .local_def_id_to_hir_id(self.body.source.def_id().expect_local());
                         let cause = ObligationCause::new(
                             terminator.source_info.span,
-                            hir_id,
+                            self.body.source.def_id().expect_local(),
                             ObligationCauseCode::ItemObligation(callee),
                         );
-                        let normalized_predicates =
-                            ocx.normalize(cause.clone(), param_env, predicates);
+                        let normalized_predicates = ocx.normalize(&cause, param_env, predicates);
                         ocx.register_obligations(traits::predicates_for_generics(
                             |_, _| cause.clone(),
                             self.param_env,
@@ -765,7 +770,7 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
 
                         let errors = ocx.select_all_or_error();
                         if !errors.is_empty() {
-                            infcx.err_ctxt().report_fulfillment_errors(&errors, None);
+                            infcx.err_ctxt().report_fulfillment_errors(&errors);
                         }
                     }
 
@@ -777,6 +782,20 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
                             );
                             return;
                         }
+                        Ok(Some(ImplSource::Closure(data))) => {
+                            if !tcx.is_const_fn_raw(data.closure_def_id) {
+                                self.check_op(ops::FnCallNonConst {
+                                    caller,
+                                    callee,
+                                    substs,
+                                    span: *fn_span,
+                                    from_hir_call: *from_hir_call,
+                                    feature: None,
+                                });
+
+                                return;
+                            }
+                        }
                         Ok(Some(ImplSource::UserDefined(data))) => {
                             let callee_name = tcx.item_name(callee);
                             if let Some(&did) = tcx
@@ -797,6 +816,7 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
                                     substs,
                                     span: *fn_span,
                                     from_hir_call: *from_hir_call,
+                                    feature: None,
                                 });
                                 return;
                             }
@@ -816,11 +836,10 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
 
                             if !nonconst_call_permission {
                                 let obligation = Obligation::new(
+                                    tcx,
                                     ObligationCause::dummy_with_span(*fn_span),
                                     param_env,
-                                    tcx.mk_predicate(
-                                        poly_trait_pred.map_bound(ty::PredicateKind::Trait),
-                                    ),
+                                    poly_trait_pred,
                                 );
 
                                 // improve diagnostics by showing what failed. Our requirements are stricter this time
@@ -840,6 +859,7 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
                                     substs,
                                     span: *fn_span,
                                     from_hir_call: *from_hir_call,
+                                    feature: None,
                                 });
                                 return;
                             }
@@ -888,14 +908,6 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
                     return;
                 }
 
-                // `async` blocks get lowered to `std::future::from_generator(/* a closure */)`.
-                let is_async_block = Some(callee) == tcx.lang_items().from_generator_fn();
-                if is_async_block {
-                    let kind = hir::GeneratorKind::Async(hir::AsyncGeneratorKind::Block);
-                    self.check_op(ops::Generator(kind));
-                    return;
-                }
-
                 if !tcx.is_const_fn_raw(callee) {
                     if !tcx.is_const_default_method(callee) {
                         // To get to here we must have already found a const impl for the
@@ -907,6 +919,7 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
                             substs,
                             span: *fn_span,
                             from_hir_call: *from_hir_call,
+                            feature: None,
                         });
                         return;
                     }
@@ -914,15 +927,24 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
 
                 // If the `const fn` we are trying to call is not const-stable, ensure that we have
                 // the proper feature gate enabled.
-                if let Some(gate) = is_unstable_const_fn(tcx, callee) {
+                if let Some((gate, implied_by)) = is_unstable_const_fn(tcx, callee) {
                     trace!(?gate, "calling unstable const fn");
                     if self.span.allows_unstable(gate) {
                         return;
                     }
+                    if let Some(implied_by_gate) = implied_by && self.span.allows_unstable(implied_by_gate) {
+                        return;
+                    }
 
                     // Calling an unstable function *always* requires that the corresponding gate
-                    // be enabled, even if the function has `#[rustc_allow_const_fn_unstable(the_gate)]`.
-                    if !tcx.features().declared_lib_features.iter().any(|&(sym, _)| sym == gate) {
+                    // (or implied gate) be enabled, even if the function has
+                    // `#[rustc_allow_const_fn_unstable(the_gate)]`.
+                    let gate_declared = |gate| {
+                        tcx.features().declared_lib_features.iter().any(|&(sym, _)| sym == gate)
+                    };
+                    let feature_gate_declared = gate_declared(gate);
+                    let implied_gate_declared = implied_by.map(gate_declared).unwrap_or(false);
+                    if !feature_gate_declared && !implied_gate_declared {
                         self.check_op(ops::FnCallUnstable(callee, Some(gate)));
                         return;
                     }
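// Sketch of the `implied_by` relationship this now honors; the attribute
// shape below is an assumption and is only meaningful inside a `staged_api`
// crate such as the standard library:
//
//     #[rustc_const_unstable(feature = "new_gate", issue = "none", implied_by = "old_gate")]
//     pub const fn gated() {}
//
// A caller that declared `#![feature(old_gate)]` before the gate was renamed
// keeps compiling, because the implied gate counts as declared too.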
@@ -935,7 +957,6 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
                     }
 
                     // Otherwise, we are something const-stable calling a const-unstable fn.
-
                     if super::rustc_allow_const_fn_unstable(tcx, caller, gate) {
                         trace!("rustc_allow_const_fn_unstable gate active");
                         return;
@@ -965,8 +986,7 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
 
             // Forbid all `Drop` terminators unless the place being dropped is a local with no
             // projections that cannot be `NeedsNonConstDrop`.
-            TerminatorKind::Drop { place: dropped_place, .. }
-            | TerminatorKind::DropAndReplace { place: dropped_place, .. } => {
+            TerminatorKind::Drop { place: dropped_place, .. } => {
                 // If we are checking live drops after drop-elaboration, don't emit duplicate
                 // errors here.
                 if super::post_drop_elaboration::checking_enabled(self.ccx) {
diff --git a/compiler/rustc_const_eval/src/transform/check_consts/mod.rs b/compiler/rustc_const_eval/src/transform/check_consts/mod.rs
index 25b420bed17..0e4501922f4 100644
--- a/compiler/rustc_const_eval/src/transform/check_consts/mod.rs
+++ b/compiler/rustc_const_eval/src/transform/check_consts/mod.rs
@@ -8,7 +8,7 @@ use rustc_attr as attr;
 use rustc_hir as hir;
 use rustc_hir::def_id::{DefId, LocalDefId};
 use rustc_middle::mir;
-use rustc_middle::ty::{self, TyCtxt};
+use rustc_middle::ty::{self, PolyFnSig, TyCtxt};
 use rustc_span::Symbol;
 
 pub use self::qualifs::Qualif;
@@ -62,7 +62,18 @@ impl<'mir, 'tcx> ConstCx<'mir, 'tcx> {
     }
 
     fn is_async(&self) -> bool {
-        self.tcx.asyncness(self.def_id()) == hir::IsAsync::Async
+        self.tcx.asyncness(self.def_id()).is_async()
+    }
+
+    pub fn fn_sig(&self) -> PolyFnSig<'tcx> {
+        let did = self.def_id().to_def_id();
+        if self.tcx.is_closure(did) {
+            let ty = self.tcx.type_of(did).subst_identity();
+            let ty::Closure(_, substs) = ty.kind() else { bug!("type_of closure not ty::Closure") };
+            substs.as_closure().sig()
+        } else {
+            self.tcx.fn_sig(did).subst_identity()
+        }
     }
 }
 
@@ -75,14 +86,14 @@ pub fn rustc_allow_const_fn_unstable(
     attr::rustc_allow_const_fn_unstable(&tcx.sess, attrs).any(|name| name == feature_gate)
 }
 
-// Returns `true` if the given `const fn` is "const-stable".
-//
-// Panics if the given `DefId` does not refer to a `const fn`.
-//
-// Const-stability is only relevant for `const fn` within a `staged_api` crate. Only "const-stable"
-// functions can be called in a const-context by users of the stable compiler. "const-stable"
-// functions are subject to more stringent restrictions than "const-unstable" functions: They
-// cannot use unstable features and can only call other "const-stable" functions.
+/// Returns `true` if the given `const fn` is "const-stable".
+///
+/// Panics if the given `DefId` does not refer to a `const fn`.
+///
+/// Const-stability is only relevant for `const fn` within a `staged_api` crate. Only "const-stable"
+/// functions can be called in a const-context by users of the stable compiler. "const-stable"
+/// functions are subject to more stringent restrictions than "const-unstable" functions: They
+/// cannot use unstable features and can only call other "const-stable" functions.
 pub fn is_const_stable_const_fn(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
     // A default body in a `#[const_trait]` is not const-stable because const
     // trait fns currently cannot be const-stable. We shouldn't
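// Sketch of the two kinds of `const fn` in a `staged_api` crate (attribute
// shapes as used in the standard library; illustrative only, they have no
// effect outside such a crate):
//
//     #[stable(feature = "f", since = "1.0.0")]
//     #[rustc_const_stable(feature = "f", since = "1.0.0")]
//     pub const fn const_stable() {}   // callable in const contexts on stable
//
//     #[rustc_const_unstable(feature = "g", issue = "none")]
//     pub const fn const_unstable() {} // callers need the `g` feature gate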
@@ -115,7 +126,7 @@ fn is_parent_const_stable_trait(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
     let local_def_id = def_id.expect_local();
     let hir_id = tcx.local_def_id_to_hir_id(local_def_id);
 
-    let Some(parent) = tcx.hir().find_parent_node(hir_id) else { return false };
+    let Some(parent) = tcx.hir().opt_parent_id(hir_id) else { return false };
     let parent_def = tcx.hir().get(parent);
 
     if !matches!(
diff --git a/compiler/rustc_const_eval/src/transform/check_consts/ops.rs b/compiler/rustc_const_eval/src/transform/check_consts/ops.rs
index b28d7019491..e586720a0d0 100644
--- a/compiler/rustc_const_eval/src/transform/check_consts/ops.rs
+++ b/compiler/rustc_const_eval/src/transform/check_consts/ops.rs
@@ -1,7 +1,7 @@
 //! Concrete error types for all operations which may be invalid in a certain const context.
 
 use hir::def_id::LocalDefId;
-use hir::ConstContext;
+use hir::{ConstContext, LangItem};
 use rustc_errors::{
     error_code, struct_span_err, Applicability, DiagnosticBuilder, ErrorGuaranteed,
 };
@@ -12,24 +12,15 @@ use rustc_infer::traits::{ImplSource, Obligation, ObligationCause};
 use rustc_middle::mir;
 use rustc_middle::ty::print::with_no_trimmed_paths;
 use rustc_middle::ty::subst::{GenericArgKind, SubstsRef};
-use rustc_middle::ty::{
-    suggest_constraining_type_param, Adt, Closure, DefIdTree, FnDef, FnPtr, Param, TraitPredicate,
-    Ty,
-};
-use rustc_middle::ty::{Binder, BoundConstness, ImplPolarity, TraitRef};
+use rustc_middle::ty::{suggest_constraining_type_param, Adt, Closure, FnDef, FnPtr, Param, Ty};
+use rustc_middle::ty::{Binder, TraitRef};
 use rustc_session::parse::feature_err;
 use rustc_span::symbol::sym;
 use rustc_span::{BytePos, Pos, Span, Symbol};
 use rustc_trait_selection::traits::SelectionContext;
 
 use super::ConstCx;
-use crate::errors::{
-    InteriorMutabilityBorrow, InteriorMutableDataRefer, MutDerefErr, NonConstFmtMacroCall,
-    NonConstFnCall, NonConstOpErr, PanicNonStrErr, RawPtrToIntErr, StaticAccessErr,
-    TransientMutBorrowErr, TransientMutBorrowErrRaw, UnallowedFnPointerCall,
-    UnallowedHeapAllocations, UnallowedInlineAsm, UnallowedMutableRefs, UnallowedMutableRefsRaw,
-    UnallowedOpInConstContext, UnstableConstFn,
-};
+use crate::errors;
 use crate::util::{call_kind, CallDesugaringKind, CallKind};
 
 #[derive(Clone, Copy, Debug, PartialEq, Eq)]
@@ -100,7 +91,7 @@ impl<'tcx> NonConstOp<'tcx> for FnCallIndirect {
         ccx: &ConstCx<'_, 'tcx>,
         span: Span,
     ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
-        ccx.tcx.sess.create_err(UnallowedFnPointerCall { span, kind: ccx.const_kind() })
+        ccx.tcx.sess.create_err(errors::UnallowedFnPointerCall { span, kind: ccx.const_kind() })
     }
 }
 
@@ -112,6 +103,7 @@ pub struct FnCallNonConst<'tcx> {
     pub substs: SubstsRef<'tcx>,
     pub span: Span,
     pub from_hir_call: bool,
+    pub feature: Option<Symbol>,
 }
 
 impl<'tcx> NonConstOp<'tcx> for FnCallNonConst<'tcx> {
@@ -120,7 +112,7 @@ impl<'tcx> NonConstOp<'tcx> for FnCallNonConst<'tcx> {
         ccx: &ConstCx<'_, 'tcx>,
         _: Span,
     ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
-        let FnCallNonConst { caller, callee, substs, span, from_hir_call } = *self;
+        let FnCallNonConst { caller, callee, substs, span, from_hir_call, feature } = *self;
         let ConstCx { tcx, param_env, .. } = *ccx;
 
         let diag_trait = |err, self_ty: Ty<'_>, trait_id| {
@@ -142,18 +134,16 @@ impl<'tcx> NonConstOp<'tcx> for FnCallNonConst<'tcx> {
                             &param_ty.name.as_str(),
                             &constraint,
                             None,
+                            None,
                         );
                     }
                 }
                 Adt(..) => {
                     let obligation = Obligation::new(
+                        tcx,
                         ObligationCause::dummy(),
                         param_env,
-                        Binder::dummy(TraitPredicate {
-                            trait_ref,
-                            constness: BoundConstness::NotConst,
-                            polarity: ImplPolarity::Positive,
-                        }),
+                        Binder::dummy(trait_ref),
                     );
 
                     let infcx = tcx.infer_ctxt().build();
@@ -303,13 +293,14 @@ impl<'tcx> NonConstOp<'tcx> for FnCallNonConst<'tcx> {
                     err.span_note(deref_target, "deref defined here");
                 }
 
-                diag_trait(&mut err, self_ty, tcx.lang_items().deref_trait().unwrap());
+                diag_trait(&mut err, self_ty, tcx.require_lang_item(LangItem::Deref, Some(span)));
                 err
             }
-            _ if tcx.opt_parent(callee) == tcx.get_diagnostic_item(sym::ArgumentV1Methods) => {
-                ccx.tcx.sess.create_err(NonConstFmtMacroCall { span, kind: ccx.const_kind() })
-            }
-            _ => ccx.tcx.sess.create_err(NonConstFnCall {
+            _ if tcx.opt_parent(callee) == tcx.get_diagnostic_item(sym::ArgumentV1Methods) => ccx
+                .tcx
+                .sess
+                .create_err(errors::NonConstFmtMacroCall { span, kind: ccx.const_kind() }),
+            _ => ccx.tcx.sess.create_err(errors::NonConstFnCall {
                 span,
                 def_path_str: ccx.tcx.def_path_str_with_substs(callee, substs),
                 kind: ccx.const_kind(),
@@ -322,6 +313,13 @@ impl<'tcx> NonConstOp<'tcx> for FnCallNonConst<'tcx> {
             ccx.const_kind(),
         ));
 
+        if let Some(feature) = feature && ccx.tcx.sess.is_nightly_build() {
+            err.help(&format!(
+                "add `#![feature({})]` to the crate attributes to enable",
+                feature,
+            ));
+        }
+
         if let ConstContext::Static(_) = ccx.const_kind() {
             err.note("consider wrapping this expression in `Lazy::new(|| ...)` from the `once_cell` crate: https://crates.io/crates/once_cell");
         }
@@ -347,7 +345,7 @@ impl<'tcx> NonConstOp<'tcx> for FnCallUnstable {
         let mut err = ccx
             .tcx
             .sess
-            .create_err(UnstableConstFn { span, def_path: ccx.tcx.def_path_str(def_id) });
+            .create_err(errors::UnstableConstFn { span, def_path: ccx.tcx.def_path_str(def_id) });
 
         if ccx.is_const_stable_const_fn() {
             err.help("const-stable functions can only call other const-stable functions");
@@ -380,14 +378,14 @@ impl<'tcx> NonConstOp<'tcx> for Generator {
         ccx: &ConstCx<'_, 'tcx>,
         span: Span,
     ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
-        let msg = format!("{}s are not allowed in {}s", self.0, ccx.const_kind());
+        let msg = format!("{}s are not allowed in {}s", self.0.descr(), ccx.const_kind());
         if let hir::GeneratorKind::Async(hir::AsyncGeneratorKind::Block) = self.0 {
             ccx.tcx.sess.create_feature_err(
-                UnallowedOpInConstContext { span, msg },
+                errors::UnallowedOpInConstContext { span, msg },
                 sym::const_async_blocks,
             )
         } else {
-            ccx.tcx.sess.create_err(UnallowedOpInConstContext { span, msg })
+            ccx.tcx.sess.create_err(errors::UnallowedOpInConstContext { span, msg })
         }
     }
 }
@@ -400,7 +398,7 @@ impl<'tcx> NonConstOp<'tcx> for HeapAllocation {
         ccx: &ConstCx<'_, 'tcx>,
         span: Span,
     ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
-        ccx.tcx.sess.create_err(UnallowedHeapAllocations {
+        ccx.tcx.sess.create_err(errors::UnallowedHeapAllocations {
             span,
             kind: ccx.const_kind(),
             teach: ccx.tcx.sess.teach(&error_code!(E0010)).then_some(()),
@@ -416,7 +414,7 @@ impl<'tcx> NonConstOp<'tcx> for InlineAsm {
         ccx: &ConstCx<'_, 'tcx>,
         span: Span,
     ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
-        ccx.tcx.sess.create_err(UnallowedInlineAsm { span, kind: ccx.const_kind() })
+        ccx.tcx.sess.create_err(errors::UnallowedInlineAsm { span, kind: ccx.const_kind() })
     }
 }
 
@@ -467,7 +465,9 @@ impl<'tcx> NonConstOp<'tcx> for TransientCellBorrow {
         ccx: &ConstCx<'_, 'tcx>,
         span: Span,
     ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
-        ccx.tcx.sess.create_feature_err(InteriorMutabilityBorrow { span }, sym::const_refs_to_cell)
+        ccx.tcx
+            .sess
+            .create_feature_err(errors::InteriorMutabilityBorrow { span }, sym::const_refs_to_cell)
     }
 }
 
@@ -484,14 +484,14 @@ impl<'tcx> NonConstOp<'tcx> for CellBorrow {
     ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
         // FIXME: Maybe a more elegant solution to this if-else case
         if let hir::ConstContext::Static(_) = ccx.const_kind() {
-            ccx.tcx.sess.create_err(InteriorMutableDataRefer {
+            ccx.tcx.sess.create_err(errors::InteriorMutableDataRefer {
                 span,
                 opt_help: Some(()),
                 kind: ccx.const_kind(),
                 teach: ccx.tcx.sess.teach(&error_code!(E0492)).then_some(()),
             })
         } else {
-            ccx.tcx.sess.create_err(InteriorMutableDataRefer {
+            ccx.tcx.sess.create_err(errors::InteriorMutableDataRefer {
                 span,
                 opt_help: None,
                 kind: ccx.const_kind(),
@@ -524,12 +524,12 @@ impl<'tcx> NonConstOp<'tcx> for MutBorrow {
         span: Span,
     ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
         match self.0 {
-            hir::BorrowKind::Raw => ccx.tcx.sess.create_err(UnallowedMutableRefsRaw {
+            hir::BorrowKind::Raw => ccx.tcx.sess.create_err(errors::UnallowedMutableRefsRaw {
                 span,
                 kind: ccx.const_kind(),
                 teach: ccx.tcx.sess.teach(&error_code!(E0764)).then_some(()),
             }),
-            hir::BorrowKind::Ref => ccx.tcx.sess.create_err(UnallowedMutableRefs {
+            hir::BorrowKind::Ref => ccx.tcx.sess.create_err(errors::UnallowedMutableRefs {
                 span,
                 kind: ccx.const_kind(),
                 teach: ccx.tcx.sess.teach(&error_code!(E0764)).then_some(()),
@@ -553,14 +553,14 @@ impl<'tcx> NonConstOp<'tcx> for TransientMutBorrow {
     ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
         let kind = ccx.const_kind();
         match self.0 {
-            hir::BorrowKind::Raw => ccx
-                .tcx
-                .sess
-                .create_feature_err(TransientMutBorrowErrRaw { span, kind }, sym::const_mut_refs),
-            hir::BorrowKind::Ref => ccx
-                .tcx
-                .sess
-                .create_feature_err(TransientMutBorrowErr { span, kind }, sym::const_mut_refs),
+            hir::BorrowKind::Raw => ccx.tcx.sess.create_feature_err(
+                errors::TransientMutBorrowErrRaw { span, kind },
+                sym::const_mut_refs,
+            ),
+            hir::BorrowKind::Ref => ccx.tcx.sess.create_feature_err(
+                errors::TransientMutBorrowErr { span, kind },
+                sym::const_mut_refs,
+            ),
         }
     }
 }
@@ -582,9 +582,10 @@ impl<'tcx> NonConstOp<'tcx> for MutDeref {
         ccx: &ConstCx<'_, 'tcx>,
         span: Span,
     ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
-        ccx.tcx
-            .sess
-            .create_feature_err(MutDerefErr { span, kind: ccx.const_kind() }, sym::const_mut_refs)
+        ccx.tcx.sess.create_feature_err(
+            errors::MutDerefErr { span, kind: ccx.const_kind() },
+            sym::const_mut_refs,
+        )
     }
 }
 
@@ -597,7 +598,7 @@ impl<'tcx> NonConstOp<'tcx> for PanicNonStr {
         ccx: &ConstCx<'_, 'tcx>,
         span: Span,
     ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
-        ccx.tcx.sess.create_err(PanicNonStrErr { span })
+        ccx.tcx.sess.create_err(errors::PanicNonStrErr { span })
     }
 }
 
@@ -648,7 +649,7 @@ impl<'tcx> NonConstOp<'tcx> for RawPtrToIntCast {
         ccx: &ConstCx<'_, 'tcx>,
         span: Span,
     ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
-        ccx.tcx.sess.create_err(RawPtrToIntErr { span })
+        ccx.tcx.sess.create_err(errors::RawPtrToIntErr { span })
     }
 }
 
@@ -669,7 +670,7 @@ impl<'tcx> NonConstOp<'tcx> for StaticAccess {
         ccx: &ConstCx<'_, 'tcx>,
         span: Span,
     ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
-        ccx.tcx.sess.create_err(StaticAccessErr {
+        ccx.tcx.sess.create_err(errors::StaticAccessErr {
             span,
             kind: ccx.const_kind(),
             teach: ccx.tcx.sess.teach(&error_code!(E0013)).then_some(()),
@@ -686,11 +687,11 @@ impl<'tcx> NonConstOp<'tcx> for ThreadLocalAccess {
         ccx: &ConstCx<'_, 'tcx>,
         span: Span,
     ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
-        ccx.tcx.sess.create_err(NonConstOpErr { span })
+        ccx.tcx.sess.create_err(errors::NonConstOpErr { span })
     }
 }
 
-// Types that cannot appear in the signature or locals of a `const fn`.
+/// Types that cannot appear in the signature or locals of a `const fn`.
 pub mod ty {
     use super::*;
 
diff --git a/compiler/rustc_const_eval/src/transform/check_consts/post_drop_elaboration.rs b/compiler/rustc_const_eval/src/transform/check_consts/post_drop_elaboration.rs
index d4570c59889..43806035a44 100644
--- a/compiler/rustc_const_eval/src/transform/check_consts/post_drop_elaboration.rs
+++ b/compiler/rustc_const_eval/src/transform/check_consts/post_drop_elaboration.rs
@@ -80,8 +80,7 @@ impl<'tcx> Visitor<'tcx> for CheckLiveDrops<'_, 'tcx> {
         trace!("visit_terminator: terminator={:?} location={:?}", terminator, location);
 
         match &terminator.kind {
-            mir::TerminatorKind::Drop { place: dropped_place, .. }
-            | mir::TerminatorKind::DropAndReplace { place: dropped_place, .. } => {
+            mir::TerminatorKind::Drop { place: dropped_place, .. } => {
                 let dropped_ty = dropped_place.ty(self.body, self.tcx).ty;
                 if !NeedsNonConstDrop::in_any_value_of_ty(self.ccx, dropped_ty) {
                     // Instead of throwing a bug, we just return here. This is because we have to
@@ -95,7 +94,7 @@ impl<'tcx> Visitor<'tcx> for CheckLiveDrops<'_, 'tcx> {
                 }
 
                 // Drop elaboration is not precise enough to accept code like
-                // `src/test/ui/consts/control-flow/drop-pass.rs`; e.g., when an `Option<Vec<T>>` is
+                // `tests/ui/consts/control-flow/drop-pass.rs`; e.g., when an `Option<Vec<T>>` is
                 // initialized with `None` and never changed, it still emits drop glue.
                 // Hence we additionally check the qualifs here to allow more code to pass.
                 if self.qualifs.needs_non_const_drop(self.ccx, dropped_place.local, location) {
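// Sketch of the pattern referenced above (after tests/ui/consts/control-flow/
// drop-pass.rs): drop elaboration keeps conditional drop glue for the local
// even though it is always `None`, so it is the qualif check that lets code
// like this compile.
const _: () = {
    let _x: Option<Vec<i32>> = None;
    // `_x` is never `Some`, so no non-const destructor can actually run
    // when it goes out of scope here.
};

fn main() {}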
diff --git a/compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs b/compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs
index d995d533ca3..bb4b7ad50b8 100644
--- a/compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs
+++ b/compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs
@@ -153,19 +153,12 @@ impl Qualif for NeedsNonConstDrop {
             return false;
         }
 
-        let destruct = cx.tcx.require_lang_item(LangItem::Destruct, None);
-
         let obligation = Obligation::new(
-            ObligationCause::dummy(),
+            cx.tcx,
+            ObligationCause::dummy_with_span(cx.body.span),
             cx.param_env,
-            ty::Binder::dummy(ty::TraitPredicate {
-                trait_ref: ty::TraitRef {
-                    def_id: destruct,
-                    substs: cx.tcx.mk_substs_trait(ty, &[]),
-                },
-                constness: ty::BoundConstness::ConstIfConst,
-                polarity: ty::ImplPolarity::Positive,
-            }),
+            ty::Binder::dummy(cx.tcx.at(cx.body.span).mk_trait_ref(LangItem::Destruct, [ty]))
+                .with_constness(ty::BoundConstness::ConstIfConst),
         );
 
         let infcx = cx.tcx.infer_ctxt().build();
@@ -224,10 +217,10 @@ impl Qualif for CustomEq {
 
     fn in_adt_inherently<'tcx>(
         cx: &ConstCx<'_, 'tcx>,
-        adt: AdtDef<'tcx>,
+        def: AdtDef<'tcx>,
         substs: SubstsRef<'tcx>,
     ) -> bool {
-        let ty = cx.tcx.mk_ty(ty::Adt(adt, substs));
+        let ty = cx.tcx.mk_adt(def, substs);
         !ty.is_structural_eq_shallow(cx.tcx)
     }
 }
@@ -351,7 +344,11 @@ where
     // FIXME(valtrees): check whether const qualifs should behave the same
     // way for type and mir constants.
     let uneval = match constant.literal {
-        ConstantKind::Ty(ct) if matches!(ct.kind(), ty::ConstKind::Param(_)) => None,
+        ConstantKind::Ty(ct)
+            if matches!(ct.kind(), ty::ConstKind::Param(_) | ty::ConstKind::Error(_)) =>
+        {
+            None
+        }
         ConstantKind::Ty(c) => bug!("expected ConstKind::Param here, found {:?}", c),
         ConstantKind::Unevaluated(uv, _) => Some(uv),
         ConstantKind::Val(..) => None,
diff --git a/compiler/rustc_const_eval/src/transform/check_consts/resolver.rs b/compiler/rustc_const_eval/src/transform/check_consts/resolver.rs
index 805e6096b35..78c74e1892d 100644
--- a/compiler/rustc_const_eval/src/transform/check_consts/resolver.rs
+++ b/compiler/rustc_const_eval/src/transform/check_consts/resolver.rs
@@ -222,23 +222,8 @@ where
         // The effect of assignment to the return place in `TerminatorKind::Call` is not applied
         // here; that occurs in `apply_call_return_effect`.
 
-        if let mir::TerminatorKind::DropAndReplace { value, place, .. } = &terminator.kind {
-            let qualif = qualifs::in_operand::<Q, _>(
-                self.ccx,
-                &mut |l| self.state.qualif.contains(l),
-                value,
-            );
-
-            if !place.is_indirect() {
-                self.assign_qualif_direct(place, qualif);
-            }
-        }
-
         // We ignore borrow on drop because custom drop impls are not allowed in consts.
         // FIXME: Reconsider if accounting for borrows in drops is necessary for const drop.
-
-        // We need to assign qualifs to the dropped location before visiting the operand that
-        // replaces it since qualifs can be cleared on move.
         self.super_terminator(terminator, location);
     }
 }
diff --git a/compiler/rustc_const_eval/src/transform/promote_consts.rs b/compiler/rustc_const_eval/src/transform/promote_consts.rs
index f48bcd90809..3f3b66b0645 100644
--- a/compiler/rustc_const_eval/src/transform/promote_consts.rs
+++ b/compiler/rustc_const_eval/src/transform/promote_consts.rs
@@ -18,7 +18,7 @@ use rustc_middle::mir::traversal::ReversePostorderIter;
 use rustc_middle::mir::visit::{MutVisitor, MutatingUseContext, PlaceContext, Visitor};
 use rustc_middle::mir::*;
 use rustc_middle::ty::subst::InternalSubsts;
-use rustc_middle::ty::{self, List, TyCtxt, TypeVisitable};
+use rustc_middle::ty::{self, List, TyCtxt, TypeVisitableExt};
 use rustc_span::Span;
 
 use rustc_index::vec::{Idx, IndexVec};
@@ -133,7 +133,7 @@ impl<'tcx> Visitor<'tcx> for Collector<'_, 'tcx> {
                 }
                 _ => { /* mark as unpromotable below */ }
             }
-        } else if let TempState::Defined { ref mut uses, .. } = *temp {
+        } else if let TempState::Defined { uses, .. } = temp {
             // We always allow borrows, even mutable ones, as we need
             // to promote mutable borrows of some ZSTs e.g., `&mut []`.
             let allowed_use = match context {
@@ -216,12 +216,6 @@ impl<'tcx> Validator<'_, 'tcx> {
                     return Err(Unpromotable);
                 }
 
-                // We cannot promote things that need dropping, since the promoted value
-                // would not get dropped.
-                if self.qualif_local::<qualifs::NeedsDrop>(place.local) {
-                    return Err(Unpromotable);
-                }
-
                 Ok(())
             }
             _ => bug!(),
@@ -262,13 +256,17 @@ impl<'tcx> Validator<'_, 'tcx> {
                 }
             }
         } else {
-            let span = self.body.local_decls[local].source_info.span;
-            span_bug!(span, "{:?} not promotable, qualif_local shouldn't have been called", local);
+            false
         }
     }
 
     fn validate_local(&mut self, local: Local) -> Result<(), Unpromotable> {
         if let TempState::Defined { location: loc, uses, valid } = self.temps[local] {
+            // We cannot promote things that need dropping, since the promoted value
+            // would not get dropped.
+            if self.qualif_local::<qualifs::NeedsDrop>(local) {
+                return Err(Unpromotable);
+            }
             valid.or_else(|_| {
                 let ok = {
                     let block = &self.body[loc.block];
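// Sketch of the rule just hoisted into `validate_local`: borrows of values
// that need dropping are never promoted, since a promoted (static-like)
// value would never run its destructor.
struct Noisy;
impl Drop for Noisy {
    fn drop(&mut self) {
        println!("dropped");
    }
}

fn main() {
    // `&0i32` is promoted to a 'static allocation; `&Noisy` would not be,
    // because `Noisy` needs drop:
    let ok: &'static i32 = &0;
    // let bad: &'static Noisy = &Noisy; // error: does not live long enough
    assert_eq!(*ok, 0);
}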
@@ -318,14 +316,14 @@ impl<'tcx> Validator<'_, 'tcx> {
                 match elem {
                     ProjectionElem::Deref => {
                         let mut promotable = false;
+                        // When a static is used by-value, that gets desugared to `*STATIC_ADDR`,
+                        // and we need to be able to promote this. So check if this deref matches
+                        // that specific pattern.
+
                         // We need to make sure this is a `Deref` of a local with no further projections.
                         // Discussion can be found at
                         // https://github.com/rust-lang/rust/pull/74945#discussion_r463063247
                         if let Some(local) = place_base.as_local() {
-                            // This is a special treatment for cases like *&STATIC where STATIC is a
-                            // global static variable.
-                            // This pattern is generated only when global static variables are directly
-                            // accessed and is qualified for promotion safely.
                             if let TempState::Defined { location, .. } = self.temps[local] {
                                 let def_stmt = self.body[location.block]
                                     .statements
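// Sketch of the desugaring mentioned above: reading a static by value goes
// through its address in MIR, conceptually `*(&S)`, and that deref is the
// specific pattern this check has to accept.
static S: i32 = 1;

fn main() {
    let x = S; // by-value use of a static
    assert_eq!(x, 1);
}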
@@ -366,31 +364,33 @@ impl<'tcx> Validator<'_, 'tcx> {
                     ProjectionElem::Index(local) => {
                         let mut promotable = false;
                         // Only accept if we can predict the index and are indexing an array.
-                        let val =
-                            if let TempState::Defined { location: loc, .. } = self.temps[local] {
-                                let block = &self.body[loc.block];
-                                if loc.statement_index < block.statements.len() {
-                                    let statement = &block.statements[loc.statement_index];
-                                    match &statement.kind {
-                                        StatementKind::Assign(box (
-                                            _,
-                                            Rvalue::Use(Operand::Constant(c)),
-                                        )) => c.literal.try_eval_usize(self.tcx, self.param_env),
-                                        _ => None,
-                                    }
-                                } else {
-                                    None
+                        let val = if let TempState::Defined { location: loc, .. } =
+                            self.temps[local]
+                        {
+                            let block = &self.body[loc.block];
+                            if loc.statement_index < block.statements.len() {
+                                let statement = &block.statements[loc.statement_index];
+                                match &statement.kind {
+                                    StatementKind::Assign(box (
+                                        _,
+                                        Rvalue::Use(Operand::Constant(c)),
+                                    )) => c.literal.try_eval_target_usize(self.tcx, self.param_env),
+                                    _ => None,
                                 }
                             } else {
                                 None
-                            };
+                            }
+                        } else {
+                            None
+                        };
                         if let Some(idx) = val {
                             // Determine the type of the thing we are indexing.
                             let ty = place_base.ty(self.body, self.tcx).ty;
                             match ty.kind() {
                                 ty::Array(_, len) => {
                                     // It's an array; determine its length.
-                                    if let Some(len) = len.try_eval_usize(self.tcx, self.param_env)
+                                    if let Some(len) =
+                                        len.try_eval_target_usize(self.tcx, self.param_env)
                                     {
                                         // If the index is in-bounds, go ahead.
                                         if idx < len {
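// Sketch of an index the validator can predict: constant index 1 into an
// array of known length 3, in bounds and therefore promotable.
fn main() {
    let r: &'static i32 = &[10, 20, 30][1];
    assert_eq!(*r, 20);
}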
@@ -472,7 +472,7 @@ impl<'tcx> Validator<'_, 'tcx> {
                 // mutably without consequences. However, only &mut []
                 // is allowed right now.
                 if let ty::Array(_, len) = ty.kind() {
-                    match len.try_eval_usize(self.tcx, self.param_env) {
+                    match len.try_eval_target_usize(self.tcx, self.param_env) {
                         Some(0) => {}
                         _ => return Err(Unpromotable),
                     }
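// Sketch of the one mutable borrow that is promotable per the check above:
// a zero-length array, through which nothing can actually be mutated.
fn main() {
    let empty: &'static mut [i32] = &mut [];
    assert!(empty.is_empty());
}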
@@ -750,7 +750,7 @@ impl<'a, 'tcx> Promoter<'a, 'tcx> {
         if loc.statement_index < num_stmts {
             let (mut rvalue, source_info) = {
                 let statement = &mut self.source[loc.block].statements[loc.statement_index];
-                let StatementKind::Assign(box (_, ref mut rhs)) = statement.kind else {
+                let StatementKind::Assign(box (_, rhs)) = &mut statement.kind else {
                     span_bug!(
                         statement.source_info.span,
                         "{:?} is not an assignment",
@@ -780,9 +780,9 @@ impl<'a, 'tcx> Promoter<'a, 'tcx> {
                 self.source[loc.block].terminator().clone()
             } else {
                 let terminator = self.source[loc.block].terminator_mut();
-                let target = match terminator.kind {
-                    TerminatorKind::Call { target: Some(target), .. } => target,
-                    ref kind => {
+                let target = match &terminator.kind {
+                    TerminatorKind::Call { target: Some(target), .. } => *target,
+                    kind => {
                         span_bug!(terminator.source_info.span, "{:?} not promotable", kind);
                     }
                 };
@@ -816,7 +816,7 @@ impl<'a, 'tcx> Promoter<'a, 'tcx> {
                         ..terminator
                     };
                 }
-                ref kind => {
+                kind => {
                     span_bug!(terminator.source_info.span, "{:?} not promotable", kind);
                 }
             };
@@ -849,60 +849,56 @@ impl<'a, 'tcx> Promoter<'a, 'tcx> {
             let local_decls = &mut self.source.local_decls;
             let loc = candidate.location;
             let statement = &mut blocks[loc.block].statements[loc.statement_index];
-            match statement.kind {
-                StatementKind::Assign(box (
-                    _,
-                    Rvalue::Ref(ref mut region, borrow_kind, ref mut place),
-                )) => {
-                    // Use the underlying local for this (necessarily interior) borrow.
-                    let ty = local_decls[place.local].ty;
-                    let span = statement.source_info.span;
-
-                    let ref_ty = tcx.mk_ref(
-                        tcx.lifetimes.re_erased,
-                        ty::TypeAndMut { ty, mutbl: borrow_kind.to_mutbl_lossy() },
-                    );
+            let StatementKind::Assign(box (_, Rvalue::Ref(region, borrow_kind, place))) = &mut statement.kind else {
+                bug!()
+            };
 
-                    *region = tcx.lifetimes.re_erased;
-
-                    let mut projection = vec![PlaceElem::Deref];
-                    projection.extend(place.projection);
-                    place.projection = tcx.intern_place_elems(&projection);
-
-                    // Create a temp to hold the promoted reference.
-                    // This is because `*r` requires `r` to be a local,
-                    // otherwise we would use the `promoted` directly.
-                    let mut promoted_ref = LocalDecl::new(ref_ty, span);
-                    promoted_ref.source_info = statement.source_info;
-                    let promoted_ref = local_decls.push(promoted_ref);
-                    assert_eq!(self.temps.push(TempState::Unpromotable), promoted_ref);
-
-                    let promoted_ref_statement = Statement {
-                        source_info: statement.source_info,
-                        kind: StatementKind::Assign(Box::new((
-                            Place::from(promoted_ref),
-                            Rvalue::Use(promoted_operand(ref_ty, span)),
-                        ))),
-                    };
-                    self.extra_statements.push((loc, promoted_ref_statement));
-
-                    Rvalue::Ref(
-                        tcx.lifetimes.re_erased,
-                        borrow_kind,
-                        Place {
-                            local: mem::replace(&mut place.local, promoted_ref),
-                            projection: List::empty(),
-                        },
-                    )
-                }
-                _ => bug!(),
-            }
+            // Use the underlying local for this (necessarily interior) borrow.
+            let ty = local_decls[place.local].ty;
+            let span = statement.source_info.span;
+
+            let ref_ty = tcx.mk_ref(
+                tcx.lifetimes.re_erased,
+                ty::TypeAndMut { ty, mutbl: borrow_kind.to_mutbl_lossy() },
+            );
+
+            *region = tcx.lifetimes.re_erased;
+
+            let mut projection = vec![PlaceElem::Deref];
+            projection.extend(place.projection);
+            place.projection = tcx.mk_place_elems(&projection);
+
+            // Create a temp to hold the promoted reference.
+            // This is because `*r` requires `r` to be a local,
+            // otherwise we would use the `promoted` directly.
+            let mut promoted_ref = LocalDecl::new(ref_ty, span);
+            promoted_ref.source_info = statement.source_info;
+            let promoted_ref = local_decls.push(promoted_ref);
+            assert_eq!(self.temps.push(TempState::Unpromotable), promoted_ref);
+
+            let promoted_ref_statement = Statement {
+                source_info: statement.source_info,
+                kind: StatementKind::Assign(Box::new((
+                    Place::from(promoted_ref),
+                    Rvalue::Use(promoted_operand(ref_ty, span)),
+                ))),
+            };
+            self.extra_statements.push((loc, promoted_ref_statement));
+
+            Rvalue::Ref(
+                tcx.lifetimes.re_erased,
+                *borrow_kind,
+                Place {
+                    local: mem::replace(&mut place.local, promoted_ref),
+                    projection: List::empty(),
+                },
+            )
         };
 
         assert_eq!(self.new_block(), START_BLOCK);
         self.visit_rvalue(
             &mut rvalue,
-            Location { block: BasicBlock::new(0), statement_index: usize::MAX },
+            Location { block: START_BLOCK, statement_index: usize::MAX },
         );
 
         let span = self.promoted.span;
diff --git a/compiler/rustc_const_eval/src/transform/validate.rs b/compiler/rustc_const_eval/src/transform/validate.rs
index 81b82a21fa1..49b1e6d967c 100644
--- a/compiler/rustc_const_eval/src/transform/validate.rs
+++ b/compiler/rustc_const_eval/src/transform/validate.rs
@@ -1,25 +1,25 @@
 //! Validates the MIR to ensure that invariants are upheld.
 
-use rustc_data_structures::fx::FxHashSet;
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
 use rustc_index::bit_set::BitSet;
-use rustc_infer::infer::TyCtxtInferExt;
+use rustc_index::vec::IndexVec;
+use rustc_infer::traits::Reveal;
 use rustc_middle::mir::interpret::Scalar;
 use rustc_middle::mir::visit::NonUseContext::VarDebugInfo;
 use rustc_middle::mir::visit::{PlaceContext, Visitor};
 use rustc_middle::mir::{
-    traversal, AggregateKind, BasicBlock, BinOp, Body, BorrowKind, CastKind, CopyNonOverlapping,
-    Local, Location, MirPass, MirPhase, NonDivergingIntrinsic, Operand, Place, PlaceElem, PlaceRef,
-    ProjectionElem, RuntimePhase, Rvalue, SourceScope, Statement, StatementKind, Terminator,
+    traversal, BasicBlock, BinOp, Body, BorrowKind, CastKind, CopyNonOverlapping, Local, Location,
+    MirPass, MirPhase, NonDivergingIntrinsic, Operand, Place, PlaceElem, PlaceRef, ProjectionElem,
+    RetagKind, RuntimePhase, Rvalue, SourceScope, Statement, StatementKind, Terminator,
     TerminatorKind, UnOp, START_BLOCK,
 };
-use rustc_middle::ty::fold::BottomUpFolder;
-use rustc_middle::ty::{self, InstanceDef, ParamEnv, Ty, TyCtxt, TypeFoldable, TypeVisitable};
+use rustc_middle::ty::{self, InstanceDef, ParamEnv, Ty, TyCtxt, TypeVisitableExt};
 use rustc_mir_dataflow::impls::MaybeStorageLive;
 use rustc_mir_dataflow::storage::always_storage_live_locals;
 use rustc_mir_dataflow::{Analysis, ResultsCursor};
 use rustc_target::abi::{Size, VariantIdx};
 
-#[derive(Copy, Clone, Debug)]
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
 enum EdgeKind {
     Unwind,
     Normal,
@@ -46,66 +46,44 @@ impl<'tcx> MirPass<'tcx> for Validator {
             return;
         }
         let def_id = body.source.def_id();
-        let param_env = tcx.param_env(def_id);
         let mir_phase = self.mir_phase;
+        let param_env = match mir_phase.reveal() {
+            Reveal::UserFacing => tcx.param_env(def_id),
+            Reveal::All => tcx.param_env_reveal_all_normalized(def_id),
+        };
 
         let always_live_locals = always_storage_live_locals(body);
-        let storage_liveness = MaybeStorageLive::new(always_live_locals)
+        let storage_liveness = MaybeStorageLive::new(std::borrow::Cow::Owned(always_live_locals))
             .into_engine(tcx, body)
             .iterate_to_fixpoint()
             .into_results_cursor(body);
 
-        TypeChecker {
+        let mut checker = TypeChecker {
             when: &self.when,
             body,
             tcx,
             param_env,
             mir_phase,
+            unwind_edge_count: 0,
             reachable_blocks: traversal::reachable_as_bitset(body),
             storage_liveness,
             place_cache: Vec::new(),
             value_cache: Vec::new(),
+        };
+        checker.visit_body(body);
+        checker.check_cleanup_control_flow();
+
+        if let MirPhase::Runtime(_) = body.phase {
+            if let ty::InstanceDef::Item(_) = body.source.instance {
+                if body.has_free_regions() {
+                    checker.fail(
+                        Location::START,
+                        format!("Free regions in optimized {} MIR", body.phase.name()),
+                    );
+                }
+            }
         }
-        .visit_body(body);
-    }
-}
-
-/// Returns whether the two types are equal up to lifetimes.
-/// All lifetimes, including higher-ranked ones, get ignored for this comparison.
-/// (This is unlike the `erasing_regions` methods, which keep higher-ranked lifetimes for soundness reasons.)
-///
-/// The point of this function is to approximate "equal up to subtyping".  However,
-/// the approximation is incorrect as variance is ignored.
-pub fn equal_up_to_regions<'tcx>(
-    tcx: TyCtxt<'tcx>,
-    param_env: ParamEnv<'tcx>,
-    src: Ty<'tcx>,
-    dest: Ty<'tcx>,
-) -> bool {
-    // Fast path.
-    if src == dest {
-        return true;
     }
-
-    // Normalize lifetimes away on both sides, then compare.
-    let normalize = |ty: Ty<'tcx>| {
-        tcx.try_normalize_erasing_regions(param_env, ty).unwrap_or(ty).fold_with(
-            &mut BottomUpFolder {
-                tcx,
-                // FIXME: We erase all late-bound lifetimes, but this is not fully correct.
-                // If you have a type like `<for<'a> fn(&'a u32) as SomeTrait>::Assoc`,
-                // this is not necessarily equivalent to `<fn(&'static u32) as SomeTrait>::Assoc`,
-                // since one may have an `impl SomeTrait for fn(&32)` and
-                // `impl SomeTrait for fn(&'static u32)` at the same time which
-                // specify distinct values for Assoc. (See also #56105)
-                lt_op: |_| tcx.lifetimes.re_erased,
-                // Leave consts and types unchanged.
-                ct_op: |ct| ct,
-                ty_op: |ty| ty,
-            },
-        )
-    };
-    tcx.infer_ctxt().build().can_eq(param_env, normalize(src), normalize(dest)).is_ok()
 }
 
 struct TypeChecker<'a, 'tcx> {
@@ -114,13 +92,15 @@ struct TypeChecker<'a, 'tcx> {
     tcx: TyCtxt<'tcx>,
     param_env: ParamEnv<'tcx>,
     mir_phase: MirPhase,
+    unwind_edge_count: usize,
     reachable_blocks: BitSet<BasicBlock>,
-    storage_liveness: ResultsCursor<'a, 'tcx, MaybeStorageLive>,
+    storage_liveness: ResultsCursor<'a, 'tcx, MaybeStorageLive<'static>>,
     place_cache: Vec<PlaceRef<'tcx>>,
     value_cache: Vec<u128>,
 }
 
 impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
+    #[track_caller]
     fn fail(&self, location: Location, msg: impl AsRef<str>) {
         let span = self.body.source_info(location).span;
         // We use `delay_span_bug` as we might see broken MIR when other errors have already
@@ -137,7 +117,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
         );
     }
 
-    fn check_edge(&self, location: Location, bb: BasicBlock, edge_kind: EdgeKind) {
+    fn check_edge(&mut self, location: Location, bb: BasicBlock, edge_kind: EdgeKind) {
         if bb == START_BLOCK {
             self.fail(location, "start block must not have predecessors")
         }
@@ -146,10 +126,12 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
             match (src.is_cleanup, bb.is_cleanup, edge_kind) {
                 // Non-cleanup blocks can jump to non-cleanup blocks along non-unwind edges
                 (false, false, EdgeKind::Normal)
-                // Non-cleanup blocks can jump to cleanup blocks along unwind edges
-                | (false, true, EdgeKind::Unwind)
                 // Cleanup blocks can jump to cleanup blocks along non-unwind edges
                 | (true, true, EdgeKind::Normal) => {}
+                // Non-cleanup blocks can jump to cleanup blocks along unwind edges
+                (false, true, EdgeKind::Unwind) => {
+                    self.unwind_edge_count += 1;
+                }
                 // All other jumps are invalid
                 _ => {
                     self.fail(
@@ -169,6 +151,88 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
         }
     }
 
+    fn check_cleanup_control_flow(&self) {
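+        // A sketch of the invariant being enforced here: unwind control flow
+        // must be sufficiently tree-shaped. Each chain of cleanup blocks may
+        // funnel into at most one other chain, and there must be no cycles
+        // among cleanup blocks.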
+        if self.unwind_edge_count <= 1 {
+            return;
+        }
+        let doms = self.body.basic_blocks.dominators();
+        let mut post_contract_node = FxHashMap::default();
+        // Reusing the allocation across invocations of the closure
+        let mut dom_path = vec![];
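+        // Contract each chain of cleanup blocks into a single representative:
+        // walk up the dominator tree until reaching a block whose immediate
+        // dominator is not a cleanup block, memoizing the result for every
+        // block visited along the way.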
+        let mut get_post_contract_node = |mut bb| {
+            let root = loop {
+                if let Some(root) = post_contract_node.get(&bb) {
+                    break *root;
+                }
+                let parent = doms.immediate_dominator(bb);
+                dom_path.push(bb);
+                if !self.body.basic_blocks[parent].is_cleanup {
+                    break bb;
+                }
+                bb = parent;
+            };
+            for bb in dom_path.drain(..) {
+                post_contract_node.insert(bb, root);
+            }
+            root
+        };
+
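+        // Record, for each contracted cleanup node, the unique node that its
+        // outgoing edges lead to; two distinct targets are a violation.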
+        let mut parent = IndexVec::from_elem(None, &self.body.basic_blocks);
+        for (bb, bb_data) in self.body.basic_blocks.iter_enumerated() {
+            if !bb_data.is_cleanup || !self.reachable_blocks.contains(bb) {
+                continue;
+            }
+            let bb = get_post_contract_node(bb);
+            for s in bb_data.terminator().successors() {
+                let s = get_post_contract_node(s);
+                if s == bb {
+                    continue;
+                }
+                let parent = &mut parent[bb];
+                match parent {
+                    None => {
+                        *parent = Some(s);
+                    }
+                    Some(e) if *e == s => (),
+                    Some(e) => self.fail(
+                        Location { block: bb, statement_index: 0 },
+                        format!(
+                            "Cleanup control flow violation: The blocks dominated by {:?} have edges to both {:?} and {:?}",
+                            bb,
+                            s,
+                            *e
+                        )
+                    ),
+                }
+            }
+        }
+
+        // Check for cycles
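+        // Walk the parent pointers starting from every block; `take()` clears
+        // each pointer as it is followed, so every edge is traversed at most
+        // once across all iterations of the outer loop.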
+        let mut stack = FxHashSet::default();
+        for i in 0..parent.len() {
+            let mut bb = BasicBlock::from_usize(i);
+            stack.clear();
+            stack.insert(bb);
+            loop {
+                let Some(parent) = parent[bb].take() else {
+                    break;
+                };
+                let no_cycle = stack.insert(parent);
+                if !no_cycle {
+                    self.fail(
+                        Location { block: bb, statement_index: 0 },
+                        format!(
+                            "Cleanup control flow violation: Cycle involving edge {:?} -> {:?}",
+                            bb, parent,
+                        ),
+                    );
+                    break;
+                }
+                bb = parent;
+            }
+        }
+    }
+
     /// Check if src can be assigned into dest.
     /// This is not precise, it will accept some incorrect assignments.
     fn mir_assign_valid_types(&self, src: Ty<'tcx>, dest: Ty<'tcx>) -> bool {
@@ -177,28 +241,17 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
             // Equal types, all is good.
             return true;
         }
-        // Normalization reveals opaque types, but we may be validating MIR while computing
-        // said opaque types, causing cycles.
-        if (src, dest).has_opaque_types() {
-            return true;
-        }
 
-        // Normalize projections and things like that.
-        // Type-changing assignments can happen when subtyping is used. While
-        // all normal lifetimes are erased, higher-ranked types with their
-        // late-bound lifetimes are still around and can lead to type
-        // differences. So we compare ignoring lifetimes.
-
-        // First, try with reveal_all. This might not work in some cases, as the predicates
-        // can be cleared in reveal_all mode. We try the reveal first anyways as it is used
-        // by some other passes like inlining as well.
-        let param_env = self.param_env.with_reveal_all_normalized(self.tcx);
-        if equal_up_to_regions(self.tcx, param_env, src, dest) {
+        // We sometimes have to use `defining_opaque_types` for subtyping
+        // to succeed here and figuring out how exactly that should work
+        // is annoying. It is harmless enough to just not validate anything
+        // in that case. We still check this after analysis as all opaque
+        // types have been revealed at this point.
+        if (src, dest).has_opaque_types() {
             return true;
         }
 
-        // If this fails, we can try it without the reveal.
-        equal_up_to_regions(self.tcx, self.param_env, src, dest)
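+        // Otherwise, the assignment is valid exactly when `src` is a subtype
+        // of `dest`; such type-changing assignments can arise e.g. from
+        // higher-ranked lifetimes.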
+        crate::util::is_subtype(self.tcx, self.param_env, src, dest)
     }
 }
 
@@ -273,7 +326,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
                 }
             }
             ProjectionElem::Field(f, ty) => {
-                let parent = Place { local, projection: self.tcx.intern_place_elems(proj_base) };
+                let parent = Place { local, projection: self.tcx.mk_place_elems(proj_base) };
                 let parent_ty = parent.ty(&self.body.local_decls, self.tcx);
                 let fail_out_of_bounds = |this: &Self, location| {
                     this.fail(location, format!("Out of bounds field {:?} for {:?}", f, parent_ty));
@@ -281,18 +334,18 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
                 let check_equal = |this: &Self, location, f_ty| {
                     if !this.mir_assign_valid_types(ty, f_ty) {
                         this.fail(
-                        location,
-                        format!(
-                            "Field projection `{:?}.{:?}` specified type `{:?}`, but actual type is `{:?}`",
-                            parent, f, ty, f_ty
+                            location,
+                            format!(
+                                "Field projection `{:?}.{:?}` specified type `{:?}`, but actual type is `{:?}`",
+                                parent, f, ty, f_ty
+                            )
                         )
-                    )
                     }
                 };
 
                 let kind = match parent_ty.ty.kind() {
-                    &ty::Opaque(def_id, substs) => {
-                        self.tcx.bound_type_of(def_id).subst(self.tcx, substs).kind()
+                    &ty::Alias(ty::Opaque, ty::AliasTy { def_id, substs, .. }) => {
+                        self.tcx.type_of(def_id).subst(self.tcx, substs).kind()
                     }
                     kind => kind,
                 };
@@ -339,12 +392,12 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
                                 return;
                             };
 
-                            let Some(&f_ty) = layout.field_tys.get(local) else {
+                            let Some(f_ty) = layout.field_tys.get(local) else {
                                 self.fail(location, format!("Out of bounds local {:?} for {:?}", local, parent_ty));
                                 return;
                             };
 
-                            f_ty
+                            f_ty.ty
                         } else {
                             let Some(f_ty) = substs.as_generator().prefix_tys().nth(f.index()) else {
                                 fail_out_of_bounds(self, location);
@@ -390,19 +443,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
             };
         }
         match rvalue {
-            Rvalue::Use(_) | Rvalue::CopyForDeref(_) => {}
-            Rvalue::Aggregate(agg_kind, _) => {
-                let disallowed = match **agg_kind {
-                    AggregateKind::Array(..) => false,
-                    _ => self.mir_phase >= MirPhase::Runtime(RuntimePhase::PostCleanup),
-                };
-                if disallowed {
-                    self.fail(
-                        location,
-                        format!("{:?} have been lowered to field assignments", rvalue),
-                    )
-                }
-            }
+            Rvalue::Use(_) | Rvalue::CopyForDeref(_) | Rvalue::Aggregate(..) => {}
             Rvalue::Ref(_, BorrowKind::Shallow, _) => {
                 if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
                     self.fail(
@@ -638,6 +679,14 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
                     }
                 }
             }
+            StatementKind::PlaceMention(..) => {
+                if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
+                    self.fail(
+                        location,
+                        "`PlaceMention` should have been removed after drop lowering phase",
+                    );
+                }
+            }
             StatementKind::AscribeUserType(..) => {
                 if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
                     self.fail(
@@ -702,7 +751,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
                     self.fail(location, "`SetDiscriminant`is not allowed until deaggregation");
                 }
                 let pty = place.ty(&self.body.local_decls, self.tcx).ty.kind();
-                if !matches!(pty, ty::Adt(..) | ty::Generator(..) | ty::Opaque(..)) {
+                if !matches!(pty, ty::Adt(..) | ty::Generator(..) | ty::Alias(ty::Opaque, ..)) {
                     self.fail(
                         location,
                         format!(
@@ -717,14 +766,36 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
                     self.fail(location, "`Deinit`is not allowed until deaggregation");
                 }
             }
-            StatementKind::Retag(_, _) => {
+            StatementKind::Retag(kind, _) => {
                 // FIXME(JakobDegen) The validator should check that `self.mir_phase <
                 // DropsLowered`. However, this causes ICEs with generation of drop shims, which
                 // seem to fail to set their `MirPhase` correctly.
+                if matches!(kind, RetagKind::Raw | RetagKind::TwoPhase) {
+                    self.fail(location, format!("explicit `{:?}` is forbidden", kind));
+                }
+            }
+            StatementKind::StorageLive(local) => {
+                // We check that the local is not live when entering a `StorageLive` for it.
+                // Technically, violating this restriction is only UB and not actually indicative
+                // of not well-formed MIR. This means that an optimization which turns MIR that
+                // already has UB into MIR that fails this check is not necessarily wrong. However,
+                // we have no such optimizations at the moment, and so we include this check anyway
+                // to help us catch bugs. If you happen to write an optimization that might cause
+                // this to incorrectly fire, feel free to remove this check.
+                if self.reachable_blocks.contains(location.block) {
+                    self.storage_liveness.seek_before_primary_effect(location);
+                    let locals_with_storage = self.storage_liveness.get();
+                    if locals_with_storage.contains(*local) {
+                        self.fail(
+                            location,
+                            format!("StorageLive({local:?}) which already has storage here"),
+                        );
+                    }
+                }
             }
-            StatementKind::StorageLive(..)
-            | StatementKind::StorageDead(..)
+            StatementKind::StorageDead(_)
             | StatementKind::Coverage(_)
+            | StatementKind::ConstEvalCounter
             | StatementKind::Nop => {}
         }
 
@@ -736,17 +807,8 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
             TerminatorKind::Goto { target } => {
                 self.check_edge(location, *target, EdgeKind::Normal);
             }
-            TerminatorKind::SwitchInt { targets, switch_ty, discr } => {
-                let ty = discr.ty(&self.body.local_decls, self.tcx);
-                if ty != *switch_ty {
-                    self.fail(
-                        location,
-                        format!(
-                            "encountered `SwitchInt` terminator with type mismatch: {:?} != {:?}",
-                            ty, switch_ty,
-                        ),
-                    );
-                }
+            TerminatorKind::SwitchInt { targets, discr } => {
+                let switch_ty = discr.ty(&self.body.local_decls, self.tcx);
 
                 let target_width = self.tcx.sess.target.pointer_width;
 
@@ -792,18 +854,6 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
                     self.check_edge(location, *unwind, EdgeKind::Unwind);
                 }
             }
-            TerminatorKind::DropAndReplace { target, unwind, .. } => {
-                if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
-                    self.fail(
-                        location,
-                        "`DropAndReplace` should have been removed during drop elaboration",
-                    );
-                }
-                self.check_edge(location, *target, EdgeKind::Normal);
-                if let Some(unwind) = unwind {
-                    self.check_edge(location, *unwind, EdgeKind::Unwind);
-                }
-            }
             TerminatorKind::Call { func, args, destination, target, cleanup, .. } => {
                 let func_ty = func.ty(&self.body.local_decls, self.tcx);
                 match func_ty.kind() {
diff --git a/compiler/rustc_const_eval/src/util/aggregate.rs b/compiler/rustc_const_eval/src/util/aggregate.rs
deleted file mode 100644
index 180a40043db..00000000000
--- a/compiler/rustc_const_eval/src/util/aggregate.rs
+++ /dev/null
@@ -1,77 +0,0 @@
-use rustc_index::vec::Idx;
-use rustc_middle::mir::*;
-use rustc_middle::ty::{Ty, TyCtxt};
-use rustc_target::abi::VariantIdx;
-
-use std::convert::TryFrom;
-use std::iter::TrustedLen;
-
-/// Expand `lhs = Rvalue::Aggregate(kind, operands)` into assignments to the fields.
-///
-/// Produces something like
-///
-/// (lhs as Variant).field0 = arg0;     // We only have a downcast if this is an enum
-/// (lhs as Variant).field1 = arg1;
-/// discriminant(lhs) = variant_index;  // If lhs is an enum or generator.
-pub fn expand_aggregate<'tcx>(
-    orig_lhs: Place<'tcx>,
-    operands: impl Iterator<Item = (Operand<'tcx>, Ty<'tcx>)> + TrustedLen,
-    kind: AggregateKind<'tcx>,
-    source_info: SourceInfo,
-    tcx: TyCtxt<'tcx>,
-) -> impl Iterator<Item = Statement<'tcx>> + TrustedLen {
-    let mut lhs = orig_lhs;
-    let mut set_discriminant = None;
-    let active_field_index = match kind {
-        AggregateKind::Adt(adt_did, variant_index, _, _, active_field_index) => {
-            let adt_def = tcx.adt_def(adt_did);
-            if adt_def.is_enum() {
-                set_discriminant = Some(Statement {
-                    kind: StatementKind::SetDiscriminant {
-                        place: Box::new(orig_lhs),
-                        variant_index,
-                    },
-                    source_info,
-                });
-                lhs = tcx.mk_place_downcast(orig_lhs, adt_def, variant_index);
-            }
-            active_field_index
-        }
-        AggregateKind::Generator(..) => {
-            // Right now we only support initializing generators to
-            // variant 0 (Unresumed).
-            let variant_index = VariantIdx::new(0);
-            set_discriminant = Some(Statement {
-                kind: StatementKind::SetDiscriminant { place: Box::new(orig_lhs), variant_index },
-                source_info,
-            });
-
-            // Operands are upvars stored on the base place, so no
-            // downcast is necessary.
-
-            None
-        }
-        _ => None,
-    };
-
-    let operands = operands.enumerate().map(move |(i, (op, ty))| {
-        let lhs_field = if let AggregateKind::Array(_) = kind {
-            let offset = u64::try_from(i).unwrap();
-            tcx.mk_place_elem(
-                lhs,
-                ProjectionElem::ConstantIndex { offset, min_length: offset + 1, from_end: false },
-            )
-        } else {
-            let field = Field::new(active_field_index.unwrap_or(i));
-            tcx.mk_place_field(lhs, field, ty)
-        };
-        Statement {
-            source_info,
-            kind: StatementKind::Assign(Box::new((lhs_field, Rvalue::Use(op)))),
-        }
-    });
-    [Statement { source_info, kind: StatementKind::Deinit(Box::new(orig_lhs)) }]
-        .into_iter()
-        .chain(operands)
-        .chain(set_discriminant)
-}
diff --git a/compiler/rustc_const_eval/src/util/call_kind.rs b/compiler/rustc_const_eval/src/util/call_kind.rs
index 5446ccb1a47..995363c0edd 100644
--- a/compiler/rustc_const_eval/src/util/call_kind.rs
+++ b/compiler/rustc_const_eval/src/util/call_kind.rs
@@ -3,9 +3,9 @@
 //! context.
 
 use rustc_hir::def_id::DefId;
-use rustc_hir::lang_items;
+use rustc_hir::{lang_items, LangItem};
 use rustc_middle::ty::subst::SubstsRef;
-use rustc_middle::ty::{self, AssocItemContainer, DefIdTree, Instance, ParamEnv, Ty, TyCtxt};
+use rustc_middle::ty::{AssocItemContainer, Instance, ParamEnv, Ty, TyCtxt};
 use rustc_span::symbol::Ident;
 use rustc_span::{sym, DesugaringKind, Span};
 
@@ -26,7 +26,7 @@ impl CallDesugaringKind {
         match self {
             Self::ForLoopIntoIter => tcx.get_diagnostic_item(sym::IntoIterator).unwrap(),
             Self::QuestionBranch | Self::TryBlockFromOutput => {
-                tcx.lang_items().try_trait().unwrap()
+                tcx.require_lang_item(LangItem::Try, None)
             }
             Self::QuestionFromResidual => tcx.get_diagnostic_item(sym::FromResidual).unwrap(),
         }
@@ -39,9 +39,8 @@ pub enum CallKind<'tcx> {
     Normal {
         self_arg: Option<Ident>,
         desugaring: Option<(CallDesugaringKind, Ty<'tcx>)>,
-        /// Whether the self type of the method call has an `.as_ref()` method.
-        /// Used for better diagnostics.
-        is_option_or_result: bool,
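+        /// The method being called, so diagnostics can inspect the callee.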
+        method_did: DefId,
+        method_substs: SubstsRef<'tcx>,
     },
     /// A call to `Fn(..)::call(..)`, desugared from `my_closure(a, b, c)`
     FnCall { fn_trait_id: DefId, self_ty: Ty<'tcx> },
@@ -133,16 +132,6 @@ pub fn call_kind<'tcx>(
         } else {
             None
         };
-        let parent_did = tcx.parent(method_did);
-        let parent_self_ty = (tcx.def_kind(parent_did) == rustc_hir::def::DefKind::Impl)
-            .then_some(parent_did)
-            .and_then(|did| match tcx.type_of(did).kind() {
-                ty::Adt(def, ..) => Some(def.did()),
-                _ => None,
-            });
-        let is_option_or_result = parent_self_ty.map_or(false, |def_id| {
-            matches!(tcx.get_diagnostic_name(def_id), Some(sym::Option | sym::Result))
-        });
-        CallKind::Normal { self_arg, desugaring, is_option_or_result }
+        CallKind::Normal { self_arg, desugaring, method_did, method_substs }
     })
 }
diff --git a/compiler/rustc_const_eval/src/util/might_permit_raw_init.rs b/compiler/rustc_const_eval/src/util/check_validity_requirement.rs
index 6ca71223391..23fcd22c52b 100644
--- a/compiler/rustc_const_eval/src/util/might_permit_raw_init.rs
+++ b/compiler/rustc_const_eval/src/util/check_validity_requirement.rs
@@ -1,9 +1,9 @@
-use rustc_middle::ty::layout::{LayoutCx, LayoutOf, TyAndLayout};
-use rustc_middle::ty::{ParamEnv, TyCtxt};
+use rustc_middle::ty::layout::{LayoutCx, LayoutError, LayoutOf, TyAndLayout, ValidityRequirement};
+use rustc_middle::ty::{ParamEnv, ParamEnvAnd, Ty, TyCtxt};
 use rustc_session::Limit;
-use rustc_target::abi::{Abi, FieldsShape, InitKind, Scalar, Variants};
+use rustc_target::abi::{Abi, FieldsShape, Scalar, Variants};
 
-use crate::const_eval::CompileTimeInterpreter;
+use crate::const_eval::{CheckAlignment, CompileTimeInterpreter};
 use crate::interpret::{InterpCx, MemoryKind, OpTy};
 
 /// Determines if this type permits "raw" initialization by just transmuting some memory into an
@@ -18,16 +18,23 @@ use crate::interpret::{InterpCx, MemoryKind, OpTy};
 /// Rust UB as long as there is no risk of miscompilations. The `strict_init_checks` can be set to
 /// do a full check against Rust UB instead (in which case we will also ignore the 0x01-filling and
 /// do the full uninit check).
-pub fn might_permit_raw_init<'tcx>(
+pub fn check_validity_requirement<'tcx>(
     tcx: TyCtxt<'tcx>,
-    ty: TyAndLayout<'tcx>,
-    kind: InitKind,
-) -> bool {
-    if tcx.sess.opts.unstable_opts.strict_init_checks {
-        might_permit_raw_init_strict(ty, tcx, kind)
+    kind: ValidityRequirement,
+    param_env_and_ty: ParamEnvAnd<'tcx, Ty<'tcx>>,
+) -> Result<bool, LayoutError<'tcx>> {
+    let layout = tcx.layout_of(param_env_and_ty)?;
+
+    // There is nothing strict or lax about inhabitedness.
+    if kind == ValidityRequirement::Inhabited {
+        return Ok(!layout.abi.is_uninhabited());
+    }
+
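+    // The lax check cannot express "fully uninit" (it only reasons about
+    // scalar validity ranges), so the `Uninit` requirement always takes the
+    // Miri-based strict path.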
+    if kind == ValidityRequirement::Uninit || tcx.sess.opts.unstable_opts.strict_init_checks {
+        might_permit_raw_init_strict(layout, tcx, kind)
     } else {
-        let layout_cx = LayoutCx { tcx, param_env: ParamEnv::reveal_all() };
-        might_permit_raw_init_lax(ty, &layout_cx, kind)
+        let layout_cx = LayoutCx { tcx, param_env: param_env_and_ty.param_env };
+        might_permit_raw_init_lax(layout, &layout_cx, kind)
     }
 }
 
@@ -36,12 +43,12 @@ pub fn might_permit_raw_init<'tcx>(
 fn might_permit_raw_init_strict<'tcx>(
     ty: TyAndLayout<'tcx>,
     tcx: TyCtxt<'tcx>,
-    kind: InitKind,
-) -> bool {
+    kind: ValidityRequirement,
+) -> Result<bool, LayoutError<'tcx>> {
     let machine = CompileTimeInterpreter::new(
         Limit::new(0),
         /*can_access_statics:*/ false,
-        /*check_alignment:*/ true,
+        CheckAlignment::Error,
     );
 
     let mut cx = InterpCx::new(tcx, rustc_span::DUMMY_SP, ParamEnv::reveal_all(), machine);
@@ -50,7 +57,7 @@ fn might_permit_raw_init_strict<'tcx>(
         .allocate(ty, MemoryKind::Machine(crate::const_eval::MemoryKind::Heap))
         .expect("OOM: failed to allocate for uninit check");
 
-    if kind == InitKind::Zero {
+    if kind == ValidityRequirement::Zero {
         cx.write_bytes_ptr(
             allocated.ptr,
             std::iter::repeat(0_u8).take(ty.layout.size().bytes_usize()),
@@ -64,7 +71,7 @@ fn might_permit_raw_init_strict<'tcx>(
     // This does *not* actually check that references are dereferenceable, but since all types that
     // require dereferenceability also require non-null, we don't actually get any false negatives
     // due to this.
-    cx.validate_operand(&ot).is_ok()
+    Ok(cx.validate_operand(&ot).is_ok())
 }
 
 /// Implements the 'lax' (default) version of the `might_permit_raw_init` checks; see that function for
@@ -72,15 +79,18 @@ fn might_permit_raw_init_strict<'tcx>(
 fn might_permit_raw_init_lax<'tcx>(
     this: TyAndLayout<'tcx>,
     cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
-    init_kind: InitKind,
-) -> bool {
+    init_kind: ValidityRequirement,
+) -> Result<bool, LayoutError<'tcx>> {
     let scalar_allows_raw_init = move |s: Scalar| -> bool {
         match init_kind {
-            InitKind::Zero => {
+            ValidityRequirement::Inhabited => {
+                bug!("ValidityRequirement::Inhabited should have been handled above")
+            }
+            ValidityRequirement::Zero => {
                 // The range must contain 0.
                 s.valid_range(cx).contains(0)
             }
-            InitKind::UninitMitigated0x01Fill => {
+            ValidityRequirement::UninitMitigated0x01Fill => {
                 // The range must include a 0x01-filled buffer.
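                 // (For a 4-byte scalar this builds the value 0x01010101.)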
                 let mut val: u128 = 0x01;
                 for _ in 1..s.size(cx).bytes() {
@@ -89,6 +99,9 @@ fn might_permit_raw_init_lax<'tcx>(
                 }
                 s.valid_range(cx).contains(val)
             }
+            ValidityRequirement::Uninit => {
+                bug!("ValidityRequirement::Uninit should have been handled above")
+            }
         }
     };
 
@@ -102,20 +115,20 @@ fn might_permit_raw_init_lax<'tcx>(
     };
     if !valid {
         // This is definitely not okay.
-        return false;
+        return Ok(false);
     }
 
     // Special magic check for references and boxes (i.e., special pointer types).
     if let Some(pointee) = this.ty.builtin_deref(false) {
-        let pointee = cx.layout_of(pointee.ty).expect("need to be able to compute layouts");
+        let pointee = cx.layout_of(pointee.ty)?;
         // We need to ensure that the LLVM attributes `aligned` and `dereferenceable(size)` are satisfied.
         if pointee.align.abi.bytes() > 1 {
             // 0x01-filling is not aligned.
-            return false;
+            return Ok(false);
         }
         if pointee.size.bytes() > 0 {
             // A 'fake' integer pointer is not sufficiently dereferenceable.
-            return false;
+            return Ok(false);
         }
     }
 
@@ -128,9 +141,9 @@ fn might_permit_raw_init_lax<'tcx>(
         }
         FieldsShape::Arbitrary { offsets, .. } => {
             for idx in 0..offsets.len() {
-                if !might_permit_raw_init_lax(this.field(cx, idx), cx, init_kind) {
+                if !might_permit_raw_init_lax(this.field(cx, idx), cx, init_kind)? {
                     // We found a field that is unhappy with this kind of initialization.
-                    return false;
+                    return Ok(false);
                 }
             }
         }
@@ -147,5 +160,5 @@ fn might_permit_raw_init_lax<'tcx>(
         }
     }
 
-    true
+    Ok(true)
 }
diff --git a/compiler/rustc_const_eval/src/util/compare_types.rs b/compiler/rustc_const_eval/src/util/compare_types.rs
new file mode 100644
index 00000000000..f5f3d5de6b5
--- /dev/null
+++ b/compiler/rustc_const_eval/src/util/compare_types.rs
@@ -0,0 +1,63 @@
+//! Routines to check for relations between fully inferred types.
+//!
+//! FIXME: Move this to a more general place. The utility of this extends to
+//! other areas of the compiler as well.
+
+use rustc_infer::infer::{DefiningAnchor, TyCtxtInferExt};
+use rustc_infer::traits::ObligationCause;
+use rustc_middle::ty::{ParamEnv, Ty, TyCtxt};
+use rustc_trait_selection::traits::ObligationCtxt;
+
+/// Returns whether the two types are equal up to subtyping.
+///
+/// This is used in case we don't know the expected subtyping direction
+/// and still want to check whether anything is broken.
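+///
+/// For example, `&'static u8` and `&'a u8` only differ by subtyping, so they
+/// compare equal here, while `u8` and `u16` do not.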
+pub fn is_equal_up_to_subtyping<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    param_env: ParamEnv<'tcx>,
+    src: Ty<'tcx>,
+    dest: Ty<'tcx>,
+) -> bool {
+    // Fast path.
+    if src == dest {
+        return true;
+    }
+
+    // Check for subtyping in either direction.
+    is_subtype(tcx, param_env, src, dest) || is_subtype(tcx, param_env, dest, src)
+}
+
+/// Returns whether `src` is a subtype of `dest`, i.e. `src <: dest`.
+///
+/// This mostly ignores opaque types as it can be used in constraining contexts
+/// while still computing the final underlying type.
+pub fn is_subtype<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    param_env: ParamEnv<'tcx>,
+    src: Ty<'tcx>,
+    dest: Ty<'tcx>,
+) -> bool {
+    if src == dest {
+        return true;
+    }
+
+    let mut builder =
+        tcx.infer_ctxt().ignoring_regions().with_opaque_type_inference(DefiningAnchor::Bubble);
+    let infcx = builder.build();
+    let ocx = ObligationCtxt::new(&infcx);
+    let cause = ObligationCause::dummy();
+    let src = ocx.normalize(&cause, param_env, src);
+    let dest = ocx.normalize(&cause, param_env, dest);
+    match ocx.sub(&cause, param_env, src, dest) {
+        Ok(()) => {}
+        Err(_) => return false,
+    };
+    let errors = ocx.select_all_or_error();
+    // With `Reveal::All`, opaque types get normalized away, with `Reveal::UserFacing`
+    // we would get unification errors because we're unable to look into opaque types,
+    // even if they're constrained in our current function.
+    //
+    // It seems very unlikely that this hides any bugs.
+    let _ = infcx.take_opaque_types();
+    errors.is_empty()
+}
diff --git a/compiler/rustc_const_eval/src/util/mod.rs b/compiler/rustc_const_eval/src/util/mod.rs
index 4d0f81a4060..c0aabd77cee 100644
--- a/compiler/rustc_const_eval/src/util/mod.rs
+++ b/compiler/rustc_const_eval/src/util/mod.rs
@@ -1,14 +1,14 @@
-pub mod aggregate;
 mod alignment;
 mod call_kind;
+mod check_validity_requirement;
 pub mod collect_writes;
+mod compare_types;
 mod find_self_call;
-mod might_permit_raw_init;
 mod type_name;
 
-pub use self::aggregate::expand_aggregate;
 pub use self::alignment::is_disaligned;
 pub use self::call_kind::{call_kind, CallDesugaringKind, CallKind};
+pub use self::check_validity_requirement::check_validity_requirement;
+pub use self::compare_types::{is_equal_up_to_subtyping, is_subtype};
 pub use self::find_self_call::find_self_call;
-pub use self::might_permit_raw_init::might_permit_raw_init;
 pub use self::type_name::type_name;
diff --git a/compiler/rustc_const_eval/src/util/type_name.rs b/compiler/rustc_const_eval/src/util/type_name.rs
index 08a6d69b8e4..4e80a285186 100644
--- a/compiler/rustc_const_eval/src/util/type_name.rs
+++ b/compiler/rustc_const_eval/src/util/type_name.rs
@@ -58,13 +58,13 @@ impl<'tcx> Printer<'tcx> for AbsolutePathPrinter<'tcx> {
             // Types with identity (print the module path).
             ty::Adt(ty::AdtDef(Interned(&ty::AdtDefData { did: def_id, .. }, _)), substs)
             | ty::FnDef(def_id, substs)
-            | ty::Opaque(def_id, substs)
-            | ty::Projection(ty::ProjectionTy { item_def_id: def_id, substs })
+            | ty::Alias(_, ty::AliasTy { def_id, substs, .. })
             | ty::Closure(def_id, substs)
             | ty::Generator(def_id, substs, _) => self.print_def_path(def_id, substs),
             ty::Foreign(def_id) => self.print_def_path(def_id, &[]),
 
             ty::GeneratorWitness(_) => bug!("type_name: unexpected `GeneratorWitness`"),
+            ty::GeneratorWitnessMIR(..) => bug!("type_name: unexpected `GeneratorWitnessMIR`"),
         }
     }
 
@@ -74,7 +74,7 @@ impl<'tcx> Printer<'tcx> for AbsolutePathPrinter<'tcx> {
 
     fn print_dyn_existential(
         self,
-        predicates: &'tcx ty::List<ty::Binder<'tcx, ty::ExistentialPredicate<'tcx>>>,
+        predicates: &'tcx ty::List<ty::PolyExistentialPredicate<'tcx>>,
     ) -> Result<Self::DynExistential, Self::Error> {
         self.pretty_print_dyn_existential(predicates)
     }