Diffstat (limited to 'compiler/rustc_const_eval')
 compiler/rustc_const_eval/Cargo.toml                                            |    2
 compiler/rustc_const_eval/messages.ftl                                          |    7
 compiler/rustc_const_eval/src/const_eval/error.rs                               |    5
 compiler/rustc_const_eval/src/const_eval/eval_queries.rs                        |    8
 compiler/rustc_const_eval/src/const_eval/fn_queries.rs                          |   13
 compiler/rustc_const_eval/src/const_eval/machine.rs                             |    2
 compiler/rustc_const_eval/src/const_eval/mod.rs                                 |    6
 compiler/rustc_const_eval/src/const_eval/valtrees.rs                            |   64
 compiler/rustc_const_eval/src/errors.rs                                         |   16
 compiler/rustc_const_eval/src/interpret/cast.rs                                 |    6
 compiler/rustc_const_eval/src/interpret/discriminant.rs                         |  102
 compiler/rustc_const_eval/src/interpret/eval_context.rs                         |   16
 compiler/rustc_const_eval/src/interpret/intern.rs                               |  133
 compiler/rustc_const_eval/src/interpret/intrinsics.rs                           |   42
 compiler/rustc_const_eval/src/interpret/intrinsics/caller_location.rs           |    6
 compiler/rustc_const_eval/src/interpret/memory.rs                               |    4
 compiler/rustc_const_eval/src/interpret/mod.rs                                  |    7
 compiler/rustc_const_eval/src/interpret/operand.rs                              |  264
 compiler/rustc_const_eval/src/interpret/operator.rs                             |    9
 compiler/rustc_const_eval/src/interpret/place.rs                                |  488
 compiler/rustc_const_eval/src/interpret/projection.rs                           |  417
 compiler/rustc_const_eval/src/interpret/step.rs                                 |   13
 compiler/rustc_const_eval/src/interpret/terminator.rs                           |   25
 compiler/rustc_const_eval/src/interpret/validity.rs                             |  130
 compiler/rustc_const_eval/src/interpret/visitor.rs                              |  692
 compiler/rustc_const_eval/src/transform/check_consts/check.rs                   |   42
 compiler/rustc_const_eval/src/transform/check_consts/ops.rs                     |    8
 compiler/rustc_const_eval/src/transform/check_consts/post_drop_elaboration.rs   |    6
 compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs                 |    7
 compiler/rustc_const_eval/src/transform/validate.rs                             |  550
 compiler/rustc_const_eval/src/util/compare_types.rs                             |   14
 31 files changed, 1533 insertions(+), 1571 deletions(-)
diff --git a/compiler/rustc_const_eval/Cargo.toml b/compiler/rustc_const_eval/Cargo.toml
index 74030a43c50..4e47fed8640 100644
--- a/compiler/rustc_const_eval/Cargo.toml
+++ b/compiler/rustc_const_eval/Cargo.toml
@@ -8,7 +8,7 @@ edition = "2021"
 [dependencies]
 tracing = "0.1"
 either = "1"
-rustc_apfloat = { path = "../rustc_apfloat" }
+rustc_apfloat = "0.2.0"
 rustc_ast = { path = "../rustc_ast" }
 rustc_attr = { path = "../rustc_attr" }
 rustc_data_structures = { path = "../rustc_data_structures" }
diff --git a/compiler/rustc_const_eval/messages.ftl b/compiler/rustc_const_eval/messages.ftl
index d8eade5bd2a..671c2be1de9 100644
--- a/compiler/rustc_const_eval/messages.ftl
+++ b/compiler/rustc_const_eval/messages.ftl
@@ -408,8 +408,11 @@ const_eval_undefined_behavior =
 const_eval_undefined_behavior_note =
     The rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. Please open an issue on the rustc repository if you believe it should not be considered undefined behavior.
 
+const_eval_uninhabited_enum_tag = {$front_matter}: encountered an uninhabited enum variant
+const_eval_uninhabited_enum_variant_read =
+    read discriminant of an uninhabited enum variant
 const_eval_uninhabited_enum_variant_written =
-    writing discriminant of an uninhabited enum
+    writing discriminant of an uninhabited enum variant
 const_eval_uninhabited_val = {$front_matter}: encountered a value of uninhabited type `{$ty}`
 const_eval_uninit = {$front_matter}: encountered uninitialized bytes
 const_eval_uninit_bool = {$front_matter}: encountered uninitialized memory, but expected a boolean
@@ -423,8 +426,6 @@ const_eval_uninit_int = {$front_matter}: encountered uninitialized memory, but e
 const_eval_uninit_raw_ptr = {$front_matter}: encountered uninitialized memory, but expected a raw pointer
 const_eval_uninit_ref = {$front_matter}: encountered uninitialized memory, but expected a reference
 const_eval_uninit_str = {$front_matter}: encountered uninitialized data in `str`
-const_eval_uninit_unsized_local =
-    unsized local is used while uninitialized
 const_eval_unreachable = entering unreachable code
 const_eval_unreachable_unwind =
     unwinding past a stack frame that does not allow unwinding
diff --git a/compiler/rustc_const_eval/src/const_eval/error.rs b/compiler/rustc_const_eval/src/const_eval/error.rs
index ffeff8d079a..d39a7e8a192 100644
--- a/compiler/rustc_const_eval/src/const_eval/error.rs
+++ b/compiler/rustc_const_eval/src/const_eval/error.rs
@@ -138,7 +138,10 @@ where
         err_inval!(Layout(LayoutError::Unknown(_))) | err_inval!(TooGeneric) => {
             ErrorHandled::TooGeneric
         }
-        err_inval!(AlreadyReported(error_reported)) => ErrorHandled::Reported(error_reported),
+        err_inval!(AlreadyReported(guar)) => ErrorHandled::Reported(guar),
+        err_inval!(Layout(LayoutError::ReferencesError(guar))) => {
+            ErrorHandled::Reported(guar.into())
+        }
         err_inval!(Layout(layout_error @ LayoutError::SizeOverflow(_))) => {
             // We must *always* hard error on these, even if the caller wants just a lint.
             // The `message` makes little sense here, this is a more serious error than the
diff --git a/compiler/rustc_const_eval/src/const_eval/eval_queries.rs b/compiler/rustc_const_eval/src/const_eval/eval_queries.rs
index b21cb984de6..4c7e9194401 100644
--- a/compiler/rustc_const_eval/src/const_eval/eval_queries.rs
+++ b/compiler/rustc_const_eval/src/const_eval/eval_queries.rs
@@ -52,13 +52,13 @@ fn eval_body_using_ecx<'mir, 'tcx>(
     trace!(
         "eval_body_using_ecx: pushing stack frame for global: {}{}",
         with_no_trimmed_paths!(ecx.tcx.def_path_str(cid.instance.def_id())),
-        cid.promoted.map_or_else(String::new, |p| format!("::promoted[{:?}]", p))
+        cid.promoted.map_or_else(String::new, |p| format!("::promoted[{p:?}]"))
     );
 
     ecx.push_stack_frame(
         cid.instance,
         body,
-        &ret.into(),
+        &ret.clone().into(),
         StackPopCleanup::Root { cleanup: false },
     )?;
 
@@ -228,7 +228,6 @@ pub fn eval_to_const_value_raw_provider<'tcx>(
     tcx: TyCtxt<'tcx>,
     key: ty::ParamEnvAnd<'tcx, GlobalId<'tcx>>,
 ) -> ::rustc_middle::mir::interpret::EvalToConstValueResult<'tcx> {
-    assert!(key.param_env.is_const());
     // see comment in eval_to_allocation_raw_provider for what we're doing here
     if key.param_env.reveal() == Reveal::All {
         let mut key = key;
@@ -269,7 +268,6 @@ pub fn eval_to_allocation_raw_provider<'tcx>(
     tcx: TyCtxt<'tcx>,
     key: ty::ParamEnvAnd<'tcx, GlobalId<'tcx>>,
 ) -> ::rustc_middle::mir::interpret::EvalToAllocationRawResult<'tcx> {
-    assert!(key.param_env.is_const());
     // Because the constant is computed twice (once per value of `Reveal`), we are at risk of
     // reporting the same error twice here. To resolve this, we check whether we can evaluate the
     // constant in the more restrictive `Reveal::UserFacing`, which most likely already was
@@ -356,7 +354,7 @@ pub fn eval_to_allocation_raw_provider<'tcx>(
             // Since evaluation had no errors, validate the resulting constant.
             // This is a separate `try` block to provide more targeted error reporting.
             let validation: Result<_, InterpErrorInfo<'_>> = try {
-                let mut ref_tracking = RefTracking::new(mplace);
+                let mut ref_tracking = RefTracking::new(mplace.clone());
                 let mut inner = false;
                 while let Some((mplace, path)) = ref_tracking.todo.pop() {
                     let mode = match tcx.static_mutability(cid.instance.def_id()) {
diff --git a/compiler/rustc_const_eval/src/const_eval/fn_queries.rs b/compiler/rustc_const_eval/src/const_eval/fn_queries.rs
index fa8253d5e49..cc39387c41f 100644
--- a/compiler/rustc_const_eval/src/const_eval/fn_queries.rs
+++ b/compiler/rustc_const_eval/src/const_eval/fn_queries.rs
@@ -28,15 +28,18 @@ pub fn is_parent_const_impl_raw(tcx: TyCtxt<'_>, def_id: LocalDefId) -> bool {
         && tcx.constness(parent_id) == hir::Constness::Const
 }
 
-/// Checks whether an item is considered to be `const`. If it is a constructor, it is const. If
-/// it is a trait impl/function, return if it has a `const` modifier. If it is an intrinsic,
-/// report whether said intrinsic has a `rustc_const_{un,}stable` attribute. Otherwise, return
-/// `Constness::NotConst`.
+/// Checks whether an item is considered to be `const`. If it is a constructor, anonymous const,
+/// const block, const item or associated const, it is const. If it is a trait impl/function,
+/// return if it has a `const` modifier. If it is an intrinsic, report whether said intrinsic
+/// has a `rustc_const_{un,}stable` attribute. Otherwise, return `Constness::NotConst`.
 fn constness(tcx: TyCtxt<'_>, def_id: LocalDefId) -> hir::Constness {
     let node = tcx.hir().get_by_def_id(def_id);
 
     match node {
-        hir::Node::Ctor(_) => hir::Constness::Const,
+        hir::Node::Ctor(_)
+        | hir::Node::AnonConst(_)
+        | hir::Node::ConstBlock(_)
+        | hir::Node::ImplItem(hir::ImplItem { kind: hir::ImplItemKind::Const(..), .. }) => hir::Constness::Const,
         hir::Node::Item(hir::Item { kind: hir::ItemKind::Impl(impl_), .. }) => impl_.constness,
         hir::Node::ForeignItem(hir::ForeignItem { kind: hir::ForeignItemKind::Fn(..), .. }) => {
             // Intrinsics use `rustc_const_{un,}stable` attributes to indicate constness. All other
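
The expanded doc comment on `constness` above enumerates the item kinds that are unconditionally const. As a rough illustration (ordinary Rust, not part of this diff), these surface forms map onto the HIR nodes involved:

    // All const per the doc comment above:
    const LEN: usize = 4;            // const item; associated consts behave the same
    fn f() -> [u8; 4] {
        let _ = const { 1 + 1 };     // hir::Node::ConstBlock
        [0u8; LEN]                   // the array length is a hir::Node::AnonConst
    }
    // Tuple-struct constructors such as `Some` are hir::Node::Ctor, also const.
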
diff --git a/compiler/rustc_const_eval/src/const_eval/machine.rs b/compiler/rustc_const_eval/src/const_eval/machine.rs
index 267795a6cb4..6630eeca27e 100644
--- a/compiler/rustc_const_eval/src/const_eval/machine.rs
+++ b/compiler/rustc_const_eval/src/const_eval/machine.rs
@@ -216,7 +216,7 @@ impl<'mir, 'tcx: 'mir> CompileTimeEvalContext<'mir, 'tcx> {
 
             let mut msg_place = self.deref_operand(&args[0])?;
             while msg_place.layout.ty.is_ref() {
-                msg_place = self.deref_operand(&msg_place.into())?;
+                msg_place = self.deref_operand(&msg_place)?;
             }
 
             let msg = Symbol::intern(self.read_str(&msg_place)?);
diff --git a/compiler/rustc_const_eval/src/const_eval/mod.rs b/compiler/rustc_const_eval/src/const_eval/mod.rs
index 0a3c3914ff9..ef31155215a 100644
--- a/compiler/rustc_const_eval/src/const_eval/mod.rs
+++ b/compiler/rustc_const_eval/src/const_eval/mod.rs
@@ -101,8 +101,8 @@ pub(crate) fn try_destructure_mir_constant_for_diagnostics<'tcx>(
             return None;
         }
         ty::Adt(def, _) => {
-            let variant = ecx.read_discriminant(&op).ok()?.1;
-            let down = ecx.operand_downcast(&op, variant).ok()?;
+            let variant = ecx.read_discriminant(&op).ok()?;
+            let down = ecx.project_downcast(&op, variant).ok()?;
             (def.variants()[variant].fields.len(), Some(variant), down)
         }
         ty::Tuple(args) => (args.len(), None, op),
@@ -111,7 +111,7 @@ pub(crate) fn try_destructure_mir_constant_for_diagnostics<'tcx>(
 
     let fields_iter = (0..field_count)
         .map(|i| {
-            let field_op = ecx.operand_field(&down, i).ok()?;
+            let field_op = ecx.project_field(&down, i).ok()?;
             let val = op_to_const(&ecx, &field_op);
             Some((val, field_op.layout.ty))
         })
diff --git a/compiler/rustc_const_eval/src/const_eval/valtrees.rs b/compiler/rustc_const_eval/src/const_eval/valtrees.rs
index 9531d973eb3..7c1dbddfc26 100644
--- a/compiler/rustc_const_eval/src/const_eval/valtrees.rs
+++ b/compiler/rustc_const_eval/src/const_eval/valtrees.rs
@@ -2,11 +2,11 @@ use super::eval_queries::{mk_eval_cx, op_to_const};
 use super::machine::CompileTimeEvalContext;
 use super::{ValTreeCreationError, ValTreeCreationResult, VALTREE_MAX_NODES};
 use crate::const_eval::CanAccessStatics;
+use crate::interpret::MPlaceTy;
 use crate::interpret::{
     intern_const_alloc_recursive, ConstValue, ImmTy, Immediate, InternKind, MemPlaceMeta,
-    MemoryKind, PlaceTy, Scalar,
+    MemoryKind, Place, Projectable, Scalar,
 };
-use crate::interpret::{MPlaceTy, Value};
 use rustc_middle::ty::{self, ScalarInt, Ty, TyCtxt};
 use rustc_span::source_map::DUMMY_SP;
 use rustc_target::abi::{Align, FieldIdx, VariantIdx, FIRST_VARIANT};
@@ -20,15 +20,15 @@ fn branches<'tcx>(
     num_nodes: &mut usize,
 ) -> ValTreeCreationResult<'tcx> {
     let place = match variant {
-        Some(variant) => ecx.mplace_downcast(&place, variant).unwrap(),
-        None => *place,
+        Some(variant) => ecx.project_downcast(place, variant).unwrap(),
+        None => place.clone(),
     };
     let variant = variant.map(|variant| Some(ty::ValTree::Leaf(ScalarInt::from(variant.as_u32()))));
     debug!(?place, ?variant);
 
     let mut fields = Vec::with_capacity(n);
     for i in 0..n {
-        let field = ecx.mplace_field(&place, i).unwrap();
+        let field = ecx.project_field(&place, i).unwrap();
         let valtree = const_to_valtree_inner(ecx, &field, num_nodes)?;
         fields.push(Some(valtree));
     }
@@ -55,13 +55,11 @@ fn slice_branches<'tcx>(
     place: &MPlaceTy<'tcx>,
     num_nodes: &mut usize,
 ) -> ValTreeCreationResult<'tcx> {
-    let n = place
-        .len(&ecx.tcx.tcx)
-        .unwrap_or_else(|_| panic!("expected to use len of place {:?}", place));
+    let n = place.len(ecx).unwrap_or_else(|_| panic!("expected to use len of place {place:?}"));
 
     let mut elems = Vec::with_capacity(n as usize);
     for i in 0..n {
-        let place_elem = ecx.mplace_index(place, i).unwrap();
+        let place_elem = ecx.project_index(place, i).unwrap();
         let valtree = const_to_valtree_inner(ecx, &place_elem, num_nodes)?;
         elems.push(valtree);
     }
@@ -88,7 +86,7 @@ pub(crate) fn const_to_valtree_inner<'tcx>(
             Ok(ty::ValTree::zst())
         }
         ty::Bool | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Char => {
-            let Ok(val) = ecx.read_immediate(&place.into()) else {
+            let Ok(val) = ecx.read_immediate(place) else {
                 return Err(ValTreeCreationError::Other);
             };
             let val = val.to_scalar();
@@ -104,7 +102,7 @@ pub(crate) fn const_to_valtree_inner<'tcx>(
         ty::FnPtr(_) | ty::RawPtr(_) => Err(ValTreeCreationError::NonSupportedType),
 
         ty::Ref(_, _, _)  => {
-            let Ok(derefd_place)= ecx.deref_operand(&place.into()) else {
+            let Ok(derefd_place)= ecx.deref_operand(place) else {
                 return Err(ValTreeCreationError::Other);
             };
             debug!(?derefd_place);
@@ -132,7 +130,7 @@ pub(crate) fn const_to_valtree_inner<'tcx>(
                 bug!("uninhabited types should have errored and never gotten converted to valtree")
             }
 
-            let Ok((_, variant)) = ecx.read_discriminant(&place.into()) else {
+            let Ok(variant) = ecx.read_discriminant(place) else {
                 return Err(ValTreeCreationError::Other);
             };
             branches(ecx, place, def.variant(variant).fields.len(), def.is_enum().then_some(variant), num_nodes)
@@ -282,7 +280,7 @@ pub fn valtree_to_const_value<'tcx>(
             ),
         },
         ty::Ref(_, _, _) | ty::Tuple(_) | ty::Array(_, _) | ty::Adt(..) => {
-            let mut place = match ty.kind() {
+            let place = match ty.kind() {
                 ty::Ref(_, inner_ty, _) => {
                     // Need to create a place for the pointee to fill for Refs
                     create_pointee_place(&mut ecx, *inner_ty, valtree)
@@ -291,8 +289,8 @@ pub fn valtree_to_const_value<'tcx>(
             };
             debug!(?place);
 
-            valtree_into_mplace(&mut ecx, &mut place, valtree);
-            dump_place(&ecx, place.into());
+            valtree_into_mplace(&mut ecx, &place, valtree);
+            dump_place(&ecx, &place);
             intern_const_alloc_recursive(&mut ecx, InternKind::Constant, &place).unwrap();
 
             match ty.kind() {
@@ -331,7 +329,7 @@ pub fn valtree_to_const_value<'tcx>(
 #[instrument(skip(ecx), level = "debug")]
 fn valtree_into_mplace<'tcx>(
     ecx: &mut CompileTimeEvalContext<'tcx, 'tcx>,
-    place: &mut MPlaceTy<'tcx>,
+    place: &MPlaceTy<'tcx>,
     valtree: ty::ValTree<'tcx>,
 ) {
     // This will match on valtree and write the value(s) corresponding to the ValTree
@@ -347,14 +345,14 @@ fn valtree_into_mplace<'tcx>(
         ty::Bool | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Char => {
             let scalar_int = valtree.unwrap_leaf();
             debug!("writing trivial valtree {:?} to place {:?}", scalar_int, place);
-            ecx.write_immediate(Immediate::Scalar(scalar_int.into()), &place.into()).unwrap();
+            ecx.write_immediate(Immediate::Scalar(scalar_int.into()), place).unwrap();
         }
         ty::Ref(_, inner_ty, _) => {
-            let mut pointee_place = create_pointee_place(ecx, *inner_ty, valtree);
+            let pointee_place = create_pointee_place(ecx, *inner_ty, valtree);
             debug!(?pointee_place);
 
-            valtree_into_mplace(ecx, &mut pointee_place, valtree);
-            dump_place(ecx, pointee_place.into());
+            valtree_into_mplace(ecx, &pointee_place, valtree);
+            dump_place(ecx, &pointee_place);
             intern_const_alloc_recursive(ecx, InternKind::Constant, &pointee_place).unwrap();
 
             let imm = match inner_ty.kind() {
@@ -371,7 +369,7 @@ fn valtree_into_mplace<'tcx>(
             };
             debug!(?imm);
 
-            ecx.write_immediate(imm, &place.into()).unwrap();
+            ecx.write_immediate(imm, place).unwrap();
         }
         ty::Adt(_, _) | ty::Tuple(_) | ty::Array(_, _) | ty::Str | ty::Slice(_) => {
             let branches = valtree.unwrap_branch();
@@ -386,12 +384,12 @@ fn valtree_into_mplace<'tcx>(
                     debug!(?variant);
 
                     (
-                        place.project_downcast(ecx, variant_idx).unwrap(),
+                        ecx.project_downcast(place, variant_idx).unwrap(),
                         &branches[1..],
                         Some(variant_idx),
                     )
                 }
-                _ => (*place, branches, None),
+                _ => (place.clone(), branches, None),
             };
             debug!(?place_adjusted, ?branches);
 
@@ -400,8 +398,8 @@ fn valtree_into_mplace<'tcx>(
             for (i, inner_valtree) in branches.iter().enumerate() {
                 debug!(?i, ?inner_valtree);
 
-                let mut place_inner = match ty.kind() {
-                    ty::Str | ty::Slice(_) => ecx.mplace_index(&place, i as u64).unwrap(),
+                let place_inner = match ty.kind() {
+                    ty::Str | ty::Slice(_) => ecx.project_index(place, i as u64).unwrap(),
                     _ if !ty.is_sized(*ecx.tcx, ty::ParamEnv::empty())
                         && i == branches.len() - 1 =>
                     {
@@ -441,29 +439,29 @@ fn valtree_into_mplace<'tcx>(
                             )
                             .unwrap()
                     }
-                    _ => ecx.mplace_field(&place_adjusted, i).unwrap(),
+                    _ => ecx.project_field(&place_adjusted, i).unwrap(),
                 };
 
                 debug!(?place_inner);
-                valtree_into_mplace(ecx, &mut place_inner, *inner_valtree);
-                dump_place(&ecx, place_inner.into());
+                valtree_into_mplace(ecx, &place_inner, *inner_valtree);
+                dump_place(&ecx, &place_inner);
             }
 
             debug!("dump of place_adjusted:");
-            dump_place(ecx, place_adjusted.into());
+            dump_place(ecx, &place_adjusted);
 
             if let Some(variant_idx) = variant_idx {
                 // don't forget filling the place with the discriminant of the enum
-                ecx.write_discriminant(variant_idx, &place.into()).unwrap();
+                ecx.write_discriminant(variant_idx, place).unwrap();
             }
 
             debug!("dump of place after writing discriminant:");
-            dump_place(ecx, place.into());
+            dump_place(ecx, place);
         }
         _ => bug!("shouldn't have created a ValTree for {:?}", ty),
     }
 }
 
-fn dump_place<'tcx>(ecx: &CompileTimeEvalContext<'tcx, 'tcx>, place: PlaceTy<'tcx>) {
-    trace!("{:?}", ecx.dump_place(*place));
+fn dump_place<'tcx>(ecx: &CompileTimeEvalContext<'tcx, 'tcx>, place: &MPlaceTy<'tcx>) {
+    trace!("{:?}", ecx.dump_place(Place::Ptr(**place)));
 }
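
`branches` and `slice_branches` above build valtrees structurally: scalar leaves for primitive values, branches over fields or elements for aggregates, with an extra leading leaf for an enum's variant index (the `variant.map(...)` above). An illustrative, hand-written shape, not produced verbatim by this code:

    // const C: (u8, bool) = (7, false);
    //
    // ValTree::Branch([
    //     ValTree::Leaf(ScalarInt::from(7u8)),
    //     ValTree::Leaf(ScalarInt::from(false)),
    // ])
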
diff --git a/compiler/rustc_const_eval/src/errors.rs b/compiler/rustc_const_eval/src/errors.rs
index ca38cce710e..e1109e584b7 100644
--- a/compiler/rustc_const_eval/src/errors.rs
+++ b/compiler/rustc_const_eval/src/errors.rs
@@ -511,7 +511,8 @@ impl<'a> ReportErrorExt for UndefinedBehaviorInfo<'a> {
             InvalidUninitBytes(Some(_)) => const_eval_invalid_uninit_bytes,
             DeadLocal => const_eval_dead_local,
             ScalarSizeMismatch(_) => const_eval_scalar_size_mismatch,
-            UninhabitedEnumVariantWritten => const_eval_uninhabited_enum_variant_written,
+            UninhabitedEnumVariantWritten(_) => const_eval_uninhabited_enum_variant_written,
+            UninhabitedEnumVariantRead(_) => const_eval_uninhabited_enum_variant_read,
             Validation(e) => e.diagnostic_message(),
             Custom(x) => (x.msg)(),
         }
@@ -535,7 +536,8 @@ impl<'a> ReportErrorExt for UndefinedBehaviorInfo<'a> {
             | InvalidMeta(InvalidMetaKind::TooBig)
             | InvalidUninitBytes(None)
             | DeadLocal
-            | UninhabitedEnumVariantWritten => {}
+            | UninhabitedEnumVariantWritten(_)
+            | UninhabitedEnumVariantRead(_) => {}
             BoundsCheckFailed { len, index } => {
                 builder.set_arg("len", len);
                 builder.set_arg("index", index);
@@ -623,6 +625,7 @@ impl<'tcx> ReportErrorExt for ValidationErrorInfo<'tcx> {
             UnsafeCell => const_eval_unsafe_cell,
             UninhabitedVal { .. } => const_eval_uninhabited_val,
             InvalidEnumTag { .. } => const_eval_invalid_enum_tag,
+            UninhabitedEnumTag => const_eval_uninhabited_enum_tag,
             UninitEnumTag => const_eval_uninit_enum_tag,
             UninitStr => const_eval_uninit_str,
             Uninit { expected: ExpectedKind::Bool } => const_eval_uninit_bool,
@@ -760,7 +763,8 @@ impl<'tcx> ReportErrorExt for ValidationErrorInfo<'tcx> {
             | InvalidMetaSliceTooLarge { .. }
             | InvalidMetaTooLarge { .. }
             | DanglingPtrUseAfterFree { .. }
-            | DanglingPtrOutOfBounds { .. } => {}
+            | DanglingPtrOutOfBounds { .. }
+            | UninhabitedEnumTag => {}
         }
     }
 }
@@ -835,7 +839,9 @@ impl<'tcx> ReportErrorExt for InvalidProgramInfo<'tcx> {
                 rustc_middle::error::middle_adjust_for_foreign_abi_error
             }
             InvalidProgramInfo::SizeOfUnsizedType(_) => const_eval_size_of_unsized,
-            InvalidProgramInfo::UninitUnsizedLocal => const_eval_uninit_unsized_local,
+            InvalidProgramInfo::ConstPropNonsense => {
+                panic!("We had const-prop nonsense, this should never be printed")
+            }
         }
     }
     fn add_args<G: EmissionGuarantee>(
@@ -846,7 +852,7 @@ impl<'tcx> ReportErrorExt for InvalidProgramInfo<'tcx> {
         match self {
             InvalidProgramInfo::TooGeneric
             | InvalidProgramInfo::AlreadyReported(_)
-            | InvalidProgramInfo::UninitUnsizedLocal => {}
+            | InvalidProgramInfo::ConstPropNonsense => {}
             InvalidProgramInfo::Layout(e) => {
                 let diag: DiagnosticBuilder<'_, ()> = e.into_diagnostic().into_diagnostic(handler);
                 for (name, val) in diag.args() {
diff --git a/compiler/rustc_const_eval/src/interpret/cast.rs b/compiler/rustc_const_eval/src/interpret/cast.rs
index dd7a1fcc165..98e853dc4d9 100644
--- a/compiler/rustc_const_eval/src/interpret/cast.rs
+++ b/compiler/rustc_const_eval/src/interpret/cast.rs
@@ -56,7 +56,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             }
 
             CastKind::FnPtrToPtr | CastKind::PtrToPtr => {
-                let src = self.read_immediate(&src)?;
+                let src = self.read_immediate(src)?;
                 let res = self.ptr_to_ptr(&src, cast_ty)?;
                 self.write_immediate(res, dest)?;
             }
@@ -420,8 +420,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                     if cast_ty_field.is_zst() {
                         continue;
                     }
-                    let src_field = self.operand_field(src, i)?;
-                    let dst_field = self.place_field(dest, i)?;
+                    let src_field = self.project_field(src, i)?;
+                    let dst_field = self.project_field(dest, i)?;
                     if src_field.layout.ty == cast_ty_field.ty {
                         self.copy_op(&src_field, &dst_field, /*allow_transmute*/ false)?;
                     } else {
diff --git a/compiler/rustc_const_eval/src/interpret/discriminant.rs b/compiler/rustc_const_eval/src/interpret/discriminant.rs
index f23a455c2ca..6c35fb01a93 100644
--- a/compiler/rustc_const_eval/src/interpret/discriminant.rs
+++ b/compiler/rustc_const_eval/src/interpret/discriminant.rs
@@ -1,11 +1,11 @@
 //! Functions for reading and writing discriminants of multi-variant layouts (enums and generators).
 
-use rustc_middle::ty::layout::{LayoutOf, PrimitiveExt};
+use rustc_middle::ty::layout::{LayoutOf, PrimitiveExt, TyAndLayout};
 use rustc_middle::{mir, ty};
 use rustc_target::abi::{self, TagEncoding};
 use rustc_target::abi::{VariantIdx, Variants};
 
-use super::{ImmTy, InterpCx, InterpResult, Machine, OpTy, PlaceTy, Scalar};
+use super::{ImmTy, InterpCx, InterpResult, Machine, Readable, Scalar, Writeable};
 
 impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
     /// Writes the discriminant of the given variant.
@@ -13,7 +13,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
     pub fn write_discriminant(
         &mut self,
         variant_index: VariantIdx,
-        dest: &PlaceTy<'tcx, M::Provenance>,
+        dest: &impl Writeable<'tcx, M::Provenance>,
     ) -> InterpResult<'tcx> {
         // Layout computation excludes uninhabited variants from consideration
         // therefore there's no way to represent those variants in the given layout.
@@ -21,11 +21,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         // discriminant, so we cannot do anything here.
         // When evaluating we will always error before even getting here, but ConstProp 'executes'
         // dead code, so we cannot ICE here.
-        if dest.layout.for_variant(self, variant_index).abi.is_uninhabited() {
-            throw_ub!(UninhabitedEnumVariantWritten)
+        if dest.layout().for_variant(self, variant_index).abi.is_uninhabited() {
+            throw_ub!(UninhabitedEnumVariantWritten(variant_index))
         }
 
-        match dest.layout.variants {
+        match dest.layout().variants {
             abi::Variants::Single { index } => {
                 assert_eq!(index, variant_index);
             }
@@ -38,8 +38,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 // No need to validate the discriminant here because the
                 // `TyAndLayout::for_variant()` call earlier already checks the variant is valid.
 
-                let discr_val =
-                    dest.layout.ty.discriminant_for_variant(*self.tcx, variant_index).unwrap().val;
+                let discr_val = dest
+                    .layout()
+                    .ty
+                    .discriminant_for_variant(*self.tcx, variant_index)
+                    .unwrap()
+                    .val;
 
                 // raw discriminants for enums are isize or bigger during
                 // their computation, but the in-memory tag is the smallest possible
@@ -47,7 +51,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 let size = tag_layout.size(self);
                 let tag_val = size.truncate(discr_val);
 
-                let tag_dest = self.place_field(dest, tag_field)?;
+                let tag_dest = self.project_field(dest, tag_field)?;
                 self.write_scalar(Scalar::from_uint(tag_val, size), &tag_dest)?;
             }
             abi::Variants::Multiple {
@@ -78,7 +82,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                         &niche_start_val,
                     )?;
                     // Write result.
-                    let niche_dest = self.place_field(dest, tag_field)?;
+                    let niche_dest = self.project_field(dest, tag_field)?;
                     self.write_immediate(*tag_val, &niche_dest)?;
                 }
             }
@@ -92,11 +96,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
     #[instrument(skip(self), level = "trace")]
     pub fn read_discriminant(
         &self,
-        op: &OpTy<'tcx, M::Provenance>,
-    ) -> InterpResult<'tcx, (Scalar<M::Provenance>, VariantIdx)> {
-        trace!("read_discriminant_value {:#?}", op.layout);
+        op: &impl Readable<'tcx, M::Provenance>,
+    ) -> InterpResult<'tcx, VariantIdx> {
+        let ty = op.layout().ty;
+        trace!("read_discriminant_value {:#?}", op.layout());
         // Get type and layout of the discriminant.
-        let discr_layout = self.layout_of(op.layout.ty.discriminant_ty(*self.tcx))?;
+        let discr_layout = self.layout_of(ty.discriminant_ty(*self.tcx))?;
         trace!("discriminant type: {:?}", discr_layout.ty);
 
         // We use "discriminant" to refer to the value associated with a particular enum variant.
@@ -104,21 +109,23 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         // declared list of variants -- they can differ with explicitly assigned discriminants.
         // We use "tag" to refer to how the discriminant is encoded in memory, which can be either
         // straight-forward (`TagEncoding::Direct`) or with a niche (`TagEncoding::Niche`).
-        let (tag_scalar_layout, tag_encoding, tag_field) = match op.layout.variants {
+        let (tag_scalar_layout, tag_encoding, tag_field) = match op.layout().variants {
             Variants::Single { index } => {
-                let discr = match op.layout.ty.discriminant_for_variant(*self.tcx, index) {
-                    Some(discr) => {
-                        // This type actually has discriminants.
-                        assert_eq!(discr.ty, discr_layout.ty);
-                        Scalar::from_uint(discr.val, discr_layout.size)
+                // Do some extra checks on enums.
+                if ty.is_enum() {
+                    // Hilariously, `Single` is used even for 0-variant enums.
+                    // (See https://github.com/rust-lang/rust/issues/89765).
+                    if matches!(ty.kind(), ty::Adt(def, ..) if def.variants().is_empty()) {
+                        throw_ub!(UninhabitedEnumVariantRead(index))
                     }
-                    None => {
-                        // On a type without actual discriminants, variant is 0.
-                        assert_eq!(index.as_u32(), 0);
-                        Scalar::from_uint(index.as_u32(), discr_layout.size)
+                    // For consistency with `write_discriminant`, and to make sure that
+                    // `project_downcast` cannot fail due to strange layouts, we declare immediate UB
+                    // for uninhabited variants.
+                    if op.layout().for_variant(self, index).abi.is_uninhabited() {
+                        throw_ub!(UninhabitedEnumVariantRead(index))
                     }
-                };
-                return Ok((discr, index));
+                }
+                return Ok(index);
             }
             Variants::Multiple { tag, ref tag_encoding, tag_field, .. } => {
                 (tag, tag_encoding, tag_field)
@@ -138,13 +145,13 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         let tag_layout = self.layout_of(tag_scalar_layout.primitive().to_int_ty(*self.tcx))?;
 
         // Read tag and sanity-check `tag_layout`.
-        let tag_val = self.read_immediate(&self.operand_field(op, tag_field)?)?;
+        let tag_val = self.read_immediate(&self.project_field(op, tag_field)?)?;
         assert_eq!(tag_layout.size, tag_val.layout.size);
         assert_eq!(tag_layout.abi.is_signed(), tag_val.layout.abi.is_signed());
         trace!("tag value: {}", tag_val);
 
         // Figure out which discriminant and variant this corresponds to.
-        Ok(match *tag_encoding {
+        let index = match *tag_encoding {
             TagEncoding::Direct => {
                 let scalar = tag_val.to_scalar();
                 // Generate a specific error if `tag_val` is not an integer.
@@ -160,7 +167,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                     self.cast_from_int_like(scalar, tag_val.layout, discr_layout.ty).unwrap();
                 let discr_bits = discr_val.assert_bits(discr_layout.size);
                 // Convert discriminant to variant index, and catch invalid discriminants.
-                let index = match *op.layout.ty.kind() {
+                let index = match *ty.kind() {
                     ty::Adt(adt, _) => {
                         adt.discriminants(*self.tcx).find(|(_, var)| var.val == discr_bits)
                     }
@@ -172,7 +179,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 }
                 .ok_or_else(|| err_ub!(InvalidTag(Scalar::from_uint(tag_bits, tag_layout.size))))?;
                 // Return the cast value, and the index.
-                (discr_val, index.0)
+                index.0
             }
             TagEncoding::Niche { untagged_variant, ref niche_variants, niche_start } => {
                 let tag_val = tag_val.to_scalar();
@@ -214,12 +221,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                                     .checked_add(variant_index_relative)
                                     .expect("overflow computing absolute variant idx"),
                             );
-                            let variants = op
-                                .layout
-                                .ty
-                                .ty_adt_def()
-                                .expect("tagged layout for non adt")
-                                .variants();
+                            let variants =
+                                ty.ty_adt_def().expect("tagged layout for non adt").variants();
                             assert!(variant_index < variants.next_index());
                             variant_index
                         } else {
@@ -230,7 +233,32 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 // Compute the size of the scalar we need to return.
                 // No need to cast, because the variant index directly serves as discriminant and is
                 // encoded in the tag.
-                (Scalar::from_uint(variant.as_u32(), discr_layout.size), variant)
+                variant
+            }
+        };
+        // For consistency with `write_discriminant`, and to make sure that `project_downcast` cannot fail due to strange layouts, we declare immediate UB for uninhabited variants.
+        if op.layout().for_variant(self, index).abi.is_uninhabited() {
+            throw_ub!(UninhabitedEnumVariantRead(index))
+        }
+        Ok(index)
+    }
+
+    pub fn discriminant_for_variant(
+        &self,
+        layout: TyAndLayout<'tcx>,
+        variant: VariantIdx,
+    ) -> InterpResult<'tcx, Scalar<M::Provenance>> {
+        let discr_layout = self.layout_of(layout.ty.discriminant_ty(*self.tcx))?;
+        Ok(match layout.ty.discriminant_for_variant(*self.tcx, variant) {
+            Some(discr) => {
+                // This type actually has discriminants.
+                assert_eq!(discr.ty, discr_layout.ty);
+                Scalar::from_uint(discr.val, discr_layout.size)
+            }
+            None => {
+                // On a type without actual discriminants, variant is 0.
+                assert_eq!(variant.as_u32(), 0);
+                Scalar::from_uint(variant.as_u32(), discr_layout.size)
             }
         })
     }
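
The two `TagEncoding`s handled above surface in ordinary Rust layouts. A small illustration (plain Rust; exact layouts are compiler-internal and not guaranteed):

    // TagEncoding::Direct: the tag is stored in its own field and equals the
    // discriminant, truncated to the tag's size.
    enum Direct { A = 10, B = 42 }

    // TagEncoding::Niche: `Option<&u8>` needs no separate tag because the
    // all-zero bit pattern of `&u8` is invalid and is used to encode `None`.
    const _: () = assert!(
        std::mem::size_of::<Option<&u8>>() == std::mem::size_of::<&u8>()
    );

    // `Variants::Single` covers 0- and 1-variant enums; per the new checks
    // above, reading the discriminant of an uninhabited variant is now UB.
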
diff --git a/compiler/rustc_const_eval/src/interpret/eval_context.rs b/compiler/rustc_const_eval/src/interpret/eval_context.rs
index 04e046fbda3..3ac6f07e8b7 100644
--- a/compiler/rustc_const_eval/src/interpret/eval_context.rs
+++ b/compiler/rustc_const_eval/src/interpret/eval_context.rs
@@ -958,7 +958,6 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         } else {
             self.param_env
         };
-        let param_env = param_env.with_const();
         let val = self.ctfe_query(span, |tcx| tcx.eval_to_allocation_raw(param_env.and(gid)))?;
         self.raw_const_to_mplace(val)
     }
@@ -1014,9 +1013,12 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> std::fmt::Debug
 {
     fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         match self.place {
-            Place::Local { frame, local } => {
+            Place::Local { frame, local, offset } => {
                 let mut allocs = Vec::new();
-                write!(fmt, "{:?}", local)?;
+                write!(fmt, "{local:?}")?;
+                if let Some(offset) = offset {
+                    write!(fmt, "+{:#x}", offset.bytes())?;
+                }
                 if frame != self.ecx.frame_idx() {
                     write!(fmt, " ({} frames up)", self.ecx.frame_idx() - frame)?;
                 }
@@ -1032,7 +1034,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> std::fmt::Debug
                             fmt,
                             " by {} ref {:?}:",
                             match mplace.meta {
-                                MemPlaceMeta::Meta(meta) => format!(" meta({:?})", meta),
+                                MemPlaceMeta::Meta(meta) => format!(" meta({meta:?})"),
                                 MemPlaceMeta::None => String::new(),
                             },
                             mplace.ptr,
@@ -1040,13 +1042,13 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> std::fmt::Debug
                         allocs.extend(mplace.ptr.provenance.map(Provenance::get_alloc_id));
                     }
                     LocalValue::Live(Operand::Immediate(Immediate::Scalar(val))) => {
-                        write!(fmt, " {:?}", val)?;
+                        write!(fmt, " {val:?}")?;
                         if let Scalar::Ptr(ptr, _size) = val {
                             allocs.push(ptr.provenance.get_alloc_id());
                         }
                     }
                     LocalValue::Live(Operand::Immediate(Immediate::ScalarPair(val1, val2))) => {
-                        write!(fmt, " ({:?}, {:?})", val1, val2)?;
+                        write!(fmt, " ({val1:?}, {val2:?})")?;
                         if let Scalar::Ptr(ptr, _size) = val1 {
                             allocs.push(ptr.provenance.get_alloc_id());
                         }
@@ -1062,7 +1064,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> std::fmt::Debug
                 Some(alloc_id) => {
                     write!(fmt, "by ref {:?}: {:?}", mplace.ptr, self.ecx.dump_alloc(alloc_id))
                 }
-                ptr => write!(fmt, " integral by ref: {:?}", ptr),
+                ptr => write!(fmt, " integral by ref: {ptr:?}"),
             },
         }
     }
diff --git a/compiler/rustc_const_eval/src/interpret/intern.rs b/compiler/rustc_const_eval/src/interpret/intern.rs
index 107e5bec614..910c3ca5d0a 100644
--- a/compiler/rustc_const_eval/src/interpret/intern.rs
+++ b/compiler/rustc_const_eval/src/interpret/intern.rs
@@ -164,82 +164,13 @@ impl<'rt, 'mir, 'tcx: 'mir, M: CompileTimeMachine<'mir, 'tcx, const_eval::Memory
         &self.ecx
     }
 
-    fn visit_aggregate(
-        &mut self,
-        mplace: &MPlaceTy<'tcx>,
-        fields: impl Iterator<Item = InterpResult<'tcx, Self::V>>,
-    ) -> InterpResult<'tcx> {
-        // We want to walk the aggregate to look for references to intern. While doing that we
-        // also need to take special care of interior mutability.
-        //
-        // As an optimization, however, if the allocation does not contain any references: we don't
-        // need to do the walk. It can be costly for big arrays for example (e.g. issue #93215).
-        let is_walk_needed = |mplace: &MPlaceTy<'tcx>| -> InterpResult<'tcx, bool> {
-            // ZSTs cannot contain pointers, we can avoid the interning walk.
-            if mplace.layout.is_zst() {
-                return Ok(false);
-            }
-
-            // Now, check whether this allocation could contain references.
-            //
-            // Note, this check may sometimes not be cheap, so we only do it when the walk we'd like
-            // to avoid could be expensive: on the potentially larger types, arrays and slices,
-            // rather than on all aggregates unconditionally.
-            if matches!(mplace.layout.ty.kind(), ty::Array(..) | ty::Slice(..)) {
-                let Some((size, align)) = self.ecx.size_and_align_of_mplace(&mplace)? else {
-                    // We do the walk if we can't determine the size of the mplace: we may be
-                    // dealing with extern types here in the future.
-                    return Ok(true);
-                };
-
-                // If there is no provenance in this allocation, it does not contain references
-                // that point to another allocation, and we can avoid the interning walk.
-                if let Some(alloc) = self.ecx.get_ptr_alloc(mplace.ptr, size, align)? {
-                    if !alloc.has_provenance() {
-                        return Ok(false);
-                    }
-                } else {
-                    // We're encountering a ZST here, and can avoid the walk as well.
-                    return Ok(false);
-                }
-            }
-
-            // In the general case, we do the walk.
-            Ok(true)
-        };
-
-        // If this allocation contains no references to intern, we avoid the potentially costly
-        // walk.
-        //
-        // We can do this before the checks for interior mutability below, because only references
-        // are relevant in that situation, and we're checking if there are any here.
-        if !is_walk_needed(mplace)? {
-            return Ok(());
-        }
-
-        if let Some(def) = mplace.layout.ty.ty_adt_def() {
-            if def.is_unsafe_cell() {
-                // We are crossing over an `UnsafeCell`, we can mutate again. This means that
-                // References we encounter inside here are interned as pointing to mutable
-                // allocations.
-                // Remember the `old` value to handle nested `UnsafeCell`.
-                let old = std::mem::replace(&mut self.inside_unsafe_cell, true);
-                let walked = self.walk_aggregate(mplace, fields);
-                self.inside_unsafe_cell = old;
-                return walked;
-            }
-        }
-
-        self.walk_aggregate(mplace, fields)
-    }
-
     fn visit_value(&mut self, mplace: &MPlaceTy<'tcx>) -> InterpResult<'tcx> {
         // Handle Reference types, as these are the only types with provenance supported by const eval.
         // Raw pointers (and boxes) are handled by the `leftover_allocations` logic.
         let tcx = self.ecx.tcx;
         let ty = mplace.layout.ty;
         if let ty::Ref(_, referenced_ty, ref_mutability) = *ty.kind() {
-            let value = self.ecx.read_immediate(&mplace.into())?;
+            let value = self.ecx.read_immediate(mplace)?;
             let mplace = self.ecx.ref_to_mplace(&value)?;
             assert_eq!(mplace.layout.ty, referenced_ty);
             // Handle trait object vtables.
@@ -315,7 +246,63 @@ impl<'rt, 'mir, 'tcx: 'mir, M: CompileTimeMachine<'mir, 'tcx, const_eval::Memory
             }
             Ok(())
         } else {
-            // Not a reference -- proceed recursively.
+            // Not a reference. Check if we want to recurse.
+            let is_walk_needed = |mplace: &MPlaceTy<'tcx>| -> InterpResult<'tcx, bool> {
+                // ZSTs cannot contain pointers, we can avoid the interning walk.
+                if mplace.layout.is_zst() {
+                    return Ok(false);
+                }
+
+                // Now, check whether this allocation could contain references.
+                //
+                // Note, this check may sometimes not be cheap, so we only do it when the walk we'd like
+                // to avoid could be expensive: on the potentially larger types, arrays and slices,
+                // rather than on all aggregates unconditionally.
+                if matches!(mplace.layout.ty.kind(), ty::Array(..) | ty::Slice(..)) {
+                    let Some((size, align)) = self.ecx.size_and_align_of_mplace(&mplace)? else {
+                        // We do the walk if we can't determine the size of the mplace: we may be
+                        // dealing with extern types here in the future.
+                        return Ok(true);
+                    };
+
+                    // If there is no provenance in this allocation, it does not contain references
+                    // that point to another allocation, and we can avoid the interning walk.
+                    if let Some(alloc) = self.ecx.get_ptr_alloc(mplace.ptr, size, align)? {
+                        if !alloc.has_provenance() {
+                            return Ok(false);
+                        }
+                    } else {
+                        // We're encountering a ZST here, and can avoid the walk as well.
+                        return Ok(false);
+                    }
+                }
+
+                // In the general case, we do the walk.
+                Ok(true)
+            };
+
+            // If this allocation contains no references to intern, we avoid the potentially costly
+            // walk.
+            //
+            // We can do this before the checks for interior mutability below, because only references
+            // are relevant in that situation, and we're checking if there are any here.
+            if !is_walk_needed(mplace)? {
+                return Ok(());
+            }
+
+            if let Some(def) = mplace.layout.ty.ty_adt_def() {
+                if def.is_unsafe_cell() {
+                    // We are crossing over an `UnsafeCell`, we can mutate again. This means that
+                    // References we encounter inside here are interned as pointing to mutable
+                    // allocations.
+                    // Remember the `old` value to handle nested `UnsafeCell`.
+                    let old = std::mem::replace(&mut self.inside_unsafe_cell, true);
+                    let walked = self.walk_value(mplace);
+                    self.inside_unsafe_cell = old;
+                    return walked;
+                }
+            }
+
             self.walk_value(mplace)
         }
     }
@@ -371,7 +358,7 @@ pub fn intern_const_alloc_recursive<
         Some(ret.layout.ty),
     );
 
-    ref_tracking.track((*ret, base_intern_mode), || ());
+    ref_tracking.track((ret.clone(), base_intern_mode), || ());
 
     while let Some(((mplace, mode), _)) = ref_tracking.todo.pop() {
         let res = InternVisitor {
@@ -477,7 +464,7 @@ impl<'mir, 'tcx: 'mir, M: super::intern::CompileTimeMachine<'mir, 'tcx, !>>
         ) -> InterpResult<'tcx, ()>,
     ) -> InterpResult<'tcx, ConstAllocation<'tcx>> {
         let dest = self.allocate(layout, MemoryKind::Stack)?;
-        f(self, &dest.into())?;
+        f(self, &dest.clone().into())?;
         let mut alloc = self.memory.alloc_map.remove(&dest.ptr.provenance.unwrap()).unwrap().1;
         alloc.mutability = Mutability::Not;
         Ok(self.tcx.mk_const_alloc(alloc))
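
The `inside_unsafe_cell` handling above is what makes interior mutability behind statics sound: anything reached through an `UnsafeCell` is interned as a mutable allocation. A user-level illustration (plain Rust, not part of this diff):

    use std::sync::atomic::{AtomicU32, Ordering};

    // The allocation behind this static must stay mutable after interning,
    // because `AtomicU32` wraps an `UnsafeCell<u32>`.
    static COUNTER: AtomicU32 = AtomicU32::new(0);

    fn bump() -> u32 {
        COUNTER.fetch_add(1, Ordering::Relaxed)
    }
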
diff --git a/compiler/rustc_const_eval/src/interpret/intrinsics.rs b/compiler/rustc_const_eval/src/interpret/intrinsics.rs
index 04cae23f852..be7c14f33c2 100644
--- a/compiler/rustc_const_eval/src/interpret/intrinsics.rs
+++ b/compiler/rustc_const_eval/src/interpret/intrinsics.rs
@@ -226,8 +226,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             }
             sym::discriminant_value => {
                 let place = self.deref_operand(&args[0])?;
-                let discr_val = self.read_discriminant(&place.into())?.0;
-                self.write_scalar(discr_val, dest)?;
+                let variant = self.read_discriminant(&place)?;
+                let discr = self.discriminant_for_variant(place.layout, variant)?;
+                self.write_scalar(discr, dest)?;
             }
             sym::exact_div => {
                 let l = self.read_immediate(&args[0])?;
@@ -393,17 +394,14 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                         // For *all* intrinsics we first check `is_uninhabited` to give a more specific
                         // error message.
                         _ if layout.abi.is_uninhabited() => format!(
-                            "aborted execution: attempted to instantiate uninhabited type `{}`",
-                            ty
+                            "aborted execution: attempted to instantiate uninhabited type `{ty}`"
                         ),
                         ValidityRequirement::Inhabited => bug!("handled earlier"),
                         ValidityRequirement::Zero => format!(
-                            "aborted execution: attempted to zero-initialize type `{}`, which is invalid",
-                            ty
+                            "aborted execution: attempted to zero-initialize type `{ty}`, which is invalid"
                         ),
                         ValidityRequirement::UninitMitigated0x01Fill => format!(
-                            "aborted execution: attempted to leave type `{}` uninitialized, which is invalid",
-                            ty
+                            "aborted execution: attempted to leave type `{ty}` uninitialized, which is invalid"
                         ),
                         ValidityRequirement::Uninit => bug!("assert_uninit_valid doesn't exist"),
                     };
@@ -419,19 +417,17 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 assert_eq!(input_len, dest_len, "Return vector length must match input length");
                 assert!(
                     index < dest_len,
-                    "Index `{}` must be in bounds of vector with length {}",
-                    index,
-                    dest_len
+                    "Index `{index}` must be in bounds of vector with length {dest_len}"
                 );
 
                 for i in 0..dest_len {
-                    let place = self.mplace_index(&dest, i)?;
+                    let place = self.project_index(&dest, i)?;
                     let value = if i == index {
                         elem.clone()
                     } else {
-                        self.mplace_index(&input, i)?.into()
+                        self.project_index(&input, i)?.into()
                     };
-                    self.copy_op(&value, &place.into(), /*allow_transmute*/ false)?;
+                    self.copy_op(&value, &place, /*allow_transmute*/ false)?;
                 }
             }
             sym::simd_extract => {
@@ -439,12 +435,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 let (input, input_len) = self.operand_to_simd(&args[0])?;
                 assert!(
                     index < input_len,
-                    "index `{}` must be in bounds of vector with length {}",
-                    index,
-                    input_len
+                    "index `{index}` must be in bounds of vector with length {input_len}"
                 );
                 self.copy_op(
-                    &self.mplace_index(&input, index)?.into(),
+                    &self.project_index(&input, index)?,
                     dest,
                     /*allow_transmute*/ false,
                 )?;
@@ -609,7 +603,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         count: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,
         nonoverlapping: bool,
     ) -> InterpResult<'tcx> {
-        let count = self.read_target_usize(&count)?;
+        let count = self.read_target_usize(count)?;
         let layout = self.layout_of(src.layout.ty.builtin_deref(true).unwrap().ty)?;
         let (size, align) = (layout.size, layout.align.abi);
         // `checked_mul` enforces a too small bound (the correct one would probably be target_isize_max),
@@ -621,8 +615,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             )
         })?;
 
-        let src = self.read_pointer(&src)?;
-        let dst = self.read_pointer(&dst)?;
+        let src = self.read_pointer(src)?;
+        let dst = self.read_pointer(dst)?;
 
         self.mem_copy(src, align, dst, align, size, nonoverlapping)
     }
@@ -635,9 +629,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
     ) -> InterpResult<'tcx> {
         let layout = self.layout_of(dst.layout.ty.builtin_deref(true).unwrap().ty)?;
 
-        let dst = self.read_pointer(&dst)?;
-        let byte = self.read_scalar(&byte)?.to_u8()?;
-        let count = self.read_target_usize(&count)?;
+        let dst = self.read_pointer(dst)?;
+        let byte = self.read_scalar(byte)?.to_u8()?;
+        let count = self.read_target_usize(count)?;
 
         // `checked_mul` enforces a too small bound (the correct one would probably be target_isize_max),
         // but no actual allocation can be big enough for the difference to be noticeable.
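
With this change, `sym::discriminant_value` reads the variant index first and converts it to a scalar via the new `discriminant_for_variant` in a second step. At the language level this intrinsic backs `std::mem::discriminant`; a small usage sketch (stable APIs only):

    use std::mem;

    enum Op { Add(u8), Neg }

    fn same_variant(a: &Op, b: &Op) -> bool {
        // Compares variants without comparing payloads.
        mem::discriminant(a) == mem::discriminant(b)
    }
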
diff --git a/compiler/rustc_const_eval/src/interpret/intrinsics/caller_location.rs b/compiler/rustc_const_eval/src/interpret/intrinsics/caller_location.rs
index c4fe293bfac..948bec7464a 100644
--- a/compiler/rustc_const_eval/src/interpret/intrinsics/caller_location.rs
+++ b/compiler/rustc_const_eval/src/interpret/intrinsics/caller_location.rs
@@ -101,11 +101,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         let location = self.allocate(loc_layout, MemoryKind::CallerLocation).unwrap();
 
         // Initialize fields.
-        self.write_immediate(file.to_ref(self), &self.mplace_field(&location, 0).unwrap().into())
+        self.write_immediate(file.to_ref(self), &self.project_field(&location, 0).unwrap())
             .expect("writing to memory we just allocated cannot fail");
-        self.write_scalar(line, &self.mplace_field(&location, 1).unwrap().into())
+        self.write_scalar(line, &self.project_field(&location, 1).unwrap())
             .expect("writing to memory we just allocated cannot fail");
-        self.write_scalar(col, &self.mplace_field(&location, 2).unwrap().into())
+        self.write_scalar(col, &self.project_field(&location, 2).unwrap())
             .expect("writing to memory we just allocated cannot fail");
 
         location
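
The three `project_field` writes above fill the `file`, `line`, and `column` fields of a `core::panic::Location`, in that order. For reference, the surface feature this implements (stable Rust):

    #[track_caller]
    fn log_call() {
        let loc = std::panic::Location::caller();
        println!("called from {}:{}:{}", loc.file(), loc.line(), loc.column());
    }
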
diff --git a/compiler/rustc_const_eval/src/interpret/memory.rs b/compiler/rustc_const_eval/src/interpret/memory.rs
index 7b44a20ef03..02d022a2252 100644
--- a/compiler/rustc_const_eval/src/interpret/memory.rs
+++ b/compiler/rustc_const_eval/src/interpret/memory.rs
@@ -53,7 +53,7 @@ impl<T: fmt::Display> fmt::Display for MemoryKind<T> {
         match self {
             MemoryKind::Stack => write!(f, "stack variable"),
             MemoryKind::CallerLocation => write!(f, "caller location"),
-            MemoryKind::Machine(m) => write!(f, "{}", m),
+            MemoryKind::Machine(m) => write!(f, "{m}"),
         }
     }
 }
@@ -907,7 +907,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> std::fmt::Debug for DumpAllocs<'a,
             match self.ecx.memory.alloc_map.get(id) {
                 Some((kind, alloc)) => {
                     // normal alloc
-                    write!(fmt, " ({}, ", kind)?;
+                    write!(fmt, " ({kind}, ")?;
                     write_allocation_track_relocs(
                         &mut *fmt,
                         *self.ecx.tcx,
diff --git a/compiler/rustc_const_eval/src/interpret/mod.rs b/compiler/rustc_const_eval/src/interpret/mod.rs
index f657f954f9c..b0b553c45d4 100644
--- a/compiler/rustc_const_eval/src/interpret/mod.rs
+++ b/compiler/rustc_const_eval/src/interpret/mod.rs
@@ -24,11 +24,12 @@ pub use self::eval_context::{Frame, FrameInfo, InterpCx, LocalState, LocalValue,
 pub use self::intern::{intern_const_alloc_recursive, InternKind};
 pub use self::machine::{compile_time_machine, AllocMap, Machine, MayLeak, StackPopJump};
 pub use self::memory::{AllocKind, AllocRef, AllocRefMut, FnVal, Memory, MemoryKind};
-pub use self::operand::{ImmTy, Immediate, OpTy, Operand};
-pub use self::place::{MPlaceTy, MemPlace, MemPlaceMeta, Place, PlaceTy};
+pub use self::operand::{ImmTy, Immediate, OpTy, Operand, Readable};
+pub use self::place::{MPlaceTy, MemPlace, MemPlaceMeta, Place, PlaceTy, Writeable};
+pub use self::projection::Projectable;
 pub use self::terminator::FnArg;
 pub use self::validity::{CtfeValidationMode, RefTracking};
-pub use self::visitor::{MutValueVisitor, Value, ValueVisitor};
+pub use self::visitor::ValueVisitor;
 
 pub(crate) use self::intrinsics::eval_nullary_intrinsic;
 use eval_context::{from_known_layout, mir_assign_valid_types};
diff --git a/compiler/rustc_const_eval/src/interpret/operand.rs b/compiler/rustc_const_eval/src/interpret/operand.rs
index da45fdca1a1..6e57a56b445 100644
--- a/compiler/rustc_const_eval/src/interpret/operand.rs
+++ b/compiler/rustc_const_eval/src/interpret/operand.rs
@@ -1,6 +1,8 @@
 //! Functions concerning immediate values and operands, and reading from operands.
 //! All high-level functions to read from memory work on operands as sources.
 
+use std::assert_matches::assert_matches;
+
 use either::{Either, Left, Right};
 
 use rustc_hir::def::Namespace;
@@ -13,8 +15,8 @@ use rustc_target::abi::{self, Abi, Align, HasDataLayout, Size};
 
 use super::{
     alloc_range, from_known_layout, mir_assign_valid_types, AllocId, ConstValue, Frame, GlobalId,
-    InterpCx, InterpResult, MPlaceTy, Machine, MemPlace, MemPlaceMeta, Place, PlaceTy, Pointer,
-    Provenance, Scalar,
+    InterpCx, InterpResult, MPlaceTy, Machine, MemPlace, MemPlaceMeta, PlaceTy, Pointer,
+    Projectable, Provenance, Scalar,
 };
 
 /// An `Immediate` represents a single immediate self-contained Rust value.
@@ -31,7 +33,7 @@ pub enum Immediate<Prov: Provenance = AllocId> {
     /// A pair of two scalar value (must have `ScalarPair` ABI where both fields are
     /// `Scalar::Initialized`).
     ScalarPair(Scalar<Prov>, Scalar<Prov>),
-    /// A value of fully uninitialized memory. Can have and size and layout.
+    /// A value of fully uninitialized memory. Can have arbitrary size and layout.
     Uninit,
 }
 
@@ -178,20 +180,6 @@ impl<'tcx, Prov: Provenance> From<MPlaceTy<'tcx, Prov>> for OpTy<'tcx, Prov> {
     }
 }
 
-impl<'tcx, Prov: Provenance> From<&'_ MPlaceTy<'tcx, Prov>> for OpTy<'tcx, Prov> {
-    #[inline(always)]
-    fn from(mplace: &MPlaceTy<'tcx, Prov>) -> Self {
-        OpTy { op: Operand::Indirect(**mplace), layout: mplace.layout, align: Some(mplace.align) }
-    }
-}
-
-impl<'tcx, Prov: Provenance> From<&'_ mut MPlaceTy<'tcx, Prov>> for OpTy<'tcx, Prov> {
-    #[inline(always)]
-    fn from(mplace: &mut MPlaceTy<'tcx, Prov>) -> Self {
-        OpTy { op: Operand::Indirect(**mplace), layout: mplace.layout, align: Some(mplace.align) }
-    }
-}
-
 impl<'tcx, Prov: Provenance> From<ImmTy<'tcx, Prov>> for OpTy<'tcx, Prov> {
     #[inline(always)]
     fn from(val: ImmTy<'tcx, Prov>) -> Self {
@@ -240,43 +228,126 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
         let int = self.to_scalar().assert_int();
         ConstInt::new(int, self.layout.ty.is_signed(), self.layout.ty.is_ptr_sized_integral())
     }
+
+    /// Compute the "sub-immediate" that is located within the `base` at the given offset with the
+    /// given layout.
+    // Not called `offset` to avoid confusion with the trait method.
+    fn offset_(&self, offset: Size, layout: TyAndLayout<'tcx>, cx: &impl HasDataLayout) -> Self {
+        // This makes several assumptions about what layouts we will encounter; we match what
+        // codegen does as well as we can (see `extract_field` in `rustc_codegen_ssa/src/mir/operand.rs`).
+        let inner_val: Immediate<_> = match (**self, self.layout.abi) {
+            // if the entire value is uninit, then so is the field (can happen in ConstProp)
+            (Immediate::Uninit, _) => Immediate::Uninit,
+            // the field contains no information, can be left uninit
+            _ if layout.is_zst() => Immediate::Uninit,
+            // some fieldless enum variants can have non-zero size but still `Aggregate` ABI... try
+            // to detect those here and also give them no data
+            _ if matches!(layout.abi, Abi::Aggregate { .. })
+                && matches!(&layout.fields, abi::FieldsShape::Arbitrary { offsets, .. } if offsets.len() == 0) =>
+            {
+                Immediate::Uninit
+            }
+            // the field covers the entire type
+            _ if layout.size == self.layout.size => {
+                assert_eq!(offset.bytes(), 0);
+                assert!(
+                    match (self.layout.abi, layout.abi) {
+                        (Abi::Scalar(..), Abi::Scalar(..)) => true,
+                        (Abi::ScalarPair(..), Abi::ScalarPair(..)) => true,
+                        _ => false,
+                    },
+                    "cannot project into {} immediate with equally-sized field {}\nouter ABI: {:#?}\nfield ABI: {:#?}",
+                    self.layout.ty,
+                    layout.ty,
+                    self.layout.abi,
+                    layout.abi,
+                );
+                **self
+            }
+            // extract fields from types with `ScalarPair` ABI
+            (Immediate::ScalarPair(a_val, b_val), Abi::ScalarPair(a, b)) => {
+                assert!(matches!(layout.abi, Abi::Scalar(..)));
+                Immediate::from(if offset.bytes() == 0 {
+                    debug_assert_eq!(layout.size, a.size(cx));
+                    a_val
+                } else {
+                    debug_assert_eq!(offset, a.size(cx).align_to(b.align(cx).abi));
+                    debug_assert_eq!(layout.size, b.size(cx));
+                    b_val
+                })
+            }
+            // everything else is a bug
+            _ => bug!("invalid field access on immediate {}, layout {:#?}", self, self.layout),
+        };
+
+        ImmTy::from_immediate(inner_val, layout)
+    }
+}
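
As an illustration of the `offset_` logic above, here is a simplified standalone model of pulling one component out of a `ScalarPair` immediate; the `Imm` enum and byte offsets below are stand-ins for rustc's `Immediate` and layout machinery, not real APIs:

    #[derive(Clone, Copy, Debug, PartialEq)]
    enum Imm {
        Scalar(u128),
        ScalarPair(u128, u128),
        Uninit,
    }

    // `b_offset` is where the second component lives: the first component's
    // size rounded up to the second component's alignment.
    fn extract_field(imm: Imm, offset: u64, b_offset: u64) -> Imm {
        match imm {
            // if the whole value is uninit, so is every field
            Imm::Uninit => Imm::Uninit,
            Imm::ScalarPair(a, b) => {
                if offset == 0 {
                    Imm::Scalar(a)
                } else {
                    assert_eq!(offset, b_offset);
                    Imm::Scalar(b)
                }
            }
            // projecting a strictly smaller field out of a single scalar is a bug
            Imm::Scalar(_) => panic!("invalid field access on immediate"),
        }
    }

    fn main() {
        let pair = Imm::ScalarPair(7, 1); // morally, e.g., a (u64, bool) pair
        assert_eq!(extract_field(pair, 0, 8), Imm::Scalar(7));
        assert_eq!(extract_field(pair, 8, 8), Imm::Scalar(1));
    }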
+
+impl<'tcx, Prov: Provenance> Projectable<'tcx, Prov> for ImmTy<'tcx, Prov> {
+    #[inline(always)]
+    fn layout(&self) -> TyAndLayout<'tcx> {
+        self.layout
+    }
+
+    fn meta<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
+        &self,
+        _ecx: &InterpCx<'mir, 'tcx, M>,
+    ) -> InterpResult<'tcx, MemPlaceMeta<M::Provenance>> {
+        assert!(self.layout.is_sized()); // unsized ImmTy can only exist temporarily and should never get here
+        Ok(MemPlaceMeta::None)
+    }
+
+    fn offset_with_meta(
+        &self,
+        offset: Size,
+        meta: MemPlaceMeta<Prov>,
+        layout: TyAndLayout<'tcx>,
+        cx: &impl HasDataLayout,
+    ) -> InterpResult<'tcx, Self> {
+        assert_matches!(meta, MemPlaceMeta::None); // we can't store this anywhere anyway
+        Ok(self.offset_(offset, layout, cx))
+    }
+
+    fn to_op<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
+        &self,
+        _ecx: &InterpCx<'mir, 'tcx, M>,
+    ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+        Ok(self.clone().into())
+    }
 }
 
 impl<'tcx, Prov: Provenance> OpTy<'tcx, Prov> {
-    pub fn len(&self, cx: &impl HasDataLayout) -> InterpResult<'tcx, u64> {
-        if self.layout.is_unsized() {
-            if matches!(self.op, Operand::Immediate(Immediate::Uninit)) {
-                // Uninit unsized places shouldn't occur. In the interpreter we have them
-                // temporarily for unsized arguments before their value is put in; in ConstProp they
-                // remain uninit and this code can actually be reached.
-                throw_inval!(UninitUnsizedLocal);
+    // Provided as an inherent method since it doesn't need the `ecx` of `Projectable::meta`.
+    pub fn meta(&self) -> InterpResult<'tcx, MemPlaceMeta<Prov>> {
+        Ok(if self.layout.is_unsized() {
+            if matches!(self.op, Operand::Immediate(_)) {
+                // An unsized immediate OpTy should never occur: we create a MemPlace for all unsized locals during argument passing.
+                // However, ConstProp doesn't do that, so we can run into this nonsense situation.
+                throw_inval!(ConstPropNonsense);
             }
             // There are no unsized immediates.
-            self.assert_mem_place().len(cx)
+            self.assert_mem_place().meta
         } else {
-            match self.layout.fields {
-                abi::FieldsShape::Array { count, .. } => Ok(count),
-                _ => bug!("len not supported on sized type {:?}", self.layout.ty),
-            }
-        }
+            MemPlaceMeta::None
+        })
     }
+}
 
-    /// Replace the layout of this operand. There's basically no sanity check that this makes sense,
-    /// you better know what you are doing! If this is an immediate, applying the wrong layout can
-    /// not just lead to invalid data, it can actually *shift the data around* since the offsets of
-    /// a ScalarPair are entirely determined by the layout, not the data.
-    pub fn transmute(&self, layout: TyAndLayout<'tcx>) -> Self {
-        assert_eq!(
-            self.layout.size, layout.size,
-            "transmuting with a size change, that doesn't seem right"
-        );
-        OpTy { layout, ..*self }
+impl<'tcx, Prov: Provenance + 'static> Projectable<'tcx, Prov> for OpTy<'tcx, Prov> {
+    #[inline(always)]
+    fn layout(&self) -> TyAndLayout<'tcx> {
+        self.layout
     }
 
-    /// Offset the operand in memory (if possible) and change its metadata.
-    ///
-    /// This can go wrong very easily if you give the wrong layout for the new place!
-    pub(super) fn offset_with_meta(
+    fn meta<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
+        &self,
+        _ecx: &InterpCx<'mir, 'tcx, M>,
+    ) -> InterpResult<'tcx, MemPlaceMeta<M::Provenance>> {
+        self.meta()
+    }
+
+    fn offset_with_meta(
         &self,
         offset: Size,
         meta: MemPlaceMeta<Prov>,
@@ -286,28 +357,43 @@ impl<'tcx, Prov: Provenance> OpTy<'tcx, Prov> {
         match self.as_mplace_or_imm() {
             Left(mplace) => Ok(mplace.offset_with_meta(offset, meta, layout, cx)?.into()),
             Right(imm) => {
-                assert!(
-                    matches!(*imm, Immediate::Uninit),
-                    "Scalar/ScalarPair cannot be offset into"
-                );
                 assert!(!meta.has_meta()); // no place to store metadata here
-                // Every part of an uninit is uninit.
-                Ok(ImmTy::uninit(layout).into())
+                Ok(imm.offset(offset, layout, cx)?.into())
             }
         }
     }
 
-    /// Offset the operand in memory (if possible).
-    ///
-    /// This can go wrong very easily if you give the wrong layout for the new place!
-    pub fn offset(
+    fn to_op<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
         &self,
-        offset: Size,
-        layout: TyAndLayout<'tcx>,
-        cx: &impl HasDataLayout,
-    ) -> InterpResult<'tcx, Self> {
-        assert!(layout.is_sized());
-        self.offset_with_meta(offset, MemPlaceMeta::None, layout, cx)
+        _ecx: &InterpCx<'mir, 'tcx, M>,
+    ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+        Ok(self.clone())
+    }
+}
+
+pub trait Readable<'tcx, Prov: Provenance>: Projectable<'tcx, Prov> {
+    fn as_mplace_or_imm(&self) -> Either<MPlaceTy<'tcx, Prov>, ImmTy<'tcx, Prov>>;
+}
+
+impl<'tcx, Prov: Provenance + 'static> Readable<'tcx, Prov> for OpTy<'tcx, Prov> {
+    #[inline(always)]
+    fn as_mplace_or_imm(&self) -> Either<MPlaceTy<'tcx, Prov>, ImmTy<'tcx, Prov>> {
+        self.as_mplace_or_imm()
+    }
+}
+
+impl<'tcx, Prov: Provenance + 'static> Readable<'tcx, Prov> for MPlaceTy<'tcx, Prov> {
+    #[inline(always)]
+    fn as_mplace_or_imm(&self) -> Either<MPlaceTy<'tcx, Prov>, ImmTy<'tcx, Prov>> {
+        Left(self.clone())
+    }
+}
+
+impl<'tcx, Prov: Provenance> Readable<'tcx, Prov> for ImmTy<'tcx, Prov> {
+    #[inline(always)]
+    fn as_mplace_or_imm(&self) -> Either<MPlaceTy<'tcx, Prov>, ImmTy<'tcx, Prov>> {
+        Right(self.clone())
     }
 }
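
With `Readable` in place, read helpers no longer need an up-front conversion to `OpTy`. A sketch of a caller-side helper (hypothetical, not part of this diff, and not standalone-compilable; it assumes the items exported from `rustc_const_eval::interpret`):

    fn read_bool<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>>(
        ecx: &InterpCx<'mir, 'tcx, M>,
        src: &impl Readable<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx, bool> {
        // accepts an OpTy, MPlaceTy, or ImmTy source uniformly
        ecx.read_scalar(src)?.to_bool()
    }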
 
@@ -383,14 +469,14 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
     /// ConstProp needs it, though.
     pub fn read_immediate_raw(
         &self,
-        src: &OpTy<'tcx, M::Provenance>,
+        src: &impl Readable<'tcx, M::Provenance>,
     ) -> InterpResult<'tcx, Either<MPlaceTy<'tcx, M::Provenance>, ImmTy<'tcx, M::Provenance>>> {
         Ok(match src.as_mplace_or_imm() {
             Left(ref mplace) => {
                 if let Some(val) = self.read_immediate_from_mplace_raw(mplace)? {
                     Right(val)
                 } else {
-                    Left(*mplace)
+                    Left(mplace.clone())
                 }
             }
             Right(val) => Right(val),
@@ -403,14 +489,18 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
     #[inline(always)]
     pub fn read_immediate(
         &self,
-        op: &OpTy<'tcx, M::Provenance>,
+        op: &impl Readable<'tcx, M::Provenance>,
     ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
         if !matches!(
-            op.layout.abi,
+            op.layout().abi,
             Abi::Scalar(abi::Scalar::Initialized { .. })
                 | Abi::ScalarPair(abi::Scalar::Initialized { .. }, abi::Scalar::Initialized { .. })
         ) {
-            span_bug!(self.cur_span(), "primitive read not possible for type: {:?}", op.layout.ty);
+            span_bug!(
+                self.cur_span(),
+                "primitive read not possible for type: {:?}",
+                op.layout().ty
+            );
         }
         let imm = self.read_immediate_raw(op)?.right().unwrap();
         if matches!(*imm, Immediate::Uninit) {
@@ -422,7 +512,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
     /// Read a scalar from a place
     pub fn read_scalar(
         &self,
-        op: &OpTy<'tcx, M::Provenance>,
+        op: &impl Readable<'tcx, M::Provenance>,
     ) -> InterpResult<'tcx, Scalar<M::Provenance>> {
         Ok(self.read_immediate(op)?.to_scalar())
     }
@@ -433,16 +523,22 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
     /// Read a pointer from a place.
     pub fn read_pointer(
         &self,
-        op: &OpTy<'tcx, M::Provenance>,
+        op: &impl Readable<'tcx, M::Provenance>,
     ) -> InterpResult<'tcx, Pointer<Option<M::Provenance>>> {
         self.read_scalar(op)?.to_pointer(self)
     }
     /// Read a pointer-sized unsigned integer from a place.
-    pub fn read_target_usize(&self, op: &OpTy<'tcx, M::Provenance>) -> InterpResult<'tcx, u64> {
+    pub fn read_target_usize(
+        &self,
+        op: &impl Readable<'tcx, M::Provenance>,
+    ) -> InterpResult<'tcx, u64> {
         self.read_scalar(op)?.to_target_usize(self)
     }
     /// Read a pointer-sized signed integer from a place.
-    pub fn read_target_isize(&self, op: &OpTy<'tcx, M::Provenance>) -> InterpResult<'tcx, i64> {
+    pub fn read_target_isize(
+        &self,
+        op: &impl Readable<'tcx, M::Provenance>,
+    ) -> InterpResult<'tcx, i64> {
         self.read_scalar(op)?.to_target_isize(self)
     }
 
@@ -497,18 +593,28 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
     /// Every place can be read from, so we can turn any place into an operand.
     /// This will definitely return `Indirect` if the place is a `Ptr`, i.e., this
     /// will never actually read from memory.
-    #[inline(always)]
     pub fn place_to_op(
         &self,
         place: &PlaceTy<'tcx, M::Provenance>,
     ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
-        let op = match **place {
-            Place::Ptr(mplace) => Operand::Indirect(mplace),
-            Place::Local { frame, local } => {
-                *self.local_to_op(&self.stack()[frame], local, None)?
+        match place.as_mplace_or_local() {
+            Left(mplace) => Ok(mplace.into()),
+            Right((frame, local, offset)) => {
+                let base = self.local_to_op(&self.stack()[frame], local, None)?;
+                let mut field = if let Some(offset) = offset {
+                    // An offset has been applied, so we can be sure the field is sized.
+                    base.offset(offset, place.layout, self)?
+                } else {
+                    assert_eq!(place.layout, base.layout);
+                    // Unsized cases are possible here since an unsized local will be a
+                    // `Place::Local` until the first projection calls `place_to_op` to extract the
+                    // underlying mplace.
+                    base
+                };
+                field.align = Some(place.align);
+                Ok(field)
             }
-        };
-        Ok(OpTy { op, layout: place.layout, align: Some(place.align) })
+        }
     }
 
     /// Evaluate a place with the goal of reading from it. This lets us sometimes
@@ -525,7 +631,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         let mut op = self.local_to_op(self.frame(), mir_place.local, layout)?;
         // Using `try_fold` turned out to be bad for performance, hence the loop.
         for elem in mir_place.projection.iter() {
-            op = self.operand_projection(&op, elem)?
+            op = self.project(&op, elem)?
         }
 
         trace!("eval_place_to_op: got {:?}", *op);
@@ -592,10 +698,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             ty::ConstKind::Unevaluated(uv) => {
                 let instance = self.resolve(uv.def, uv.args)?;
                 let cid = GlobalId { instance, promoted: None };
-                self.ctfe_query(span, |tcx| {
-                    tcx.eval_to_valtree(self.param_env.with_const().and(cid))
-                })?
-                .unwrap_or_else(|| bug!("unable to create ValTree for {uv:?}"))
+                self.ctfe_query(span, |tcx| tcx.eval_to_valtree(self.param_env.and(cid)))?
+                    .unwrap_or_else(|| bug!("unable to create ValTree for {uv:?}"))
             }
             ty::ConstKind::Bound(..) | ty::ConstKind::Infer(..) => {
                 span_bug!(self.cur_span(), "unexpected ConstKind in ctfe: {val:?}")
diff --git a/compiler/rustc_const_eval/src/interpret/operator.rs b/compiler/rustc_const_eval/src/interpret/operator.rs
index e04764636cc..eb064578067 100644
--- a/compiler/rustc_const_eval/src/interpret/operator.rs
+++ b/compiler/rustc_const_eval/src/interpret/operator.rs
@@ -24,8 +24,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         debug_assert_eq!(
             Ty::new_tup(self.tcx.tcx, &[ty, self.tcx.types.bool]),
             dest.layout.ty,
-            "type mismatch for result of {:?}",
-            op,
+            "type mismatch for result of {op:?}",
         );
         // Write the result to `dest`.
         if let Abi::ScalarPair(..) = dest.layout.abi {
@@ -38,9 +37,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             // With randomized layout, `(int, bool)` might cease to be a `ScalarPair`, so we have to
             // do a component-wise write here. This code path is slower than the above because
             // `place_field` will have to `force_allocate` locals here.
-            let val_field = self.place_field(&dest, 0)?;
+            let val_field = self.project_field(dest, 0)?;
             self.write_scalar(val, &val_field)?;
-            let overflowed_field = self.place_field(&dest, 1)?;
+            let overflowed_field = self.project_field(dest, 1)?;
             self.write_scalar(Scalar::from_bool(overflowed), &overflowed_field)?;
         }
         Ok(())
@@ -56,7 +55,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         dest: &PlaceTy<'tcx, M::Provenance>,
     ) -> InterpResult<'tcx> {
         let (val, _overflowed, ty) = self.overflowing_binary_op(op, left, right)?;
-        assert_eq!(ty, dest.layout.ty, "type mismatch for result of {:?}", op);
+        assert_eq!(ty, dest.layout.ty, "type mismatch for result of {op:?}");
         self.write_scalar(val, dest)
     }
 }
diff --git a/compiler/rustc_const_eval/src/interpret/place.rs b/compiler/rustc_const_eval/src/interpret/place.rs
index a9b2b43f1e6..96a960118ce 100644
--- a/compiler/rustc_const_eval/src/interpret/place.rs
+++ b/compiler/rustc_const_eval/src/interpret/place.rs
@@ -2,11 +2,14 @@
 //! into a place.
 //! All high-level functions to write to memory work on places as destinations.
 
+use std::assert_matches::assert_matches;
+
 use either::{Either, Left, Right};
 
 use rustc_ast::Mutability;
 use rustc_index::IndexSlice;
 use rustc_middle::mir;
+use rustc_middle::mir::interpret::PointerArithmetic;
 use rustc_middle::ty;
 use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
 use rustc_middle::ty::Ty;
@@ -15,7 +18,7 @@ use rustc_target::abi::{self, Abi, Align, FieldIdx, HasDataLayout, Size, FIRST_V
 use super::{
     alloc_range, mir_assign_valid_types, AllocId, AllocRef, AllocRefMut, CheckInAllocMsg,
     ConstAlloc, ImmTy, Immediate, InterpCx, InterpResult, Machine, MemoryKind, OpTy, Operand,
-    Pointer, Provenance, Scalar,
+    Pointer, Projectable, Provenance, Readable, Scalar,
 };
 
 #[derive(Copy, Clone, Hash, PartialEq, Eq, Debug)]
@@ -44,6 +47,27 @@ impl<Prov: Provenance> MemPlaceMeta<Prov> {
             Self::None => false,
         }
     }
+
+    pub(crate) fn len<'tcx>(
+        &self,
+        layout: TyAndLayout<'tcx>,
+        cx: &impl HasDataLayout,
+    ) -> InterpResult<'tcx, u64> {
+        if layout.is_unsized() {
+            // We need to consult `meta` metadata
+            match layout.ty.kind() {
+                ty::Slice(..) | ty::Str => self.unwrap_meta().to_target_usize(cx),
+                _ => bug!("len not supported on unsized type {:?}", layout.ty),
+            }
+        } else {
+            // Go through the layout. There are lots of types that support a length,
+            // e.g., SIMD types. (But not all repr(simd) types even have FieldsShape::Array!)
+            match layout.fields {
+                abi::FieldsShape::Array { count, .. } => Ok(count),
+                _ => bug!("len not supported on sized type {:?}", layout.ty),
+            }
+        }
+    }
 }
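
A minimal standalone model of the two `len` paths above (plain values standing in for layouts and metadata, not rustc APIs): unsized places consult the length stored next to the pointer, while sized array-like layouts carry their count in `FieldsShape::Array`:

    fn len(is_unsized: bool, meta_len: Option<u64>, array_count: Option<u64>) -> u64 {
        if is_unsized {
            // e.g. a `[u8]` or `str` place: length travels as wide-pointer metadata
            meta_len.expect("unsized place must carry length metadata")
        } else {
            // e.g. `[u8; 4]` or a SIMD type: length comes from the layout
            array_count.expect("len not supported on this sized type")
        }
    }

    fn main() {
        assert_eq!(len(true, Some(3), None), 3);
        assert_eq!(len(false, None, Some(4)), 4);
    }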
 
 #[derive(Copy, Clone, Hash, PartialEq, Eq, Debug)]
@@ -57,7 +81,7 @@ pub struct MemPlace<Prov: Provenance = AllocId> {
 }
 
 /// A MemPlace with its layout. Constructing it is only possible in this module.
-#[derive(Copy, Clone, Hash, Eq, PartialEq, Debug)]
+#[derive(Clone, Hash, Eq, PartialEq, Debug)]
 pub struct MPlaceTy<'tcx, Prov: Provenance = AllocId> {
     mplace: MemPlace<Prov>,
     pub layout: TyAndLayout<'tcx>,
@@ -68,14 +92,26 @@ pub struct MPlaceTy<'tcx, Prov: Provenance = AllocId> {
     pub align: Align,
 }
 
+impl<'tcx, Prov: Provenance> std::ops::Deref for MPlaceTy<'tcx, Prov> {
+    type Target = MemPlace<Prov>;
+    #[inline(always)]
+    fn deref(&self) -> &MemPlace<Prov> {
+        &self.mplace
+    }
+}
+
 #[derive(Copy, Clone, Debug)]
 pub enum Place<Prov: Provenance = AllocId> {
     /// A place referring to a value allocated in the `Memory` system.
     Ptr(MemPlace<Prov>),
 
-    /// To support alloc-free locals, we are able to write directly to a local.
+    /// To support alloc-free locals, we are able to write directly to a local. The offset indicates
+    /// where in the local this place is located; if it is `None`, no projection has been applied.
+    /// Such projections are meaningful even if the offset is 0, since they can change layouts.
     /// (Without that optimization, we'd just always be a `MemPlace`.)
-    Local { frame: usize, local: mir::Local },
+    /// Note that this only stores the frame index, not the thread this frame belongs to -- that is
+    /// implicit. This means a `Place` must never be moved across interpreter thread boundaries!
+    Local { frame: usize, local: mir::Local, offset: Option<Size> },
 }
 
 #[derive(Clone, Debug)]
@@ -97,14 +133,6 @@ impl<'tcx, Prov: Provenance> std::ops::Deref for PlaceTy<'tcx, Prov> {
     }
 }
 
-impl<'tcx, Prov: Provenance> std::ops::Deref for MPlaceTy<'tcx, Prov> {
-    type Target = MemPlace<Prov>;
-    #[inline(always)]
-    fn deref(&self) -> &MemPlace<Prov> {
-        &self.mplace
-    }
-}
-
 impl<'tcx, Prov: Provenance> From<MPlaceTy<'tcx, Prov>> for PlaceTy<'tcx, Prov> {
     #[inline(always)]
     fn from(mplace: MPlaceTy<'tcx, Prov>) -> Self {
@@ -112,26 +140,17 @@ impl<'tcx, Prov: Provenance> From<MPlaceTy<'tcx, Prov>> for PlaceTy<'tcx, Prov>
     }
 }
 
-impl<'tcx, Prov: Provenance> From<&'_ MPlaceTy<'tcx, Prov>> for PlaceTy<'tcx, Prov> {
-    #[inline(always)]
-    fn from(mplace: &MPlaceTy<'tcx, Prov>) -> Self {
-        PlaceTy { place: Place::Ptr(**mplace), layout: mplace.layout, align: mplace.align }
-    }
-}
-
-impl<'tcx, Prov: Provenance> From<&'_ mut MPlaceTy<'tcx, Prov>> for PlaceTy<'tcx, Prov> {
-    #[inline(always)]
-    fn from(mplace: &mut MPlaceTy<'tcx, Prov>) -> Self {
-        PlaceTy { place: Place::Ptr(**mplace), layout: mplace.layout, align: mplace.align }
-    }
-}
-
 impl<Prov: Provenance> MemPlace<Prov> {
     #[inline(always)]
     pub fn from_ptr(ptr: Pointer<Option<Prov>>) -> Self {
         MemPlace { ptr, meta: MemPlaceMeta::None }
     }
 
+    #[inline(always)]
+    pub fn from_ptr_with_meta(ptr: Pointer<Option<Prov>>, meta: MemPlaceMeta<Prov>) -> Self {
+        MemPlace { ptr, meta }
+    }
+
     /// Adjust the provenance of the main pointer (metadata is unaffected).
     pub fn map_provenance(self, f: impl FnOnce(Option<Prov>) -> Option<Prov>) -> Self {
         MemPlace { ptr: self.ptr.map_provenance(f), ..self }
@@ -150,7 +169,8 @@ impl<Prov: Provenance> MemPlace<Prov> {
     }
 
     #[inline]
-    pub(super) fn offset_with_meta<'tcx>(
+    // Not called `offset_with_meta` to avoid confusion with the trait method.
+    fn offset_with_meta_<'tcx>(
         self,
         offset: Size,
         meta: MemPlaceMeta<Prov>,
@@ -164,19 +184,6 @@ impl<Prov: Provenance> MemPlace<Prov> {
     }
 }
 
-impl<Prov: Provenance> Place<Prov> {
-    /// Asserts that this points to some local variable.
-    /// Returns the frame idx and the variable idx.
-    #[inline]
-    #[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980)
-    pub fn assert_local(&self) -> (usize, mir::Local) {
-        match self {
-            Place::Local { frame, local } => (*frame, *local),
-            _ => bug!("assert_local: expected Place::Local, got {:?}", self),
-        }
-    }
-}
-
 impl<'tcx, Prov: Provenance> MPlaceTy<'tcx, Prov> {
     /// Produces a MemPlace that works for ZST but nothing else.
     /// Conceptually this is a new allocation, but it doesn't actually create an allocation so you
@@ -189,11 +196,39 @@ impl<'tcx, Prov: Provenance> MPlaceTy<'tcx, Prov> {
         MPlaceTy { mplace: MemPlace { ptr, meta: MemPlaceMeta::None }, layout, align }
     }
 
-    /// Offset the place in memory and change its metadata.
-    ///
-    /// This can go wrong very easily if you give the wrong layout for the new place!
     #[inline]
-    pub(crate) fn offset_with_meta(
+    pub fn from_aligned_ptr(ptr: Pointer<Option<Prov>>, layout: TyAndLayout<'tcx>) -> Self {
+        MPlaceTy { mplace: MemPlace::from_ptr(ptr), layout, align: layout.align.abi }
+    }
+
+    #[inline]
+    pub fn from_aligned_ptr_with_meta(
+        ptr: Pointer<Option<Prov>>,
+        layout: TyAndLayout<'tcx>,
+        meta: MemPlaceMeta<Prov>,
+    ) -> Self {
+        MPlaceTy {
+            mplace: MemPlace::from_ptr_with_meta(ptr, meta),
+            layout,
+            align: layout.align.abi,
+        }
+    }
+}
+
+impl<'tcx, Prov: Provenance + 'static> Projectable<'tcx, Prov> for MPlaceTy<'tcx, Prov> {
+    #[inline(always)]
+    fn layout(&self) -> TyAndLayout<'tcx> {
+        self.layout
+    }
+
+    fn meta<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
+        &self,
+        _ecx: &InterpCx<'mir, 'tcx, M>,
+    ) -> InterpResult<'tcx, MemPlaceMeta<M::Provenance>> {
+        Ok(self.meta)
+    }
+
+    fn offset_with_meta(
         &self,
         offset: Size,
         meta: MemPlaceMeta<Prov>,
@@ -201,58 +236,65 @@ impl<'tcx, Prov: Provenance> MPlaceTy<'tcx, Prov> {
         cx: &impl HasDataLayout,
     ) -> InterpResult<'tcx, Self> {
         Ok(MPlaceTy {
-            mplace: self.mplace.offset_with_meta(offset, meta, cx)?,
+            mplace: self.mplace.offset_with_meta_(offset, meta, cx)?,
             align: self.align.restrict_for_offset(offset),
             layout,
         })
     }
 
-    /// Offset the place in memory.
-    ///
-    /// This can go wrong very easily if you give the wrong layout for the new place!
-    pub fn offset(
+    fn to_op<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
         &self,
-        offset: Size,
-        layout: TyAndLayout<'tcx>,
-        cx: &impl HasDataLayout,
-    ) -> InterpResult<'tcx, Self> {
-        assert!(layout.is_sized());
-        self.offset_with_meta(offset, MemPlaceMeta::None, layout, cx)
+        _ecx: &InterpCx<'mir, 'tcx, M>,
+    ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+        Ok(self.clone().into())
     }
+}
 
-    #[inline]
-    pub fn from_aligned_ptr(ptr: Pointer<Option<Prov>>, layout: TyAndLayout<'tcx>) -> Self {
-        MPlaceTy { mplace: MemPlace::from_ptr(ptr), layout, align: layout.align.abi }
+impl<'tcx, Prov: Provenance + 'static> Projectable<'tcx, Prov> for PlaceTy<'tcx, Prov> {
+    #[inline(always)]
+    fn layout(&self) -> TyAndLayout<'tcx> {
+        self.layout
     }
 
-    #[inline]
-    pub fn from_aligned_ptr_with_meta(
-        ptr: Pointer<Option<Prov>>,
-        layout: TyAndLayout<'tcx>,
-        meta: MemPlaceMeta<Prov>,
-    ) -> Self {
-        let mut mplace = MemPlace::from_ptr(ptr);
-        mplace.meta = meta;
-
-        MPlaceTy { mplace, layout, align: layout.align.abi }
+    fn meta<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
+        &self,
+        ecx: &InterpCx<'mir, 'tcx, M>,
+    ) -> InterpResult<'tcx, MemPlaceMeta<M::Provenance>> {
+        ecx.place_meta(self)
     }
 
-    #[inline]
-    pub(crate) fn len(&self, cx: &impl HasDataLayout) -> InterpResult<'tcx, u64> {
-        if self.layout.is_unsized() {
-            // We need to consult `meta` metadata
-            match self.layout.ty.kind() {
-                ty::Slice(..) | ty::Str => self.mplace.meta.unwrap_meta().to_target_usize(cx),
-                _ => bug!("len not supported on unsized type {:?}", self.layout.ty),
-            }
-        } else {
-            // Go through the layout. There are lots of types that support a length,
-            // e.g., SIMD types. (But not all repr(simd) types even have FieldsShape::Array!)
-            match self.layout.fields {
-                abi::FieldsShape::Array { count, .. } => Ok(count),
-                _ => bug!("len not supported on sized type {:?}", self.layout.ty),
+    fn offset_with_meta(
+        &self,
+        offset: Size,
+        meta: MemPlaceMeta<Prov>,
+        layout: TyAndLayout<'tcx>,
+        cx: &impl HasDataLayout,
+    ) -> InterpResult<'tcx, Self> {
+        Ok(match self.as_mplace_or_local() {
+            Left(mplace) => mplace.offset_with_meta(offset, meta, layout, cx)?.into(),
+            Right((frame, local, old_offset)) => {
+                assert_matches!(meta, MemPlaceMeta::None); // we couldn't store it anyway...
+                let new_offset = cx
+                    .data_layout()
+                    .offset(old_offset.unwrap_or(Size::ZERO).bytes(), offset.bytes())?;
+                PlaceTy {
+                    place: Place::Local {
+                        frame,
+                        local,
+                        offset: Some(Size::from_bytes(new_offset)),
+                    },
+                    align: self.align.restrict_for_offset(offset),
+                    layout,
+                }
             }
-        }
+        })
+    }
+
+    fn to_op<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
+        &self,
+        ecx: &InterpCx<'mir, 'tcx, M>,
+    ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+        ecx.place_to_op(self)
     }
 }
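
To see how projections on a `Place::Local` stay allocation-free, here is a minimal standalone model of the offset composition performed in `offset_with_meta` above, with `None` meaning "no projection applied yet":

    fn compose(old: Option<u64>, extra: u64) -> Option<u64> {
        // Every projection yields Some, even when extra == 0, because a
        // projection can change the layout without moving the place.
        Some(old.unwrap_or(0).checked_add(extra).expect("offset overflow"))
    }

    fn main() {
        let p = None;          // the bare local, untouched
        let p = compose(p, 8); // project to a field at offset 8
        let p = compose(p, 0); // e.g. a downcast: offset 0, but a new layout
        assert_eq!(p, Some(8));
    }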
 
@@ -280,13 +322,15 @@ impl<'tcx, Prov: Provenance> OpTy<'tcx, Prov> {
     }
 }
 
-impl<'tcx, Prov: Provenance> PlaceTy<'tcx, Prov> {
+impl<'tcx, Prov: Provenance + 'static> PlaceTy<'tcx, Prov> {
     /// A place is either an mplace or some local.
     #[inline]
-    pub fn as_mplace_or_local(&self) -> Either<MPlaceTy<'tcx, Prov>, (usize, mir::Local)> {
+    pub fn as_mplace_or_local(
+        &self,
+    ) -> Either<MPlaceTy<'tcx, Prov>, (usize, mir::Local, Option<Size>)> {
         match **self {
             Place::Ptr(mplace) => Left(MPlaceTy { mplace, layout: self.layout, align: self.align }),
-            Place::Local { frame, local } => Right((frame, local)),
+            Place::Local { frame, local, offset } => Right((frame, local, offset)),
         }
     }
 
@@ -302,12 +346,74 @@ impl<'tcx, Prov: Provenance> PlaceTy<'tcx, Prov> {
     }
 }
 
+pub trait Writeable<'tcx, Prov: Provenance>: Projectable<'tcx, Prov> {
+    fn as_mplace_or_local(
+        &self,
+    ) -> Either<MPlaceTy<'tcx, Prov>, (usize, mir::Local, Option<Size>, Align, TyAndLayout<'tcx>)>;
+
+    fn force_mplace<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
+        &self,
+        ecx: &mut InterpCx<'mir, 'tcx, M>,
+    ) -> InterpResult<'tcx, MPlaceTy<'tcx, Prov>>;
+}
+
+impl<'tcx, Prov: Provenance + 'static> Writeable<'tcx, Prov> for PlaceTy<'tcx, Prov> {
+    #[inline(always)]
+    fn as_mplace_or_local(
+        &self,
+    ) -> Either<MPlaceTy<'tcx, Prov>, (usize, mir::Local, Option<Size>, Align, TyAndLayout<'tcx>)>
+    {
+        self.as_mplace_or_local()
+            .map_right(|(frame, local, offset)| (frame, local, offset, self.align, self.layout))
+    }
+
+    #[inline(always)]
+    fn force_mplace<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
+        &self,
+        ecx: &mut InterpCx<'mir, 'tcx, M>,
+    ) -> InterpResult<'tcx, MPlaceTy<'tcx, Prov>> {
+        ecx.force_allocation(self)
+    }
+}
+
+impl<'tcx, Prov: Provenance + 'static> Writeable<'tcx, Prov> for MPlaceTy<'tcx, Prov> {
+    #[inline(always)]
+    fn as_mplace_or_local(
+        &self,
+    ) -> Either<MPlaceTy<'tcx, Prov>, (usize, mir::Local, Option<Size>, Align, TyAndLayout<'tcx>)>
+    {
+        Left(self.clone())
+    }
+
+    #[inline(always)]
+    fn force_mplace<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
+        &self,
+        _ecx: &mut InterpCx<'mir, 'tcx, M>,
+    ) -> InterpResult<'tcx, MPlaceTy<'tcx, Prov>> {
+        Ok(self.clone())
+    }
+}
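
Symmetric to `Readable`: a sketch of a caller-side helper (hypothetical, not part of this diff, and not standalone-compilable) showing how `Writeable` lets one function target either a `PlaceTy` or an `MPlaceTy` destination:

    fn write_true<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>>(
        ecx: &mut InterpCx<'mir, 'tcx, M>,
        dest: &impl Writeable<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx> {
        // locals holding immediates are updated in place; `force_mplace` is
        // only reached when a projection offset really demands memory
        ecx.write_scalar(Scalar::from_bool(true), dest)
    }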
+
 // FIXME: Working around https://github.com/rust-lang/rust/issues/54385
 impl<'mir, 'tcx: 'mir, Prov, M> InterpCx<'mir, 'tcx, M>
 where
     Prov: Provenance + 'static,
     M: Machine<'mir, 'tcx, Provenance = Prov>,
 {
+    /// Get the metadata of the given place.
+    pub(super) fn place_meta(
+        &self,
+        place: &PlaceTy<'tcx, M::Provenance>,
+    ) -> InterpResult<'tcx, MemPlaceMeta<M::Provenance>> {
+        if place.layout.is_unsized() {
+            // For `Place::Local`, the metadata is stored with the local, not the place. So we have
+            // to look that up first.
+            self.place_to_op(place)?.meta()
+        } else {
+            Ok(MemPlaceMeta::None)
+        }
+    }
+
     /// Take a value, which represents a (thin or wide) reference, and make it a place.
     /// Alignment is just based on the type. This is the inverse of `MemPlace::to_ref()`.
     ///
@@ -327,18 +433,16 @@ where
             Immediate::Uninit => throw_ub!(InvalidUninitBytes(None)),
         };
 
-        let mplace = MemPlace { ptr: ptr.to_pointer(self)?, meta };
         // `ref_to_mplace` is called on raw pointers even if they don't actually get dereferenced;
         // we hence can't call `size_and_align_of` since that asserts more validity than we want.
-        let align = layout.align.abi;
-        Ok(MPlaceTy { mplace, layout, align })
+        Ok(MPlaceTy::from_aligned_ptr_with_meta(ptr.to_pointer(self)?, layout, meta))
     }
 
     /// Take an operand, representing a pointer, and dereference it to a place.
     #[instrument(skip(self), level = "debug")]
     pub fn deref_operand(
         &self,
-        src: &OpTy<'tcx, M::Provenance>,
+        src: &impl Readable<'tcx, M::Provenance>,
     ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
         let val = self.read_immediate(src)?;
         trace!("deref to {} on {:?}", val.layout.ty, *val);
@@ -348,7 +452,7 @@ where
         }
 
         let mplace = self.ref_to_mplace(&val)?;
-        self.check_mplace(mplace)?;
+        self.check_mplace(&mplace)?;
         Ok(mplace)
     }
 
@@ -379,7 +483,7 @@ where
     }
 
     /// Check if this mplace is dereferenceable and sufficiently aligned.
-    pub fn check_mplace(&self, mplace: MPlaceTy<'tcx, M::Provenance>) -> InterpResult<'tcx> {
+    pub fn check_mplace(&self, mplace: &MPlaceTy<'tcx, M::Provenance>) -> InterpResult<'tcx> {
         let (size, _align) = self
             .size_and_align_of_mplace(&mplace)?
             .unwrap_or((mplace.layout.size, mplace.layout.align.abi));
@@ -422,7 +526,7 @@ where
         local: mir::Local,
     ) -> InterpResult<'tcx, PlaceTy<'tcx, M::Provenance>> {
         let layout = self.layout_of_local(&self.stack()[frame], local, None)?;
-        let place = Place::Local { frame, local };
+        let place = Place::Local { frame, local, offset: None };
         Ok(PlaceTy { place, layout, align: layout.align.abi })
     }
 
@@ -430,13 +534,13 @@ where
     /// place; for reading, a more efficient alternative is `eval_place_to_op`.
     #[instrument(skip(self), level = "debug")]
     pub fn eval_place(
-        &mut self,
+        &self,
         mir_place: mir::Place<'tcx>,
     ) -> InterpResult<'tcx, PlaceTy<'tcx, M::Provenance>> {
         let mut place = self.local_to_place(self.frame_idx(), mir_place.local)?;
         // Using `try_fold` turned out to be bad for performance, hence the loop.
         for elem in mir_place.projection.iter() {
-            place = self.place_projection(&place, elem)?
+            place = self.project(&place, elem)?
         }
 
         trace!("{:?}", self.dump_place(place.place));
@@ -463,13 +567,13 @@ where
     pub fn write_immediate(
         &mut self,
         src: Immediate<M::Provenance>,
-        dest: &PlaceTy<'tcx, M::Provenance>,
+        dest: &impl Writeable<'tcx, M::Provenance>,
     ) -> InterpResult<'tcx> {
         self.write_immediate_no_validate(src, dest)?;
 
-        if M::enforce_validity(self, dest.layout) {
+        if M::enforce_validity(self, dest.layout()) {
             // Data got changed, better make sure it matches the type!
-            self.validate_operand(&self.place_to_op(dest)?)?;
+            self.validate_operand(&dest.to_op(self)?)?;
         }
 
         Ok(())
@@ -480,7 +584,7 @@ where
     pub fn write_scalar(
         &mut self,
         val: impl Into<Scalar<M::Provenance>>,
-        dest: &PlaceTy<'tcx, M::Provenance>,
+        dest: &impl Writeable<'tcx, M::Provenance>,
     ) -> InterpResult<'tcx> {
         self.write_immediate(Immediate::Scalar(val.into()), dest)
     }
@@ -490,7 +594,7 @@ where
     pub fn write_pointer(
         &mut self,
         ptr: impl Into<Pointer<Option<M::Provenance>>>,
-        dest: &PlaceTy<'tcx, M::Provenance>,
+        dest: &impl Writeable<'tcx, M::Provenance>,
     ) -> InterpResult<'tcx> {
         self.write_scalar(Scalar::from_maybe_pointer(ptr.into(), self), dest)
     }
@@ -501,32 +605,63 @@ where
     fn write_immediate_no_validate(
         &mut self,
         src: Immediate<M::Provenance>,
-        dest: &PlaceTy<'tcx, M::Provenance>,
+        dest: &impl Writeable<'tcx, M::Provenance>,
     ) -> InterpResult<'tcx> {
-        assert!(dest.layout.is_sized(), "Cannot write unsized data");
-        trace!("write_immediate: {:?} <- {:?}: {}", *dest, src, dest.layout.ty);
+        assert!(dest.layout().is_sized(), "Cannot write unsized immediate data");
 
         // See if we can avoid an allocation. This is the counterpart to `read_immediate_raw`,
         // but not factored as a separate function.
-        let mplace = match dest.place {
-            Place::Local { frame, local } => {
-                match M::access_local_mut(self, frame, local)? {
-                    Operand::Immediate(local) => {
-                        // Local can be updated in-place.
-                        *local = src;
-                        return Ok(());
-                    }
-                    Operand::Indirect(mplace) => {
-                        // The local is in memory, go on below.
-                        *mplace
+        let mplace = match dest.as_mplace_or_local() {
+            Right((frame, local, offset, align, layout)) => {
+                if offset.is_some() {
+                    // This has been projected to a part of this local. We could have complicated
+                    // logic to still keep this local as an `Operand`... but it's much easier to
+                    // just fall back to the indirect path.
+                    dest.force_mplace(self)?
+                } else {
+                    match M::access_local_mut(self, frame, local)? {
+                        Operand::Immediate(local_val) => {
+                            // Local can be updated in-place.
+                            *local_val = src;
+                            // Double-check that the value we are storing and the local fit each other.
+                            // (*After* doing the update for borrow checker reasons.)
+                            if cfg!(debug_assertions) {
+                                let local_layout =
+                                    self.layout_of_local(&self.stack()[frame], local, None)?;
+                                match (src, local_layout.abi) {
+                                    (Immediate::Scalar(scalar), Abi::Scalar(s)) => {
+                                        assert_eq!(scalar.size(), s.size(self))
+                                    }
+                                    (
+                                        Immediate::ScalarPair(a_val, b_val),
+                                        Abi::ScalarPair(a, b),
+                                    ) => {
+                                        assert_eq!(a_val.size(), a.size(self));
+                                        assert_eq!(b_val.size(), b.size(self));
+                                    }
+                                    (Immediate::Uninit, _) => {}
+                                    (src, abi) => {
+                                        bug!(
+                                            "value {src:?} cannot be written into local with type {} (ABI {abi:?})",
+                                            local_layout.ty
+                                        )
+                                    }
+                                };
+                            }
+                            return Ok(());
+                        }
+                        Operand::Indirect(mplace) => {
+                            // The local is in memory, go on below.
+                            MPlaceTy { mplace: *mplace, align, layout }
+                        }
                     }
                 }
             }
-            Place::Ptr(mplace) => mplace, // already referring to memory
+            Left(mplace) => mplace, // already referring to memory
         };
 
         // This is already in memory, write there.
-        self.write_immediate_to_mplace_no_validate(src, dest.layout, dest.align, mplace)
+        self.write_immediate_to_mplace_no_validate(src, mplace.layout, mplace.align, mplace.mplace)
     }
 
     /// Write an immediate to memory.
@@ -590,18 +725,29 @@ where
         }
     }
 
-    pub fn write_uninit(&mut self, dest: &PlaceTy<'tcx, M::Provenance>) -> InterpResult<'tcx> {
+    pub fn write_uninit(
+        &mut self,
+        dest: &impl Writeable<'tcx, M::Provenance>,
+    ) -> InterpResult<'tcx> {
         let mplace = match dest.as_mplace_or_local() {
             Left(mplace) => mplace,
-            Right((frame, local)) => {
-                match M::access_local_mut(self, frame, local)? {
-                    Operand::Immediate(local) => {
-                        *local = Immediate::Uninit;
-                        return Ok(());
-                    }
-                    Operand::Indirect(mplace) => {
-                        // The local is in memory, go on below.
-                        MPlaceTy { mplace: *mplace, layout: dest.layout, align: dest.align }
+            Right((frame, local, offset, align, layout)) => {
+                if offset.is_some() {
+                    // This has been projected to a part of this local. We could have complicated
+                    // logic to still keep this local as an `Operand`... but it's much easier to
+                    // just fall back to the indirect path.
+                    // FIXME: share the logic with `write_immediate_no_validate`.
+                    dest.force_mplace(self)?
+                } else {
+                    match M::access_local_mut(self, frame, local)? {
+                        Operand::Immediate(local) => {
+                            *local = Immediate::Uninit;
+                            return Ok(());
+                        }
+                        Operand::Indirect(mplace) => {
+                            // The local is in memory, go on below.
+                            MPlaceTy { mplace: *mplace, layout, align }
+                        }
                     }
                 }
             }
@@ -620,15 +766,15 @@ where
     #[instrument(skip(self), level = "debug")]
     pub fn copy_op(
         &mut self,
-        src: &OpTy<'tcx, M::Provenance>,
-        dest: &PlaceTy<'tcx, M::Provenance>,
+        src: &impl Readable<'tcx, M::Provenance>,
+        dest: &impl Writeable<'tcx, M::Provenance>,
         allow_transmute: bool,
     ) -> InterpResult<'tcx> {
         self.copy_op_no_validate(src, dest, allow_transmute)?;
 
-        if M::enforce_validity(self, dest.layout) {
+        if M::enforce_validity(self, dest.layout()) {
             // Data got changed, better make sure it matches the type!
-            self.validate_operand(&self.place_to_op(dest)?)?;
+            self.validate_operand(&dest.to_op(self)?)?;
         }
 
         Ok(())
@@ -641,20 +787,20 @@ where
     #[instrument(skip(self), level = "debug")]
     fn copy_op_no_validate(
         &mut self,
-        src: &OpTy<'tcx, M::Provenance>,
-        dest: &PlaceTy<'tcx, M::Provenance>,
+        src: &impl Readable<'tcx, M::Provenance>,
+        dest: &impl Writeable<'tcx, M::Provenance>,
         allow_transmute: bool,
     ) -> InterpResult<'tcx> {
         // We do NOT compare the types for equality, because well-typed code can
         // actually "transmute" `&mut T` to `&T` in an assignment without a cast.
         let layout_compat =
-            mir_assign_valid_types(*self.tcx, self.param_env, src.layout, dest.layout);
+            mir_assign_valid_types(*self.tcx, self.param_env, src.layout(), dest.layout());
         if !allow_transmute && !layout_compat {
             span_bug!(
                 self.cur_span(),
                 "type mismatch when copying!\nsrc: {:?},\ndest: {:?}",
-                src.layout.ty,
-                dest.layout.ty,
+                src.layout().ty,
+                dest.layout().ty,
             );
         }
 
@@ -667,13 +813,13 @@ where
                 // actually sized, due to a trivially false where-clause
                 // predicate like `where Self: Sized` with `Self = dyn Trait`.
                 // See #102553 for an example of such a predicate.
-                if src.layout.is_unsized() {
-                    throw_inval!(SizeOfUnsizedType(src.layout.ty));
+                if src.layout().is_unsized() {
+                    throw_inval!(SizeOfUnsizedType(src.layout().ty));
                 }
-                if dest.layout.is_unsized() {
-                    throw_inval!(SizeOfUnsizedType(dest.layout.ty));
+                if dest.layout().is_unsized() {
+                    throw_inval!(SizeOfUnsizedType(dest.layout().ty));
                 }
-                assert_eq!(src.layout.size, dest.layout.size);
+                assert_eq!(src.layout().size, dest.layout().size);
                 // Yay, we got a value that we can write directly.
                 return if layout_compat {
                     self.write_immediate_no_validate(*src_val, dest)
@@ -682,10 +828,10 @@ where
                     // loaded using the offsets defined by `src.layout`. When we put this back into
                     // the destination, we have to use the same offsets! So (a) we make sure we
                     // write back to memory, and (b) we use `dest` *with the source layout*.
-                    let dest_mem = self.force_allocation(dest)?;
+                    let dest_mem = dest.force_mplace(self)?;
                     self.write_immediate_to_mplace_no_validate(
                         *src_val,
-                        src.layout,
+                        src.layout(),
                         dest_mem.align,
                         *dest_mem,
                     )
@@ -694,9 +840,9 @@ where
             Left(mplace) => mplace,
         };
         // Slow path, this does not fit into an immediate. Just memcpy.
-        trace!("copy_op: {:?} <- {:?}: {}", *dest, src, dest.layout.ty);
+        trace!("copy_op: {:?} <- {:?}: {}", *dest, src, dest.layout().ty);
 
-        let dest = self.force_allocation(&dest)?;
+        let dest = dest.force_mplace(self)?;
         let Some((dest_size, _)) = self.size_and_align_of_mplace(&dest)? else {
             span_bug!(self.cur_span(), "copy_op needs (dynamically) sized values")
         };
@@ -728,8 +874,8 @@ where
         place: &PlaceTy<'tcx, M::Provenance>,
     ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
         let mplace = match place.place {
-            Place::Local { frame, local } => {
-                match M::access_local_mut(self, frame, local)? {
+            Place::Local { frame, local, offset } => {
+                let whole_local = match M::access_local_mut(self, frame, local)? {
                     &mut Operand::Immediate(local_val) => {
                         // We need to make an allocation.
 
@@ -742,10 +888,11 @@ where
                             throw_unsup_format!("unsized locals are not supported");
                         }
                         let mplace = *self.allocate(local_layout, MemoryKind::Stack)?;
+                        // Preserve old value. (As an optimization, we can skip this if it was uninit.)
                         if !matches!(local_val, Immediate::Uninit) {
-                            // Preserve old value. (As an optimization, we can skip this if it was uninit.)
-                            // We don't have to validate as we can assume the local
-                            // was already valid for its type.
+                            // We don't have to validate as we can assume the local was already
+                            // valid for its type. We must not use any part of `place` here, as
+                            // that could be a projection to a part of the local!
                             self.write_immediate_to_mplace_no_validate(
                                 local_val,
                                 local_layout,
@@ -753,18 +900,25 @@ where
                                 mplace,
                             )?;
                         }
-                        // Now we can call `access_mut` again, asserting it goes well,
-                        // and actually overwrite things.
+                        // Now we can call `access_mut` again, asserting it goes well, and actually
+                        // overwrite things. This points to the entire allocation, not just the part
+                        // the place refers to, i.e. we do this before we apply `offset`.
                         *M::access_local_mut(self, frame, local).unwrap() =
                             Operand::Indirect(mplace);
                         mplace
                     }
                     &mut Operand::Indirect(mplace) => mplace, // this already was an indirect local
+                };
+                if let Some(offset) = offset {
+                    whole_local.offset_with_meta_(offset, MemPlaceMeta::None, self)?
+                } else {
+                    // Preserve wide place metadata, do not call `offset`.
+                    whole_local
                 }
             }
             Place::Ptr(mplace) => mplace,
         };
-        // Return with the original layout, so that the caller can go on
+        // Return with the original layout and align, so that the caller can go on
         Ok(MPlaceTy { mplace, layout: place.layout, align: place.align })
     }
 
@@ -806,10 +960,10 @@ where
         operands: &IndexSlice<FieldIdx, mir::Operand<'tcx>>,
         dest: &PlaceTy<'tcx, M::Provenance>,
     ) -> InterpResult<'tcx> {
-        self.write_uninit(&dest)?;
+        self.write_uninit(dest)?;
         let (variant_index, variant_dest, active_field_index) = match *kind {
             mir::AggregateKind::Adt(_, variant_index, _, _, active_field_index) => {
-                let variant_dest = self.place_downcast(&dest, variant_index)?;
+                let variant_dest = self.project_downcast(dest, variant_index)?;
                 (variant_index, variant_dest, active_field_index)
             }
             _ => (FIRST_VARIANT, dest.clone(), None),
@@ -819,11 +973,11 @@ where
         }
         for (field_index, operand) in operands.iter_enumerated() {
             let field_index = active_field_index.unwrap_or(field_index);
-            let field_dest = self.place_field(&variant_dest, field_index.as_usize())?;
+            let field_dest = self.project_field(&variant_dest, field_index.as_usize())?;
             let op = self.eval_operand(operand, Some(field_dest.layout))?;
             self.copy_op(&op, &field_dest, /*allow_transmute*/ false)?;
         }
-        self.write_discriminant(variant_index, &dest)
+        self.write_discriminant(variant_index, dest)
     }
 
     pub fn raw_const_to_mplace(
@@ -859,22 +1013,24 @@ where
         Ok((mplace, vtable))
     }
 
-    /// Turn an operand with a `dyn* Trait` type into an operand with the actual dynamic type.
-    /// Aso returns the vtable.
-    pub(super) fn unpack_dyn_star(
+    /// Turn a value of a `dyn* Trait` type into a value with the actual dynamic type.
+    /// Also returns the vtable.
+    pub(super) fn unpack_dyn_star<P: Projectable<'tcx, M::Provenance>>(
         &self,
-        op: &OpTy<'tcx, M::Provenance>,
-    ) -> InterpResult<'tcx, (OpTy<'tcx, M::Provenance>, Pointer<Option<M::Provenance>>)> {
+        val: &P,
+    ) -> InterpResult<'tcx, (P, Pointer<Option<M::Provenance>>)> {
         assert!(
-            matches!(op.layout.ty.kind(), ty::Dynamic(_, _, ty::DynStar)),
+            matches!(val.layout().ty.kind(), ty::Dynamic(_, _, ty::DynStar)),
             "`unpack_dyn_star` only makes sense on `dyn*` types"
         );
-        let data = self.operand_field(&op, 0)?;
-        let vtable = self.operand_field(&op, 1)?;
-        let vtable = self.read_pointer(&vtable)?;
+        let data = self.project_field(val, 0)?;
+        let vtable = self.project_field(val, 1)?;
+        let vtable = self.read_pointer(&vtable.to_op(self)?)?;
         let (ty, _) = self.get_ptr_vtable(vtable)?;
         let layout = self.layout_of(ty)?;
-        let data = data.transmute(layout);
+        // `data` is already the right thing but has the wrong type. So we transmute it, by
+        // projecting with offset 0.
+        let data = data.transmute(layout, self)?;
         Ok((data, vtable))
     }
 }
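// Usage sketch, assuming `val` is a `dyn* Trait` operand or place and `ecx` is an
// interpreter reference: a `dyn*` is a (data, vtable) pair, so unpacking is two
// field projections plus a vtable lookup.
let (data, vtable) = ecx.unpack_dyn_star(&val)?;
// Same bits as before, but `data.layout()` is now the concrete dynamic type.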
diff --git a/compiler/rustc_const_eval/src/interpret/projection.rs b/compiler/rustc_const_eval/src/interpret/projection.rs
index d7d31fe1887..bce43aedb69 100644
--- a/compiler/rustc_const_eval/src/interpret/projection.rs
+++ b/compiler/rustc_const_eval/src/interpret/projection.rs
@@ -7,18 +7,70 @@
 //! but we still need to do bounds checking and adjust the layout. To not duplicate that with MPlaceTy, we actually
 //! implement the logic on OpTy, and MPlaceTy calls that.
 
-use either::{Left, Right};
-
 use rustc_middle::mir;
 use rustc_middle::ty;
-use rustc_middle::ty::layout::LayoutOf;
+use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
 use rustc_middle::ty::Ty;
-use rustc_target::abi::{self, Abi, VariantIdx};
+use rustc_middle::ty::TyCtxt;
+use rustc_target::abi::HasDataLayout;
+use rustc_target::abi::Size;
+use rustc_target::abi::{self, VariantIdx};
+
+use super::{InterpCx, InterpResult, MPlaceTy, Machine, MemPlaceMeta, OpTy, Provenance, Scalar};
 
-use super::{
-    ImmTy, Immediate, InterpCx, InterpResult, MPlaceTy, Machine, MemPlaceMeta, OpTy, PlaceTy,
-    Provenance, Scalar,
-};
+/// A thing that we can project into, and that has a layout.
+pub trait Projectable<'tcx, Prov: Provenance>: Sized + std::fmt::Debug {
+    /// Get the layout.
+    fn layout(&self) -> TyAndLayout<'tcx>;
+
+    /// Get the metadata of a wide value.
+    fn meta<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
+        &self,
+        ecx: &InterpCx<'mir, 'tcx, M>,
+    ) -> InterpResult<'tcx, MemPlaceMeta<M::Provenance>>;
+
+    fn len<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
+        &self,
+        ecx: &InterpCx<'mir, 'tcx, M>,
+    ) -> InterpResult<'tcx, u64> {
+        self.meta(ecx)?.len(self.layout(), ecx)
+    }
+
+    /// Offset the value by the given amount, replacing the layout and metadata.
+    fn offset_with_meta(
+        &self,
+        offset: Size,
+        meta: MemPlaceMeta<Prov>,
+        layout: TyAndLayout<'tcx>,
+        cx: &impl HasDataLayout,
+    ) -> InterpResult<'tcx, Self>;
+
+    fn offset(
+        &self,
+        offset: Size,
+        layout: TyAndLayout<'tcx>,
+        cx: &impl HasDataLayout,
+    ) -> InterpResult<'tcx, Self> {
+        assert!(layout.is_sized());
+        self.offset_with_meta(offset, MemPlaceMeta::None, layout, cx)
+    }
+
+    fn transmute(
+        &self,
+        layout: TyAndLayout<'tcx>,
+        cx: &impl HasDataLayout,
+    ) -> InterpResult<'tcx, Self> {
+        assert_eq!(self.layout().size, layout.size);
+        self.offset_with_meta(Size::ZERO, MemPlaceMeta::None, layout, cx)
+    }
+
+    /// Convert this to an `OpTy`. This might be an irreversible transformation, but is useful for
+    /// reading from this thing.
+    fn to_op<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
+        &self,
+        ecx: &InterpCx<'mir, 'tcx, M>,
+    ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>>;
+}
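// A minimal sketch of what this trait buys us (the helper name and `ecx` are
// assumptions for illustration): one generic function can now serve both `OpTy`
// and `MPlaceTy`, using the `project_*` methods defined below.
fn first_field_of_first_elem<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>, P>(
    ecx: &InterpCx<'mir, 'tcx, M>,
    base: &P,
) -> InterpResult<'tcx, P>
where
    P: Projectable<'tcx, M::Provenance>,
{
    let elem = ecx.project_index(base, 0)?; // base[0]
    ecx.project_field(&elem, 0) // base[0].0
}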
 
 // FIXME: Working around https://github.com/rust-lang/rust/issues/54385
 impl<'mir, 'tcx: 'mir, Prov, M> InterpCx<'mir, 'tcx, M>
@@ -26,167 +78,83 @@ where
     Prov: Provenance + 'static,
     M: Machine<'mir, 'tcx, Provenance = Prov>,
 {
-    //# Field access
-
     /// Offset a pointer to project to a field of a struct/union. This is always
     /// possible without allocating, so it can take `&self`. Also return the field's layout.
-    /// This supports both struct and array fields.
+    /// This supports both struct and array fields, but not slices!
     ///
     /// This also works for arrays, but then the `usize` index type is restricting.
     /// For indexing into arrays, use `project_index`.
-    pub fn mplace_field(
+    pub fn project_field<P: Projectable<'tcx, M::Provenance>>(
         &self,
-        base: &MPlaceTy<'tcx, M::Provenance>,
+        base: &P,
         field: usize,
-    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
-        let offset = base.layout.fields.offset(field);
-        let field_layout = base.layout.field(self, field);
+    ) -> InterpResult<'tcx, P> {
+        // Slices nominally have length 0, so field projection would panic somewhere in `fields.offset`.
+        debug_assert!(
+            !matches!(base.layout().ty.kind(), ty::Slice(..)),
+            "`field` projection called on a slice -- call `index` projection instead"
+        );
+        let offset = base.layout().fields.offset(field);
+        let field_layout = base.layout().field(self, field);
 
         // Offset may need adjustment for unsized fields.
         let (meta, offset) = if field_layout.is_unsized() {
+            if base.layout().is_sized() {
+                // An unsized field of a sized type? Sure...
+                // But const-prop actually feeds us such nonsense MIR!
+                throw_inval!(ConstPropNonsense);
+            }
+            let base_meta = base.meta(self)?;
             // Re-use parent metadata to determine dynamic field layout.
             // With custom DSTs, this *will* execute user-defined code, but the same
             // happens at run-time so that's okay.
-            match self.size_and_align_of(&base.meta, &field_layout)? {
-                Some((_, align)) => (base.meta, offset.align_to(align)),
+            match self.size_and_align_of(&base_meta, &field_layout)? {
+                Some((_, align)) => (base_meta, offset.align_to(align)),
                 None => {
                     // For unsized types with an extern type tail we perform no adjustments.
                     // NOTE: keep this in sync with `PlaceRef::project_field` in the codegen backend.
-                    assert!(matches!(base.meta, MemPlaceMeta::None));
-                    (base.meta, offset)
+                    assert!(matches!(base_meta, MemPlaceMeta::None));
+                    (base_meta, offset)
                 }
             }
         } else {
-            // base.meta could be present; we might be accessing a sized field of an unsized
+            // base_meta could be present; we might be accessing a sized field of an unsized
             // struct.
             (MemPlaceMeta::None, offset)
         };
 
-        // We do not look at `base.layout.align` nor `field_layout.align`, unlike
-        // codegen -- mostly to see if we can get away with that
         base.offset_with_meta(offset, meta, field_layout, self)
     }
 
-    /// Gets the place of a field inside the place, and also the field's type.
-    /// Just a convenience function, but used quite a bit.
-    /// This is the only projection that might have a side-effect: We cannot project
-    /// into the field of a local `ScalarPair`, we have to first allocate it.
-    pub fn place_field(
-        &mut self,
-        base: &PlaceTy<'tcx, M::Provenance>,
-        field: usize,
-    ) -> InterpResult<'tcx, PlaceTy<'tcx, M::Provenance>> {
-        // FIXME: We could try to be smarter and avoid allocation for fields that span the
-        // entire place.
-        let base = self.force_allocation(base)?;
-        Ok(self.mplace_field(&base, field)?.into())
-    }
-
-    pub fn operand_field(
+    /// Downcasting to an enum variant.
+    pub fn project_downcast<P: Projectable<'tcx, M::Provenance>>(
         &self,
-        base: &OpTy<'tcx, M::Provenance>,
-        field: usize,
-    ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
-        let base = match base.as_mplace_or_imm() {
-            Left(ref mplace) => {
-                // We can reuse the mplace field computation logic for indirect operands.
-                let field = self.mplace_field(mplace, field)?;
-                return Ok(field.into());
-            }
-            Right(value) => value,
-        };
-
-        let field_layout = base.layout.field(self, field);
-        let offset = base.layout.fields.offset(field);
-        // This makes several assumptions about what layouts we will encounter; we match what
-        // codegen does as good as we can (see `extract_field` in `rustc_codegen_ssa/src/mir/operand.rs`).
-        let field_val: Immediate<_> = match (*base, base.layout.abi) {
-            // if the entire value is uninit, then so is the field (can happen in ConstProp)
-            (Immediate::Uninit, _) => Immediate::Uninit,
-            // the field contains no information, can be left uninit
-            _ if field_layout.is_zst() => Immediate::Uninit,
-            // the field covers the entire type
-            _ if field_layout.size == base.layout.size => {
-                assert!(match (base.layout.abi, field_layout.abi) {
-                    (Abi::Scalar(..), Abi::Scalar(..)) => true,
-                    (Abi::ScalarPair(..), Abi::ScalarPair(..)) => true,
-                    _ => false,
-                });
-                assert!(offset.bytes() == 0);
-                *base
-            }
-            // extract fields from types with `ScalarPair` ABI
-            (Immediate::ScalarPair(a_val, b_val), Abi::ScalarPair(a, b)) => {
-                assert!(matches!(field_layout.abi, Abi::Scalar(..)));
-                Immediate::from(if offset.bytes() == 0 {
-                    debug_assert_eq!(field_layout.size, a.size(self));
-                    a_val
-                } else {
-                    debug_assert_eq!(offset, a.size(self).align_to(b.align(self).abi));
-                    debug_assert_eq!(field_layout.size, b.size(self));
-                    b_val
-                })
-            }
-            // everything else is a bug
-            _ => span_bug!(
-                self.cur_span(),
-                "invalid field access on immediate {}, layout {:#?}",
-                base,
-                base.layout
-            ),
-        };
-
-        Ok(ImmTy::from_immediate(field_val, field_layout).into())
-    }
-
-    //# Downcasting
-
-    pub fn mplace_downcast(
-        &self,
-        base: &MPlaceTy<'tcx, M::Provenance>,
+        base: &P,
         variant: VariantIdx,
-    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
+    ) -> InterpResult<'tcx, P> {
+        assert!(!base.meta(self)?.has_meta());
         // Downcasts only change the layout.
         // (In particular, no check about whether this is even the active variant -- that's by design,
         // see https://github.com/rust-lang/rust/issues/93688#issuecomment-1032929496.)
-        assert!(!base.meta.has_meta());
-        let mut base = *base;
-        base.layout = base.layout.for_variant(self, variant);
-        Ok(base)
-    }
-
-    pub fn place_downcast(
-        &self,
-        base: &PlaceTy<'tcx, M::Provenance>,
-        variant: VariantIdx,
-    ) -> InterpResult<'tcx, PlaceTy<'tcx, M::Provenance>> {
-        // Downcast just changes the layout
-        let mut base = base.clone();
-        base.layout = base.layout.for_variant(self, variant);
-        Ok(base)
-    }
-
-    pub fn operand_downcast(
-        &self,
-        base: &OpTy<'tcx, M::Provenance>,
-        variant: VariantIdx,
-    ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
-        // Downcast just changes the layout
-        let mut base = base.clone();
-        base.layout = base.layout.for_variant(self, variant);
-        Ok(base)
+        // So we just "offset" by 0.
+        let layout = base.layout().for_variant(self, variant);
+        if layout.abi.is_uninhabited() {
+            // `read_discriminant` should have excluded uninhabited variants... but ConstProp calls
+            // us on dead code.
+            throw_inval!(ConstPropNonsense)
+        }
+        // This cannot be `transmute` as variants *can* have a smaller size than the entire enum.
+        base.offset(Size::ZERO, layout, self)
     }
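// Typical use, assuming an enum value `op: OpTy` and an interpreter `ecx`:
// first ask which variant is active, then downcast to that variant's layout.
let variant = ecx.read_discriminant(&op)?; // VariantIdx of the active variant
let active = ecx.project_downcast(&op, variant)?; // same data, variant layout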
 
-    //# Slice indexing
-
-    #[inline(always)]
-    pub fn operand_index(
+    /// Compute the offset and field layout for accessing the given index.
+    pub fn project_index<P: Projectable<'tcx, M::Provenance>>(
         &self,
-        base: &OpTy<'tcx, M::Provenance>,
+        base: &P,
         index: u64,
-    ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+    ) -> InterpResult<'tcx, P> {
         // Not using the layout method because we want to compute on u64
-        match base.layout.fields {
+        let (offset, field_layout) = match base.layout().fields {
             abi::FieldsShape::Array { stride, count: _ } => {
                 // `count` is nonsense for slices, use the dynamic length instead.
                 let len = base.len(self)?;
@@ -196,63 +164,26 @@ where
                 }
                 let offset = stride * index; // `Size` multiplication
                 // All fields have the same layout.
-                let field_layout = base.layout.field(self, 0);
-                base.offset(offset, field_layout, self)
+                let field_layout = base.layout().field(self, 0);
+                (offset, field_layout)
             }
             _ => span_bug!(
                 self.cur_span(),
                 "`mplace_index` called on non-array type {:?}",
-                base.layout.ty
+                base.layout().ty
             ),
-        }
-    }
-
-    /// Iterates over all fields of an array. Much more efficient than doing the
-    /// same by repeatedly calling `operand_index`.
-    pub fn operand_array_fields<'a>(
-        &self,
-        base: &'a OpTy<'tcx, Prov>,
-    ) -> InterpResult<'tcx, impl Iterator<Item = InterpResult<'tcx, OpTy<'tcx, Prov>>> + 'a> {
-        let len = base.len(self)?; // also asserts that we have a type where this makes sense
-        let abi::FieldsShape::Array { stride, .. } = base.layout.fields else {
-            span_bug!(self.cur_span(), "operand_array_fields: expected an array layout");
         };
-        let field_layout = base.layout.field(self, 0);
-        let dl = &self.tcx.data_layout;
-        // `Size` multiplication
-        Ok((0..len).map(move |i| base.offset(stride * i, field_layout, dl)))
-    }
-
-    /// Index into an array.
-    pub fn mplace_index(
-        &self,
-        base: &MPlaceTy<'tcx, M::Provenance>,
-        index: u64,
-    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
-        Ok(self.operand_index(&base.into(), index)?.assert_mem_place())
-    }
 
-    pub fn place_index(
-        &mut self,
-        base: &PlaceTy<'tcx, M::Provenance>,
-        index: u64,
-    ) -> InterpResult<'tcx, PlaceTy<'tcx, M::Provenance>> {
-        // There's not a lot we can do here, since we cannot have a place to a part of a local. If
-        // we are accessing the only element of a 1-element array, it's still the entire local...
-        // that doesn't seem worth it.
-        let base = self.force_allocation(base)?;
-        Ok(self.mplace_index(&base, index)?.into())
+        base.offset(offset, field_layout, self)
     }
 
-    //# ConstantIndex support
-
-    fn operand_constant_index(
+    fn project_constant_index<P: Projectable<'tcx, M::Provenance>>(
         &self,
-        base: &OpTy<'tcx, M::Provenance>,
+        base: &P,
         offset: u64,
         min_length: u64,
         from_end: bool,
-    ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+    ) -> InterpResult<'tcx, P> {
         let n = base.len(self)?;
         if n < min_length {
             // This can only be reached in ConstProp and non-rustc-MIR.
@@ -267,32 +198,38 @@ where
             offset
         };
 
-        self.operand_index(base, index)
+        self.project_index(base, index)
     }
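// The index computation lives just above this hunk; a simplified sketch of its
// `from_end` semantics (the real code asserts and uses checked subtraction):
// with `n = base.len()`, `ConstantIndex { offset: 2, from_end: true, .. }` on a
// length-5 value accesses index 5 - 2 = 3.
let index = if from_end { n - offset } else { offset };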
 
-    fn place_constant_index(
-        &mut self,
-        base: &PlaceTy<'tcx, M::Provenance>,
-        offset: u64,
-        min_length: u64,
-        from_end: bool,
-    ) -> InterpResult<'tcx, PlaceTy<'tcx, M::Provenance>> {
-        let base = self.force_allocation(base)?;
-        Ok(self
-            .operand_constant_index(&base.into(), offset, min_length, from_end)?
-            .assert_mem_place()
-            .into())
+    /// Iterates over all fields of an array. Much more efficient than doing the
+    /// same by repeatedly calling `project_index`.
+    pub fn project_array_fields<'a, P: Projectable<'tcx, M::Provenance>>(
+        &self,
+        base: &'a P,
+    ) -> InterpResult<'tcx, impl Iterator<Item = InterpResult<'tcx, P>> + 'a>
+    where
+        'tcx: 'a,
+    {
+        let abi::FieldsShape::Array { stride, .. } = base.layout().fields else {
+            span_bug!(self.cur_span(), "project_array_fields: expected an array layout");
+        };
+        let len = base.len(self)?;
+        let field_layout = base.layout().field(self, 0);
+        let tcx: TyCtxt<'tcx> = *self.tcx;
+        // `Size` multiplication
+        Ok((0..len).map(move |i| {
+            base.offset_with_meta(stride * i, MemPlaceMeta::None, field_layout, &tcx)
+        }))
     }
 
-    //# Subslicing
-
-    fn operand_subslice(
+    /// Subslicing
+    fn project_subslice<P: Projectable<'tcx, M::Provenance>>(
         &self,
-        base: &OpTy<'tcx, M::Provenance>,
+        base: &P,
         from: u64,
         to: u64,
         from_end: bool,
-    ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+    ) -> InterpResult<'tcx, P> {
         let len = base.len(self)?; // also asserts that we have a type where this makes sense
         let actual_to = if from_end {
             if from.checked_add(to).map_or(true, |to| to > len) {
@@ -306,16 +243,20 @@ where
 
         // Not using layout method because that works with usize, and does not work with slices
         // (that have count 0 in their layout).
-        let from_offset = match base.layout.fields {
+        let from_offset = match base.layout().fields {
             abi::FieldsShape::Array { stride, .. } => stride * from, // `Size` multiplication is checked
             _ => {
-                span_bug!(self.cur_span(), "unexpected layout of index access: {:#?}", base.layout)
+                span_bug!(
+                    self.cur_span(),
+                    "unexpected layout of index access: {:#?}",
+                    base.layout()
+                )
             }
         };
 
         // Compute meta and new layout
         let inner_len = actual_to.checked_sub(from).unwrap();
-        let (meta, ty) = match base.layout.ty.kind() {
+        let (meta, ty) = match base.layout().ty.kind() {
             // It is not nice to match on the type, but that seems to be the only way to
             // implement this.
             ty::Array(inner, _) => {
@@ -323,85 +264,43 @@ where
             }
             ty::Slice(..) => {
                 let len = Scalar::from_target_usize(inner_len, self);
-                (MemPlaceMeta::Meta(len), base.layout.ty)
+                (MemPlaceMeta::Meta(len), base.layout().ty)
             }
             _ => {
-                span_bug!(self.cur_span(), "cannot subslice non-array type: `{:?}`", base.layout.ty)
+                span_bug!(
+                    self.cur_span(),
+                    "cannot subslice non-array type: `{:?}`",
+                    base.layout().ty
+                )
             }
         };
         let layout = self.layout_of(ty)?;
-        base.offset_with_meta(from_offset, meta, layout, self)
-    }
-
-    pub fn place_subslice(
-        &mut self,
-        base: &PlaceTy<'tcx, M::Provenance>,
-        from: u64,
-        to: u64,
-        from_end: bool,
-    ) -> InterpResult<'tcx, PlaceTy<'tcx, M::Provenance>> {
-        let base = self.force_allocation(base)?;
-        Ok(self.operand_subslice(&base.into(), from, to, from_end)?.assert_mem_place().into())
-    }
-
-    //# Applying a general projection
 
-    /// Projects into a place.
-    #[instrument(skip(self), level = "trace")]
-    pub fn place_projection(
-        &mut self,
-        base: &PlaceTy<'tcx, M::Provenance>,
-        proj_elem: mir::PlaceElem<'tcx>,
-    ) -> InterpResult<'tcx, PlaceTy<'tcx, M::Provenance>> {
-        use rustc_middle::mir::ProjectionElem::*;
-        Ok(match proj_elem {
-            OpaqueCast(ty) => {
-                let mut place = base.clone();
-                place.layout = self.layout_of(ty)?;
-                place
-            }
-            Field(field, _) => self.place_field(base, field.index())?,
-            Downcast(_, variant) => self.place_downcast(base, variant)?,
-            Deref => self.deref_operand(&self.place_to_op(base)?)?.into(),
-            Index(local) => {
-                let layout = self.layout_of(self.tcx.types.usize)?;
-                let n = self.local_to_op(self.frame(), local, Some(layout))?;
-                let n = self.read_target_usize(&n)?;
-                self.place_index(base, n)?
-            }
-            ConstantIndex { offset, min_length, from_end } => {
-                self.place_constant_index(base, offset, min_length, from_end)?
-            }
-            Subslice { from, to, from_end } => self.place_subslice(base, from, to, from_end)?,
-        })
+        base.offset_with_meta(from_offset, meta, layout, self)
     }
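// A sketch of the two cases above, assuming `base` is an array or slice value,
// `ecx` an interpreter, and `ProjectionElem` imported from `rustc_middle::mir`:
// selecting base[1 .. len-1] through the generic `project` defined below. For
// `[T; N]` the result layout is `[T; N-2]` with no metadata; for `[T]` the type
// stays `[T]` and the new length travels in `MemPlaceMeta::Meta`.
let middle = ecx.project(&base, ProjectionElem::Subslice { from: 1, to: 1, from_end: true })?;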
 
+    /// Applying a general projection
     #[instrument(skip(self), level = "trace")]
-    pub fn operand_projection(
-        &self,
-        base: &OpTy<'tcx, M::Provenance>,
-        proj_elem: mir::PlaceElem<'tcx>,
-    ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+    pub fn project<P>(&self, base: &P, proj_elem: mir::PlaceElem<'tcx>) -> InterpResult<'tcx, P>
+    where
+        P: Projectable<'tcx, M::Provenance> + From<MPlaceTy<'tcx, M::Provenance>> + std::fmt::Debug,
+    {
         use rustc_middle::mir::ProjectionElem::*;
         Ok(match proj_elem {
-            OpaqueCast(ty) => {
-                let mut op = base.clone();
-                op.layout = self.layout_of(ty)?;
-                op
-            }
-            Field(field, _) => self.operand_field(base, field.index())?,
-            Downcast(_, variant) => self.operand_downcast(base, variant)?,
-            Deref => self.deref_operand(base)?.into(),
+            OpaqueCast(ty) => base.transmute(self.layout_of(ty)?, self)?,
+            Field(field, _) => self.project_field(base, field.index())?,
+            Downcast(_, variant) => self.project_downcast(base, variant)?,
+            Deref => self.deref_operand(&base.to_op(self)?)?.into(),
             Index(local) => {
                 let layout = self.layout_of(self.tcx.types.usize)?;
                 let n = self.local_to_op(self.frame(), local, Some(layout))?;
                 let n = self.read_target_usize(&n)?;
-                self.operand_index(base, n)?
+                self.project_index(base, n)?
             }
             ConstantIndex { offset, min_length, from_end } => {
-                self.operand_constant_index(base, offset, min_length, from_end)?
+                self.project_constant_index(base, offset, min_length, from_end)?
             }
-            Subslice { from, to, from_end } => self.operand_subslice(base, from, to, from_end)?,
+            Subslice { from, to, from_end } => self.project_subslice(base, from, to, from_end)?,
         })
     }
 }
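// Putting it together: resolving a full MIR place is now a fold of `project`
// over the projection elems, identical for operands and places. A sketch,
// assuming `place: &mir::Place` and the current frame as the starting point:
let mut cur = ecx.local_to_op(ecx.frame(), place.local, None)?;
for elem in place.projection {
    cur = ecx.project(&cur, elem)?;
}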
diff --git a/compiler/rustc_const_eval/src/interpret/step.rs b/compiler/rustc_const_eval/src/interpret/step.rs
index 619da8abb7d..91341ddacd1 100644
--- a/compiler/rustc_const_eval/src/interpret/step.rs
+++ b/compiler/rustc_const_eval/src/interpret/step.rs
@@ -8,7 +8,7 @@ use rustc_middle::mir;
 use rustc_middle::mir::interpret::{InterpResult, Scalar};
 use rustc_middle::ty::layout::LayoutOf;
 
-use super::{ImmTy, InterpCx, Machine};
+use super::{ImmTy, InterpCx, Machine, Projectable};
 use crate::util;
 
 impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
@@ -178,7 +178,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 // The operand always has the same type as the result.
                 let val = self.read_immediate(&self.eval_operand(operand, Some(dest.layout))?)?;
                 let val = self.unary_op(un_op, &val)?;
-                assert_eq!(val.layout, dest.layout, "layout mismatch for result of {:?}", un_op);
+                assert_eq!(val.layout, dest.layout, "layout mismatch for result of {un_op:?}");
                 self.write_immediate(*val, &dest)?;
             }
 
@@ -197,8 +197,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                     self.get_place_alloc_mut(&dest)?;
                 } else {
                     // Write the src to the first element.
-                    let first = self.mplace_field(&dest, 0)?;
-                    self.copy_op(&src, &first.into(), /*allow_transmute*/ false)?;
+                    let first = self.project_index(&dest, 0)?;
+                    self.copy_op(&src, &first, /*allow_transmute*/ false)?;
 
                     // This is performance-sensitive code for big static/const arrays! So we
                     // avoid writing each operand individually and instead just make many copies
@@ -302,8 +302,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 
             Discriminant(place) => {
                 let op = self.eval_place_to_op(place, None)?;
-                let discr_val = self.read_discriminant(&op)?.0;
-                self.write_scalar(discr_val, &dest)?;
+                let variant = self.read_discriminant(&op)?;
+                let discr = self.discriminant_for_variant(op.layout, variant)?;
+                self.write_scalar(discr, &dest)?;
             }
         }
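// The `Discriminant` rvalue is now computed in two steps, sketched here with an
// enum value `op` and destination `dest` assumed in scope:
let variant = ecx.read_discriminant(&op)?; // which variant is active
let discr = ecx.discriminant_for_variant(op.layout, variant)?; // its discriminant value
ecx.write_scalar(discr, &dest)?;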
 
diff --git a/compiler/rustc_const_eval/src/interpret/terminator.rs b/compiler/rustc_const_eval/src/interpret/terminator.rs
index 7964c6be008..d0191ea978a 100644
--- a/compiler/rustc_const_eval/src/interpret/terminator.rs
+++ b/compiler/rustc_const_eval/src/interpret/terminator.rs
@@ -60,13 +60,13 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
     }
 
     pub fn fn_arg_field(
-        &mut self,
+        &self,
         arg: &FnArg<'tcx, M::Provenance>,
         field: usize,
     ) -> InterpResult<'tcx, FnArg<'tcx, M::Provenance>> {
         Ok(match arg {
-            FnArg::Copy(op) => FnArg::Copy(self.operand_field(op, field)?),
-            FnArg::InPlace(place) => FnArg::InPlace(self.place_field(place, field)?),
+            FnArg::Copy(op) => FnArg::Copy(self.project_field(op, field)?),
+            FnArg::InPlace(place) => FnArg::InPlace(self.project_field(place, field)?),
         })
     }
 
@@ -239,7 +239,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 
     /// Evaluate the arguments of a function call
     pub(super) fn eval_fn_call_arguments(
-        &mut self,
+        &self,
         ops: &[mir::Operand<'tcx>],
     ) -> InterpResult<'tcx, Vec<FnArg<'tcx, M::Provenance>>> {
         ops.iter()
@@ -382,12 +382,16 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             // This all has to be in memory, there are no immediate unsized values.
             let src = caller_arg_copy.assert_mem_place();
             // The destination cannot be one of these "spread args".
-            let (dest_frame, dest_local) = callee_arg.assert_local();
+            let (dest_frame, dest_local, dest_offset) = callee_arg
+                .as_mplace_or_local()
+                .right()
+                .expect("callee fn arguments must be locals");
             // We are just initializing things, so there can't be anything here yet.
             assert!(matches!(
                 *self.local_to_op(&self.stack()[dest_frame], dest_local, None)?,
                 Operand::Immediate(Immediate::Uninit)
             ));
+            assert_eq!(dest_offset, None);
             // Allocate enough memory to hold `src`.
             let Some((size, align)) = self.size_and_align_of_mplace(&src)? else {
                 span_bug!(
@@ -595,7 +599,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                         if Some(local) == body.spread_arg {
                             // Must be a tuple
                             for i in 0..dest.layout.fields.count() {
-                                let dest = self.place_field(&dest, i)?;
+                                let dest = self.project_field(&dest, i)?;
                                 let callee_abi = callee_args_abis.next().unwrap();
                                 self.pass_argument(&mut caller_args, callee_abi, &dest)?;
                             }
@@ -630,7 +634,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                     // Ensure the return place is aligned and dereferenceable, and protect it for
                     // in-place return value passing.
                     if let Either::Left(mplace) = destination.as_mplace_or_local() {
-                        self.check_mplace(mplace)?;
+                        self.check_mplace(&mplace)?;
                     } else {
                         // Nothing to do for locals, they are always properly allocated and aligned.
                     }
@@ -677,7 +681,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                             // Not there yet, search for the only non-ZST field.
                             let mut non_zst_field = None;
                             for i in 0..receiver.layout.fields.count() {
-                                let field = self.operand_field(&receiver, i)?;
+                                let field = self.project_field(&receiver, i)?;
                                 let zst =
                                     field.layout.is_zst() && field.layout.align.abi.bytes() == 1;
                                 if !zst {
@@ -703,12 +707,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 let (vptr, dyn_ty, adjusted_receiver) = if let ty::Dynamic(data, _, ty::DynStar) =
                     receiver_place.layout.ty.kind()
                 {
-                    let (recv, vptr) = self.unpack_dyn_star(&receiver_place.into())?;
+                    let (recv, vptr) = self.unpack_dyn_star(&receiver_place)?;
                     let (dyn_ty, dyn_trait) = self.get_ptr_vtable(vptr)?;
                     if dyn_trait != data.principal() {
                         throw_ub_custom!(fluent::const_eval_dyn_star_call_vtable_mismatch);
                     }
-                    let recv = recv.assert_mem_place(); // we passed an MPlaceTy to `unpack_dyn_star` so we definitely still have one
 
                     (vptr, dyn_ty, recv.ptr)
                 } else {
@@ -836,7 +839,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             }
             ty::Dynamic(_, _, ty::DynStar) => {
                 // Dropping a `dyn*`. Need to find actual drop fn.
-                self.unpack_dyn_star(&place.into())?.0.assert_mem_place()
+                self.unpack_dyn_star(&place)?.0
             }
             _ => {
                 debug_assert_eq!(
diff --git a/compiler/rustc_const_eval/src/interpret/validity.rs b/compiler/rustc_const_eval/src/interpret/validity.rs
index 21c655988a0..aee95f70bc2 100644
--- a/compiler/rustc_const_eval/src/interpret/validity.rs
+++ b/compiler/rustc_const_eval/src/interpret/validity.rs
@@ -29,7 +29,7 @@ use std::hash::Hash;
 use super::UndefinedBehaviorInfo::*;
 use super::{
     AllocId, CheckInAllocMsg, GlobalAlloc, ImmTy, Immediate, InterpCx, InterpResult, MPlaceTy,
-    Machine, MemPlaceMeta, OpTy, Pointer, Scalar, ValueVisitor,
+    Machine, MemPlaceMeta, OpTy, Pointer, Projectable, Scalar, ValueVisitor,
 };
 
 macro_rules! throw_validation_failure {
@@ -136,19 +136,19 @@ pub struct RefTracking<T, PATH = ()> {
     pub todo: Vec<(T, PATH)>,
 }
 
-impl<T: Copy + Eq + Hash + std::fmt::Debug, PATH: Default> RefTracking<T, PATH> {
+impl<T: Clone + Eq + Hash + std::fmt::Debug, PATH: Default> RefTracking<T, PATH> {
     pub fn empty() -> Self {
         RefTracking { seen: FxHashSet::default(), todo: vec![] }
     }
     pub fn new(op: T) -> Self {
         let mut ref_tracking_for_consts =
-            RefTracking { seen: FxHashSet::default(), todo: vec![(op, PATH::default())] };
+            RefTracking { seen: FxHashSet::default(), todo: vec![(op.clone(), PATH::default())] };
         ref_tracking_for_consts.seen.insert(op);
         ref_tracking_for_consts
     }
 
     pub fn track(&mut self, op: T, path: impl FnOnce() -> PATH) {
-        if self.seen.insert(op) {
+        if self.seen.insert(op.clone()) {
             trace!("Recursing below ptr {:#?}", op);
             let path = path();
             // Remember to come back to this later.
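// A sketch of the worklist pattern `RefTracking` drives (the `Clone` bound above
// is what lets non-`Copy` value types participate); `validate` is illustrative:
let mut refs = RefTracking::new(root);
while let Some((val, path)) = refs.todo.pop() {
    validate(&val, &path)?;
    // Inside validation, newly found references are queued via
    // `refs.track(inner, || extended_path)`.
}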
@@ -164,14 +164,14 @@ fn write_path(out: &mut String, path: &[PathElem]) {
 
     for elem in path.iter() {
         match elem {
-            Field(name) => write!(out, ".{}", name),
+            Field(name) => write!(out, ".{name}"),
             EnumTag => write!(out, ".<enum-tag>"),
-            Variant(name) => write!(out, ".<enum-variant({})>", name),
+            Variant(name) => write!(out, ".<enum-variant({name})>"),
             GeneratorTag => write!(out, ".<generator-tag>"),
             GeneratorState(idx) => write!(out, ".<generator-state({})>", idx.index()),
-            CapturedVar(name) => write!(out, ".<captured-var({})>", name),
-            TupleElem(idx) => write!(out, ".{}", idx),
-            ArrayElem(idx) => write!(out, "[{}]", idx),
+            CapturedVar(name) => write!(out, ".<captured-var({name})>"),
+            TupleElem(idx) => write!(out, ".{idx}"),
+            ArrayElem(idx) => write!(out, "[{idx}]"),
             // `.<deref>` does not match Rust syntax, but it is more readable for long paths -- and
             // some of the other items here also are not Rust syntax. Actually we can't
             // even use the usual syntax because we are just showing the projections,
@@ -462,6 +462,8 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
 
     /// Check if this is a value of primitive type, and if yes check the validity of the value
     /// at that type. Return `true` if the type is indeed primitive.
+    ///
+    /// Note that not all of these have `FieldsShape::Primitive`, e.g. wide references.
     fn try_visit_primitive(
         &mut self,
         value: &OpTy<'tcx, M::Provenance>,
@@ -660,10 +662,9 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
                 InvalidTag(val) => InvalidEnumTag {
                     value: format!("{val:x}"),
                 },
-
+                UninhabitedEnumVariantRead(_) => UninhabitedEnumTag,
                 InvalidUninitBytes(None) => UninitEnumTag,
-            )
-            .1)
+            ))
         })
     }
 
@@ -733,60 +734,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
             }
         }
 
-        // Recursively walk the value at its type.
-        self.walk_value(op)?;
-
-        // *After* all of this, check the ABI. We need to check the ABI to handle
-        // types like `NonNull` where the `Scalar` info is more restrictive than what
-        // the fields say (`rustc_layout_scalar_valid_range_start`).
-        // But in most cases, this will just propagate what the fields say,
-        // and then we want the error to point at the field -- so, first recurse,
-        // then check ABI.
-        //
-        // FIXME: We could avoid some redundant checks here. For newtypes wrapping
-        // scalars, we do the same check on every "level" (e.g., first we check
-        // MyNewtype and then the scalar in there).
-        match op.layout.abi {
-            Abi::Uninhabited => {
-                let ty = op.layout.ty;
-                throw_validation_failure!(self.path, UninhabitedVal { ty });
-            }
-            Abi::Scalar(scalar_layout) => {
-                if !scalar_layout.is_uninit_valid() {
-                    // There is something to check here.
-                    let scalar = self.read_scalar(op, ExpectedKind::InitScalar)?;
-                    self.visit_scalar(scalar, scalar_layout)?;
-                }
-            }
-            Abi::ScalarPair(a_layout, b_layout) => {
-                // We can only proceed if *both* scalars need to be initialized.
-                // FIXME: find a way to also check ScalarPair when one side can be uninit but
-                // the other must be init.
-                if !a_layout.is_uninit_valid() && !b_layout.is_uninit_valid() {
-                    let (a, b) =
-                        self.read_immediate(op, ExpectedKind::InitScalar)?.to_scalar_pair();
-                    self.visit_scalar(a, a_layout)?;
-                    self.visit_scalar(b, b_layout)?;
-                }
-            }
-            Abi::Vector { .. } => {
-                // No checks here, we assume layout computation gets this right.
-                // (This is harder to check since Miri does not represent these as `Immediate`. We
-                // also cannot use field projections since this might be a newtype around a vector.)
-            }
-            Abi::Aggregate { .. } => {
-                // Nothing to do.
-            }
-        }
-
-        Ok(())
-    }
-
-    fn visit_aggregate(
-        &mut self,
-        op: &OpTy<'tcx, M::Provenance>,
-        fields: impl Iterator<Item = InterpResult<'tcx, Self::V>>,
-    ) -> InterpResult<'tcx> {
+        // Recursively walk the value at its type. Apply optimizations for some large types.
         match op.layout.ty.kind() {
             ty::Str => {
                 let mplace = op.assert_mem_place(); // strings are unsized and hence never immediate
@@ -874,12 +822,58 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
             // ZST type, so either validation fails for all elements or none.
             ty::Array(tys, ..) | ty::Slice(tys) if self.ecx.layout_of(*tys)?.is_zst() => {
                 // Validate just the first element (if any).
-                self.walk_aggregate(op, fields.take(1))?
+                if op.len(self.ecx)? > 0 {
+                    self.visit_field(op, 0, &self.ecx.project_index(op, 0)?)?;
+                }
             }
             _ => {
-                self.walk_aggregate(op, fields)? // default handler
+                self.walk_value(op)?; // default handler
             }
         }
+
+        // *After* all of this, check the ABI. We need to check the ABI to handle
+        // types like `NonNull` where the `Scalar` info is more restrictive than what
+        // the fields say (`rustc_layout_scalar_valid_range_start`).
+        // But in most cases, this will just propagate what the fields say,
+        // and then we want the error to point at the field -- so, first recurse,
+        // then check ABI.
+        //
+        // FIXME: We could avoid some redundant checks here. For newtypes wrapping
+        // scalars, we do the same check on every "level" (e.g., first we check
+        // MyNewtype and then the scalar in there).
+        match op.layout.abi {
+            Abi::Uninhabited => {
+                let ty = op.layout.ty;
+                throw_validation_failure!(self.path, UninhabitedVal { ty });
+            }
+            Abi::Scalar(scalar_layout) => {
+                if !scalar_layout.is_uninit_valid() {
+                    // There is something to check here.
+                    let scalar = self.read_scalar(op, ExpectedKind::InitScalar)?;
+                    self.visit_scalar(scalar, scalar_layout)?;
+                }
+            }
+            Abi::ScalarPair(a_layout, b_layout) => {
+                // We can only proceed if *both* scalars need to be initialized.
+                // FIXME: find a way to also check ScalarPair when one side can be uninit but
+                // the other must be init.
+                if !a_layout.is_uninit_valid() && !b_layout.is_uninit_valid() {
+                    let (a, b) =
+                        self.read_immediate(op, ExpectedKind::InitScalar)?.to_scalar_pair();
+                    self.visit_scalar(a, a_layout)?;
+                    self.visit_scalar(b, b_layout)?;
+                }
+            }
+            Abi::Vector { .. } => {
+                // No checks here, we assume layout computation gets this right.
+                // (This is harder to check since Miri does not represent these as `Immediate`. We
+                // also cannot use field projections since this might be a newtype around a vector.)
+            }
+            Abi::Aggregate { .. } => {
+                // Nothing to do.
+            }
+        }
+
         Ok(())
     }
 }
diff --git a/compiler/rustc_const_eval/src/interpret/visitor.rs b/compiler/rustc_const_eval/src/interpret/visitor.rs
index 879ae198f7e..531e2bd3ee0 100644
--- a/compiler/rustc_const_eval/src/interpret/visitor.rs
+++ b/compiler/rustc_const_eval/src/interpret/visitor.rs
@@ -1,544 +1,202 @@
 //! Visitor for a run-time value with a given layout: Traverse enums, structs and other compound
 //! types until we arrive at the leaves, with custom handling for primitive types.
 
+use rustc_index::IndexVec;
 use rustc_middle::mir::interpret::InterpResult;
 use rustc_middle::ty;
-use rustc_middle::ty::layout::TyAndLayout;
+use rustc_target::abi::FieldIdx;
 use rustc_target::abi::{FieldsShape, VariantIdx, Variants};
 
 use std::num::NonZeroUsize;
 
-use super::{InterpCx, MPlaceTy, Machine, OpTy, PlaceTy};
+use super::{InterpCx, MPlaceTy, Machine, Projectable};
 
-/// A thing that we can project into, and that has a layout.
-/// This wouldn't have to depend on `Machine` but with the current type inference,
-/// that's just more convenient to work with (avoids repeating all the `Machine` bounds).
-pub trait Value<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>>: Sized {
-    /// Gets this value's layout.
-    fn layout(&self) -> TyAndLayout<'tcx>;
+/// How to traverse a value and what to do when we are at the leaves.
+pub trait ValueVisitor<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>>: Sized {
+    type V: Projectable<'tcx, M::Provenance> + From<MPlaceTy<'tcx, M::Provenance>>;
 
-    /// Makes this into an `OpTy`, in a cheap way that is good for reading.
-    fn to_op_for_read(
-        &self,
-        ecx: &InterpCx<'mir, 'tcx, M>,
-    ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>>;
-
-    /// Makes this into an `OpTy`, in a potentially more expensive way that is good for projections.
-    fn to_op_for_proj(
-        &self,
-        ecx: &InterpCx<'mir, 'tcx, M>,
-    ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
-        self.to_op_for_read(ecx)
-    }
-
-    /// Creates this from an `OpTy`.
-    ///
-    /// If `to_op_for_proj` only ever produces `Indirect` operands, then this one is definitely `Indirect`.
-    fn from_op(op: &OpTy<'tcx, M::Provenance>) -> Self;
-
-    /// Projects to the given enum variant.
-    fn project_downcast(
-        &self,
-        ecx: &InterpCx<'mir, 'tcx, M>,
-        variant: VariantIdx,
-    ) -> InterpResult<'tcx, Self>;
-
-    /// Projects to the n-th field.
-    fn project_field(
-        &self,
-        ecx: &InterpCx<'mir, 'tcx, M>,
-        field: usize,
-    ) -> InterpResult<'tcx, Self>;
-}
-
-/// A thing that we can project into given *mutable* access to `ecx`, and that has a layout.
-/// This wouldn't have to depend on `Machine` but with the current type inference,
-/// that's just more convenient to work with (avoids repeating all the `Machine` bounds).
-pub trait ValueMut<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>>: Sized {
-    /// Gets this value's layout.
-    fn layout(&self) -> TyAndLayout<'tcx>;
-
-    /// Makes this into an `OpTy`, in a cheap way that is good for reading.
-    fn to_op_for_read(
-        &self,
-        ecx: &InterpCx<'mir, 'tcx, M>,
-    ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>>;
-
-    /// Makes this into an `OpTy`, in a potentially more expensive way that is good for projections.
-    fn to_op_for_proj(
-        &self,
-        ecx: &mut InterpCx<'mir, 'tcx, M>,
-    ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>>;
-
-    /// Creates this from an `OpTy`.
-    ///
-    /// If `to_op_for_proj` only ever produces `Indirect` operands, then this one is definitely `Indirect`.
-    fn from_op(op: &OpTy<'tcx, M::Provenance>) -> Self;
-
-    /// Projects to the given enum variant.
-    fn project_downcast(
-        &self,
-        ecx: &mut InterpCx<'mir, 'tcx, M>,
-        variant: VariantIdx,
-    ) -> InterpResult<'tcx, Self>;
-
-    /// Projects to the n-th field.
-    fn project_field(
-        &self,
-        ecx: &mut InterpCx<'mir, 'tcx, M>,
-        field: usize,
-    ) -> InterpResult<'tcx, Self>;
-}
-
-// We cannot have a general impl which shows that Value implies ValueMut. (When we do, it says we
-// cannot `impl ValueMut for PlaceTy` because some downstream crate could `impl Value for PlaceTy`.)
-// So we have some copy-paste here. (We could have a macro but since we only have 2 types with this
-// double-impl, that would barely make the code shorter, if at all.)
-
-impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> Value<'mir, 'tcx, M> for OpTy<'tcx, M::Provenance> {
-    #[inline(always)]
-    fn layout(&self) -> TyAndLayout<'tcx> {
-        self.layout
-    }
-
-    #[inline(always)]
-    fn to_op_for_read(
-        &self,
-        _ecx: &InterpCx<'mir, 'tcx, M>,
-    ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
-        Ok(self.clone())
-    }
-
-    #[inline(always)]
-    fn from_op(op: &OpTy<'tcx, M::Provenance>) -> Self {
-        op.clone()
-    }
-
-    #[inline(always)]
-    fn project_downcast(
-        &self,
-        ecx: &InterpCx<'mir, 'tcx, M>,
-        variant: VariantIdx,
-    ) -> InterpResult<'tcx, Self> {
-        ecx.operand_downcast(self, variant)
-    }
-
-    #[inline(always)]
-    fn project_field(
-        &self,
-        ecx: &InterpCx<'mir, 'tcx, M>,
-        field: usize,
-    ) -> InterpResult<'tcx, Self> {
-        ecx.operand_field(self, field)
-    }
-}
-
-impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueMut<'mir, 'tcx, M>
-    for OpTy<'tcx, M::Provenance>
-{
-    #[inline(always)]
-    fn layout(&self) -> TyAndLayout<'tcx> {
-        self.layout
-    }
-
-    #[inline(always)]
-    fn to_op_for_read(
-        &self,
-        _ecx: &InterpCx<'mir, 'tcx, M>,
-    ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
-        Ok(self.clone())
-    }
-
-    #[inline(always)]
-    fn to_op_for_proj(
-        &self,
-        _ecx: &mut InterpCx<'mir, 'tcx, M>,
-    ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
-        Ok(self.clone())
-    }
-
-    #[inline(always)]
-    fn from_op(op: &OpTy<'tcx, M::Provenance>) -> Self {
-        op.clone()
-    }
-
-    #[inline(always)]
-    fn project_downcast(
-        &self,
-        ecx: &mut InterpCx<'mir, 'tcx, M>,
-        variant: VariantIdx,
-    ) -> InterpResult<'tcx, Self> {
-        ecx.operand_downcast(self, variant)
-    }
-
-    #[inline(always)]
-    fn project_field(
-        &self,
-        ecx: &mut InterpCx<'mir, 'tcx, M>,
-        field: usize,
-    ) -> InterpResult<'tcx, Self> {
-        ecx.operand_field(self, field)
-    }
-}
-
-impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> Value<'mir, 'tcx, M>
-    for MPlaceTy<'tcx, M::Provenance>
-{
-    #[inline(always)]
-    fn layout(&self) -> TyAndLayout<'tcx> {
-        self.layout
-    }
-
-    #[inline(always)]
-    fn to_op_for_read(
-        &self,
-        _ecx: &InterpCx<'mir, 'tcx, M>,
-    ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
-        Ok(self.into())
-    }
-
-    #[inline(always)]
-    fn from_op(op: &OpTy<'tcx, M::Provenance>) -> Self {
-        // assert is justified because our `to_op_for_read` only ever produces `Indirect` operands.
-        op.assert_mem_place()
-    }
-
-    #[inline(always)]
-    fn project_downcast(
-        &self,
-        ecx: &InterpCx<'mir, 'tcx, M>,
-        variant: VariantIdx,
-    ) -> InterpResult<'tcx, Self> {
-        ecx.mplace_downcast(self, variant)
-    }
-
-    #[inline(always)]
-    fn project_field(
-        &self,
-        ecx: &InterpCx<'mir, 'tcx, M>,
-        field: usize,
-    ) -> InterpResult<'tcx, Self> {
-        ecx.mplace_field(self, field)
-    }
-}
-
-impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueMut<'mir, 'tcx, M>
-    for MPlaceTy<'tcx, M::Provenance>
-{
-    #[inline(always)]
-    fn layout(&self) -> TyAndLayout<'tcx> {
-        self.layout
-    }
-
-    #[inline(always)]
-    fn to_op_for_read(
-        &self,
-        _ecx: &InterpCx<'mir, 'tcx, M>,
-    ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
-        Ok(self.into())
-    }
-
-    #[inline(always)]
-    fn to_op_for_proj(
-        &self,
-        _ecx: &mut InterpCx<'mir, 'tcx, M>,
-    ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
-        Ok(self.into())
-    }
-
-    #[inline(always)]
-    fn from_op(op: &OpTy<'tcx, M::Provenance>) -> Self {
-        // assert is justified because our `to_op_for_proj` only ever produces `Indirect` operands.
-        op.assert_mem_place()
-    }
-
-    #[inline(always)]
-    fn project_downcast(
-        &self,
-        ecx: &mut InterpCx<'mir, 'tcx, M>,
-        variant: VariantIdx,
-    ) -> InterpResult<'tcx, Self> {
-        ecx.mplace_downcast(self, variant)
-    }
-
-    #[inline(always)]
-    fn project_field(
-        &self,
-        ecx: &mut InterpCx<'mir, 'tcx, M>,
-        field: usize,
-    ) -> InterpResult<'tcx, Self> {
-        ecx.mplace_field(self, field)
-    }
-}
-
-impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueMut<'mir, 'tcx, M>
-    for PlaceTy<'tcx, M::Provenance>
-{
-    #[inline(always)]
-    fn layout(&self) -> TyAndLayout<'tcx> {
-        self.layout
-    }
-
-    #[inline(always)]
-    fn to_op_for_read(
-        &self,
-        ecx: &InterpCx<'mir, 'tcx, M>,
-    ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
-        // No need for `force_allocation` since we are just going to read from this.
-        ecx.place_to_op(self)
-    }
+    /// The visitor must have an `InterpCx` in it.
+    fn ecx(&self) -> &InterpCx<'mir, 'tcx, M>;
 
+    /// `read_discriminant` can be hooked for better error messages.
     #[inline(always)]
-    fn to_op_for_proj(
-        &self,
-        ecx: &mut InterpCx<'mir, 'tcx, M>,
-    ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
-        // We `force_allocation` here so that `from_op` below can work.
-        Ok(ecx.force_allocation(self)?.into())
+    fn read_discriminant(&mut self, v: &Self::V) -> InterpResult<'tcx, VariantIdx> {
+        Ok(self.ecx().read_discriminant(&v.to_op(self.ecx())?)?)
     }
 
-    #[inline(always)]
-    fn from_op(op: &OpTy<'tcx, M::Provenance>) -> Self {
-        // assert is justified because our `to_op` only ever produces `Indirect` operands.
-        op.assert_mem_place().into()
-    }
-
-    #[inline(always)]
-    fn project_downcast(
-        &self,
-        ecx: &mut InterpCx<'mir, 'tcx, M>,
-        variant: VariantIdx,
-    ) -> InterpResult<'tcx, Self> {
-        ecx.place_downcast(self, variant)
-    }
-
-    #[inline(always)]
-    fn project_field(
-        &self,
-        ecx: &mut InterpCx<'mir, 'tcx, M>,
-        field: usize,
-    ) -> InterpResult<'tcx, Self> {
-        ecx.place_field(self, field)
-    }
-}
-
-macro_rules! make_value_visitor {
-    ($visitor_trait:ident, $value_trait:ident, $($mutability:ident)?) => {
-        /// How to traverse a value and what to do when we are at the leaves.
-        pub trait $visitor_trait<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>>: Sized {
-            type V: $value_trait<'mir, 'tcx, M>;
-
-            /// The visitor must have an `InterpCx` in it.
-            fn ecx(&$($mutability)? self)
-                -> &$($mutability)? InterpCx<'mir, 'tcx, M>;
-
-            /// `read_discriminant` can be hooked for better error messages.
-            #[inline(always)]
-            fn read_discriminant(
-                &mut self,
-                op: &OpTy<'tcx, M::Provenance>,
-            ) -> InterpResult<'tcx, VariantIdx> {
-                Ok(self.ecx().read_discriminant(op)?.1)
-            }
-
-            // Recursive actions, ready to be overloaded.
-            /// Visits the given value, dispatching as appropriate to more specialized visitors.
-            #[inline(always)]
-            fn visit_value(&mut self, v: &Self::V) -> InterpResult<'tcx>
-            {
-                self.walk_value(v)
-            }
-            /// Visits the given value as a union. No automatic recursion can happen here.
-            #[inline(always)]
-            fn visit_union(&mut self, _v: &Self::V, _fields: NonZeroUsize) -> InterpResult<'tcx>
-            {
-                Ok(())
-            }
-            /// Visits the given value as the pointer of a `Box`. There is nothing to recurse into.
-            /// The type of `v` will be a raw pointer, but this is a field of `Box<T>` and the
-            /// pointee type is the actual `T`.
-            #[inline(always)]
-            fn visit_box(&mut self, _v: &Self::V) -> InterpResult<'tcx>
-            {
-                Ok(())
+    /// This function provides the chance to change the order in which fields are visited for
+    /// `FieldsShape::Aggregate`: The order of fields will be
+    /// `(0..num_fields).map(aggregate_field_order)`.
+    ///
+    /// The default means we iterate in source declaration order; alternatively, this can do an inverse
+    /// lookup in `memory_index` to use memory field order instead.
+    #[inline(always)]
+    fn aggregate_field_order(_memory_index: &IndexVec<FieldIdx, u32>, idx: usize) -> usize {
+        idx
+    }
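// A sketch of an override that visits fields in memory order instead, via the
// inverse lookup mentioned above (illustrative, not part of this trait):
fn aggregate_field_order(memory_index: &IndexVec<FieldIdx, u32>, idx: usize) -> usize {
    // Return the declaration index of the field occupying memory position `idx`.
    memory_index.iter().position(|&mem| mem as usize == idx).unwrap()
}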
+
+    // Recursive actions, ready to be overloaded.
+    /// Visits the given value, dispatching as appropriate to more specialized visitors.
+    #[inline(always)]
+    fn visit_value(&mut self, v: &Self::V) -> InterpResult<'tcx> {
+        self.walk_value(v)
+    }
+    /// Visits the given value as a union. No automatic recursion can happen here.
+    #[inline(always)]
+    fn visit_union(&mut self, _v: &Self::V, _fields: NonZeroUsize) -> InterpResult<'tcx> {
+        Ok(())
+    }
+    /// Visits the given value as the pointer of a `Box`. There is nothing to recurse into.
+    /// The type of `v` will be a raw pointer, but this is a field of `Box<T>` and the
+    /// pointee type is the actual `T`.
+    #[inline(always)]
+    fn visit_box(&mut self, _v: &Self::V) -> InterpResult<'tcx> {
+        Ok(())
+    }
+
+    /// Called each time we recurse down to a field of a "product-like" aggregate
+    /// (structs, tuples, arrays and the like, but not enums), passing in old (outer)
+    /// and new (inner) value.
+    /// This gives the visitor the chance to track the stack of nested fields that
+    /// we are descending through.
+    #[inline(always)]
+    fn visit_field(
+        &mut self,
+        _old_val: &Self::V,
+        _field: usize,
+        new_val: &Self::V,
+    ) -> InterpResult<'tcx> {
+        self.visit_value(new_val)
+    }
+    /// Called when recursing into an enum variant.
+    /// This gives the visitor the chance to track the stack of nested fields that
+    /// we are descending through.
+    #[inline(always)]
+    fn visit_variant(
+        &mut self,
+        _old_val: &Self::V,
+        _variant: VariantIdx,
+        new_val: &Self::V,
+    ) -> InterpResult<'tcx> {
+        self.visit_value(new_val)
+    }
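// Sketch: path-tracking visitors (validation, for instance) override these
// hooks; `self.path` is a hypothetical stack of path elements.
fn visit_field(&mut self, _old: &Self::V, field: usize, new: &Self::V) -> InterpResult<'tcx> {
    self.path.push(field); // record where we are descending ...
    let res = self.visit_value(new);
    self.path.pop(); // ... and restore on the way back up
    res
}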
+
+    fn walk_value(&mut self, v: &Self::V) -> InterpResult<'tcx> {
+        let ty = v.layout().ty;
+        trace!("walk_value: type: {ty}");
+
+        // Special treatment for special types, where the (static) layout is not sufficient.
+        match *ty.kind() {
+            // If it is a trait object, switch to the real type that was used to create it.
+            ty::Dynamic(_, _, ty::Dyn) => {
+                // Dyn types. This is unsized, and the actual dynamic type of the data is given by the
+                // vtable stored in the place metadata.
+                // Unsized values are never immediate, so we can call `assert_mem_place`.
+                let op = v.to_op(self.ecx())?;
+                let dest = op.assert_mem_place();
+                let inner_mplace = self.ecx().unpack_dyn_trait(&dest)?.0;
+                trace!("walk_value: dyn object layout: {:#?}", inner_mplace.layout);
+                // recurse with the inner type
+                return self.visit_field(&v, 0, &inner_mplace.into());
             }
-            /// Visits this value as an aggregate, you are getting an iterator yielding
-            /// all the fields (still in an `InterpResult`, you have to do error handling yourself).
-            /// Recurses into the fields.
-            #[inline(always)]
-            fn visit_aggregate(
-                &mut self,
-                v: &Self::V,
-                fields: impl Iterator<Item=InterpResult<'tcx, Self::V>>,
-            ) -> InterpResult<'tcx> {
-                self.walk_aggregate(v, fields)
+            ty::Dynamic(_, _, ty::DynStar) => {
+                // DynStar types. Very different from a dyn type (but strangely part of the
+                // same variant in `TyKind`): These are pairs where the second component is the
+                // vtable, and the first component is the data (which must be ptr-sized).
+                let data = self.ecx().unpack_dyn_star(v)?.0;
+                return self.visit_field(&v, 0, &data);
             }
-
-            /// Called each time we recurse down to a field of a "product-like" aggregate
-            /// (structs, tuples, arrays and the like, but not enums), passing in old (outer)
-            /// and new (inner) value.
-            /// This gives the visitor the chance to track the stack of nested fields that
-            /// we are descending through.
-            #[inline(always)]
-            fn visit_field(
-                &mut self,
-                _old_val: &Self::V,
-                _field: usize,
-                new_val: &Self::V,
-            ) -> InterpResult<'tcx> {
-                self.visit_value(new_val)
+            // Slices do not need special handling here: they have `Array` field
+            // placement with length 0, so we enter the `Array` case below which
+            // indirectly uses the metadata to determine the actual length.
+
+            // However, `Box`... let's talk about `Box`.
+            ty::Adt(def, ..) if def.is_box() => {
+                // `Box` is a hybrid primitive-library-defined type that on the one hand is
+                // a dereferenceable pointer, and on the other hand has *basically arbitrary
+                // user-defined layout* since the user controls the 'allocator' field. So it
+                // cannot be treated like a normal pointer, since it does not fit into an
+                // `Immediate`. Yeah, it is quite terrible. But many visitors want to do
+                // something with "all boxed pointers", so we handle this mess for them.
+                //
+                // When we hit a `Box`, we do not do the usual field recursion; instead,
+                // we (a) call `visit_box` on the pointer value, and (b) recurse on the
+                // allocator field. We also assert tons of things to ensure we do not miss
+                // any other fields.
+
+                // `Box` has two fields: the pointer we care about, and the allocator.
+                assert_eq!(v.layout().fields.count(), 2, "`Box` must have exactly 2 fields");
+                let (unique_ptr, alloc) =
+                    (self.ecx().project_field(v, 0)?, self.ecx().project_field(v, 1)?);
+                // Unfortunately there is some type junk in the way here: `unique_ptr` is a `Unique`...
+                // (which means another 2 fields, the second of which is a `PhantomData`)
+                assert_eq!(unique_ptr.layout().fields.count(), 2);
+                let (nonnull_ptr, phantom) = (
+                    self.ecx().project_field(&unique_ptr, 0)?,
+                    self.ecx().project_field(&unique_ptr, 1)?,
+                );
+                assert!(
+                    phantom.layout().ty.ty_adt_def().is_some_and(|adt| adt.is_phantom_data()),
+                    "2nd field of `Unique` should be PhantomData but is {:?}",
+                    phantom.layout().ty,
+                );
+                // ... that contains a `NonNull`... (thankfully, only a single field here)
+                assert_eq!(nonnull_ptr.layout().fields.count(), 1);
+                let raw_ptr = self.ecx().project_field(&nonnull_ptr, 0)?; // the actual raw ptr
+                // ... whose only field finally is a raw ptr we can dereference.
+                self.visit_box(&raw_ptr)?;
+
+                // The second `Box` field is the allocator, which we recursively check for validity
+                // like in regular structs.
+                self.visit_field(v, 1, &alloc)?;
+
+                // We visited all parts of this one.
+                return Ok(());
             }
-            /// Called when recursing into an enum variant.
-            /// This gives the visitor the chance to track the stack of nested fields that
-            /// we are descending through.
-            #[inline(always)]
-            fn visit_variant(
-                &mut self,
-                _old_val: &Self::V,
-                _variant: VariantIdx,
-                new_val: &Self::V,
-            ) -> InterpResult<'tcx> {
-                self.visit_value(new_val)
+            _ => {}
+        };
+
+        // Visit the fields of this value.
+        match &v.layout().fields {
+            FieldsShape::Primitive => {}
+            &FieldsShape::Union(fields) => {
+                self.visit_union(v, fields)?;
             }
-
-            // Default recursors. Not meant to be overloaded.
-            fn walk_aggregate(
-                &mut self,
-                v: &Self::V,
-                fields: impl Iterator<Item=InterpResult<'tcx, Self::V>>,
-            ) -> InterpResult<'tcx> {
-                // Now iterate over it.
-                for (idx, field_val) in fields.enumerate() {
-                    self.visit_field(v, idx, &field_val?)?;
+            FieldsShape::Arbitrary { offsets, memory_index } => {
+                for idx in 0..offsets.len() {
+                    let idx = Self::aggregate_field_order(memory_index, idx);
+                    let field = self.ecx().project_field(v, idx)?;
+                    self.visit_field(v, idx, &field)?;
                 }
-                Ok(())
             }
-            fn walk_value(&mut self, v: &Self::V) -> InterpResult<'tcx>
-            {
-                let ty = v.layout().ty;
-                trace!("walk_value: type: {ty}");
-
-                // Special treatment for special types, where the (static) layout is not sufficient.
-                match *ty.kind() {
-                    // If it is a trait object, switch to the real type that was used to create it.
-                    ty::Dynamic(_, _, ty::Dyn) => {
-                        // Dyn types. This is unsized, and the actual dynamic type of the data is given by the
-                        // vtable stored in the place metadata.
-                        // unsized values are never immediate, so we can assert_mem_place
-                        let op = v.to_op_for_read(self.ecx())?;
-                        let dest = op.assert_mem_place();
-                        let inner_mplace = self.ecx().unpack_dyn_trait(&dest)?.0;
-                        trace!("walk_value: dyn object layout: {:#?}", inner_mplace.layout);
-                        // recurse with the inner type
-                        return self.visit_field(&v, 0, &$value_trait::from_op(&inner_mplace.into()));
-                    },
-                    ty::Dynamic(_, _, ty::DynStar) => {
-                        // DynStar types. Very different from a dyn type (but strangely part of the
-                        // same variant in `TyKind`): These are pairs where the 2nd component is the
-                        // vtable, and the first component is the data (which must be ptr-sized).
-                        let op = v.to_op_for_proj(self.ecx())?;
-                        let data = self.ecx().unpack_dyn_star(&op)?.0;
-                        return self.visit_field(&v, 0, &$value_trait::from_op(&data));
-                    }
-                    // Slices do not need special handling here: they have `Array` field
-                    // placement with length 0, so we enter the `Array` case below which
-                    // indirectly uses the metadata to determine the actual length.
-
-                    // However, `Box`... let's talk about `Box`.
-                    ty::Adt(def, ..) if def.is_box() => {
-                        // `Box` is a hybrid primitive-library-defined type that one the one hand is
-                        // a dereferenceable pointer, on the other hand has *basically arbitrary
-                        // user-defined layout* since the user controls the 'allocator' field. So it
-                        // cannot be treated like a normal pointer, since it does not fit into an
-                        // `Immediate`. Yeah, it is quite terrible. But many visitors want to do
-                        // something with "all boxed pointers", so we handle this mess for them.
-                        //
-                        // When we hit a `Box`, we do not do the usual `visit_aggregate`; instead,
-                        // we (a) call `visit_box` on the pointer value, and (b) recurse on the
-                        // allocator field. We also assert tons of things to ensure we do not miss
-                        // any other fields.
-
-                        // `Box` has two fields: the pointer we care about, and the allocator.
-                        assert_eq!(v.layout().fields.count(), 2, "`Box` must have exactly 2 fields");
-                        let (unique_ptr, alloc) =
-                            (v.project_field(self.ecx(), 0)?, v.project_field(self.ecx(), 1)?);
-                        // Unfortunately there is some type junk in the way here: `unique_ptr` is a `Unique`...
-                        // (which means another 2 fields, the second of which is a `PhantomData`)
-                        assert_eq!(unique_ptr.layout().fields.count(), 2);
-                        let (nonnull_ptr, phantom) = (
-                            unique_ptr.project_field(self.ecx(), 0)?,
-                            unique_ptr.project_field(self.ecx(), 1)?,
-                        );
-                        assert!(
-                            phantom.layout().ty.ty_adt_def().is_some_and(|adt| adt.is_phantom_data()),
-                            "2nd field of `Unique` should be PhantomData but is {:?}",
-                            phantom.layout().ty,
-                        );
-                        // ... that contains a `NonNull`... (gladly, only a single field here)
-                        assert_eq!(nonnull_ptr.layout().fields.count(), 1);
-                        let raw_ptr = nonnull_ptr.project_field(self.ecx(), 0)?; // the actual raw ptr
-                        // ... whose only field finally is a raw ptr we can dereference.
-                        self.visit_box(&raw_ptr)?;
-
-                        // The second `Box` field is the allocator, which we recursively check for validity
-                        // like in regular structs.
-                        self.visit_field(v, 1, &alloc)?;
-
-                        // We visited all parts of this one.
-                        return Ok(());
-                    }
-                    _ => {},
-                };
-
-                // Visit the fields of this value.
-                match &v.layout().fields {
-                    FieldsShape::Primitive => {}
-                    &FieldsShape::Union(fields) => {
-                        self.visit_union(v, fields)?;
-                    }
-                    FieldsShape::Arbitrary { offsets, .. } => {
-                        // FIXME: We collect in a vec because otherwise there are lifetime
-                        // errors: Projecting to a field needs access to `ecx`.
-                        let fields: Vec<InterpResult<'tcx, Self::V>> =
-                            (0..offsets.len()).map(|i| {
-                                v.project_field(self.ecx(), i)
-                            })
-                            .collect();
-                        self.visit_aggregate(v, fields.into_iter())?;
-                    }
-                    FieldsShape::Array { .. } => {
-                        // Let's get an mplace (or immediate) first.
-                        // This might `force_allocate` if `v` is a `PlaceTy`, but `place_index` does that anyway.
-                        let op = v.to_op_for_proj(self.ecx())?;
-                        // Now we can go over all the fields.
-                        // This uses the *run-time length*, i.e., if we are a slice,
-                        // the dynamic info from the metadata is used.
-                        let iter = self.ecx().operand_array_fields(&op)?
-                            .map(|f| f.and_then(|f| {
-                                Ok($value_trait::from_op(&f))
-                            }));
-                        self.visit_aggregate(v, iter)?;
-                    }
+            FieldsShape::Array { .. } => {
+                for (idx, field) in self.ecx().project_array_fields(v)?.enumerate() {
+                    self.visit_field(v, idx, &field?)?;
                 }
+            }
+        }
 
-                match v.layout().variants {
-                    // If this is a multi-variant layout, find the right variant and proceed
-                    // with *its* fields.
-                    Variants::Multiple { .. } => {
-                        let op = v.to_op_for_read(self.ecx())?;
-                        let idx = self.read_discriminant(&op)?;
-                        let inner = v.project_downcast(self.ecx(), idx)?;
-                        trace!("walk_value: variant layout: {:#?}", inner.layout());
-                        // recurse with the inner type
-                        self.visit_variant(v, idx, &inner)
-                    }
-                    // For single-variant layouts, we already did anything there is to do.
-                    Variants::Single { .. } => Ok(())
-                }
+        match v.layout().variants {
+            // If this is a multi-variant layout, find the right variant and proceed
+            // with *its* fields.
+            Variants::Multiple { .. } => {
+                let idx = self.read_discriminant(v)?;
+                // There are 3 cases where downcasts can turn a Scalar/ScalarPair into a different
+                // ABI, which could be a problem for `ImmTy` (see layout_sanity_check):
+                // - variant.size == Size::ZERO: works fine because `ImmTy::offset` has a special case for
+                //   zero-sized layouts.
+                // - variant.fields.count() == 0: works fine because `ImmTy::offset` has a special case for
+                //   zero-field aggregates.
+                // - variant.abi.is_uninhabited(): triggers UB in `read_discriminant` so we never get here.
+                let inner = self.ecx().project_downcast(v, idx)?;
+                trace!("walk_value: variant layout: {:#?}", inner.layout());
+                // recurse with the inner type
+                self.visit_variant(v, idx, &inner)?;
             }
+            // For single-variant layouts, we already did anything there is to do.
+            Variants::Single { .. } => {}
         }
+
+        Ok(())
     }
 }
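+
+// A minimal usage sketch (hypothetical; approximate signatures): a concrete visitor picks a
+// `Self::V` such as `OpTy<'tcx, M::Provenance>`, provides the accessors `walk_value` relies
+// on (`ecx` and `read_discriminant`, used above), and overrides only the hooks it needs;
+// `visit_value`/`walk_value` then drive the traversal.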
-
-make_value_visitor!(ValueVisitor, Value,);
-make_value_visitor!(MutValueVisitor, ValueMut, mut);
diff --git a/compiler/rustc_const_eval/src/transform/check_consts/check.rs b/compiler/rustc_const_eval/src/transform/check_consts/check.rs
index d8b8fa927c7..ad5ffa6511f 100644
--- a/compiler/rustc_const_eval/src/transform/check_consts/check.rs
+++ b/compiler/rustc_const_eval/src/transform/check_consts/check.rs
@@ -8,6 +8,7 @@ use rustc_infer::infer::TyCtxtInferExt;
 use rustc_infer::traits::{ImplSource, Obligation, ObligationCause};
 use rustc_middle::mir::visit::{MutatingUseContext, NonMutatingUseContext, PlaceContext, Visitor};
 use rustc_middle::mir::*;
+use rustc_middle::traits::BuiltinImplSource;
 use rustc_middle::ty::{self, adjustment::PointerCoercion, Instance, InstanceDef, Ty, TyCtxt};
 use rustc_middle::ty::{GenericArgKind, GenericArgs};
 use rustc_middle::ty::{TraitRef, TypeVisitableExt};
@@ -20,7 +21,7 @@ use std::mem;
 use std::ops::Deref;
 
 use super::ops::{self, NonConstOp, Status};
-use super::qualifs::{self, CustomEq, HasMutInterior, NeedsDrop, NeedsNonConstDrop};
+use super::qualifs::{self, CustomEq, HasMutInterior, NeedsDrop};
 use super::resolver::FlowSensitiveAnalysis;
 use super::{ConstCx, Qualif};
 use crate::const_eval::is_unstable_const_fn;
@@ -33,7 +34,7 @@ type QualifResults<'mir, 'tcx, Q> =
 pub struct Qualifs<'mir, 'tcx> {
     has_mut_interior: Option<QualifResults<'mir, 'tcx, HasMutInterior>>,
     needs_drop: Option<QualifResults<'mir, 'tcx, NeedsDrop>>,
-    needs_non_const_drop: Option<QualifResults<'mir, 'tcx, NeedsNonConstDrop>>,
+    // needs_non_const_drop: Option<QualifResults<'mir, 'tcx, NeedsNonConstDrop>>,
 }
 
 impl<'mir, 'tcx> Qualifs<'mir, 'tcx> {
@@ -76,15 +77,17 @@ impl<'mir, 'tcx> Qualifs<'mir, 'tcx> {
         local: Local,
         location: Location,
     ) -> bool {
+        // FIXME(effects) replace with `NeedsNonConstDrop` after const traits work again
+        /*
         let ty = ccx.body.local_decls[local].ty;
-        if !NeedsNonConstDrop::in_any_value_of_ty(ccx, ty) {
+        if !NeedsDrop::in_any_value_of_ty(ccx, ty) {
             return false;
         }
 
         let needs_non_const_drop = self.needs_non_const_drop.get_or_insert_with(|| {
             let ConstCx { tcx, body, .. } = *ccx;
 
-            FlowSensitiveAnalysis::new(NeedsNonConstDrop, ccx)
+            FlowSensitiveAnalysis::new(NeedsDrop, ccx)
                 .into_engine(tcx, &body)
                 .iterate_to_fixpoint()
                 .into_results_cursor(&body)
@@ -92,6 +95,9 @@ impl<'mir, 'tcx> Qualifs<'mir, 'tcx> {
 
         needs_non_const_drop.seek_before_primary_effect(location);
         needs_non_const_drop.get().contains(local)
+        */
+
+        self.needs_drop(ccx, local, location)
     }
 
     /// Returns `true` if `local` is `HasMutInterior` at the given `Location`.
@@ -766,7 +772,7 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
                     };
 
                     match implsrc {
-                        Ok(Some(ImplSource::Param(_, ty::BoundConstness::ConstIfConst))) => {
+                        Ok(Some(ImplSource::Param(ty::BoundConstness::ConstIfConst, _))) => {
                             debug!(
                                 "const_trait_impl: provided {:?} via where-clause in {:?}",
                                 trait_ref, param_env
@@ -774,7 +780,7 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
                             return;
                         }
                         // Closure: Fn{Once|Mut}
-                        Ok(Some(ImplSource::Builtin(_)))
+                        Ok(Some(ImplSource::Builtin(BuiltinImplSource::Misc, _)))
                             if trait_ref.self_ty().is_closure()
                                 && tcx.fn_trait_kind_from_def_id(trait_id).is_some() =>
                         {
@@ -797,16 +803,6 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
                         }
                         Ok(Some(ImplSource::UserDefined(data))) => {
                             let callee_name = tcx.item_name(callee);
-                            if let Some(&did) = tcx
-                                .associated_item_def_ids(data.impl_def_id)
-                                .iter()
-                                .find(|did| tcx.item_name(**did) == callee_name)
-                            {
-                                // using internal args is ok here, since this is only
-                                // used for the `resolve` call below
-                                fn_args = GenericArgs::identity_for_item(tcx, did);
-                                callee = did;
-                            }
 
                             if let hir::Constness::NotConst = tcx.constness(data.impl_def_id) {
                                 self.check_op(ops::FnCallNonConst {
@@ -819,6 +815,17 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
                                 });
                                 return;
                             }
+
+                            if let Some(&did) = tcx
+                                .associated_item_def_ids(data.impl_def_id)
+                                .iter()
+                                .find(|did| tcx.item_name(**did) == callee_name)
+                            {
+                                // using internal args is ok here, since this is only
+                                // used for the `resolve` call below
+                                fn_args = GenericArgs::identity_for_item(tcx, did);
+                                callee = did;
+                            }
                         }
                         _ if !tcx.is_const_fn_raw(callee) => {
                             // At this point, it is only legal when the caller is in a trait
@@ -995,8 +1002,9 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
                 let mut err_span = self.span;
                 let ty_of_dropped_place = dropped_place.ty(self.body, self.tcx).ty;
 
+                // FIXME(effects) replace with `NeedsNonConstDrop` once we fix const traits
                 let ty_needs_non_const_drop =
-                    qualifs::NeedsNonConstDrop::in_any_value_of_ty(self.ccx, ty_of_dropped_place);
+                    qualifs::NeedsDrop::in_any_value_of_ty(self.ccx, ty_of_dropped_place);
 
                 debug!(?ty_of_dropped_place, ?ty_needs_non_const_drop);
 
diff --git a/compiler/rustc_const_eval/src/transform/check_consts/ops.rs b/compiler/rustc_const_eval/src/transform/check_consts/ops.rs
index 81337079af2..e785196c744 100644
--- a/compiler/rustc_const_eval/src/transform/check_consts/ops.rs
+++ b/compiler/rustc_const_eval/src/transform/check_consts/ops.rs
@@ -310,8 +310,7 @@ impl<'tcx> NonConstOp<'tcx> for FnCallNonConst<'tcx> {
 
         if let Some(feature) = feature && ccx.tcx.sess.is_nightly_build() {
             err.help(format!(
-                "add `#![feature({})]` to the crate attributes to enable",
-                feature,
+                "add `#![feature({feature})]` to the crate attributes to enable",
             ));
         }
 
@@ -346,10 +345,7 @@ impl<'tcx> NonConstOp<'tcx> for FnCallUnstable {
             err.help("const-stable functions can only call other const-stable functions");
         } else if ccx.tcx.sess.is_nightly_build() {
             if let Some(feature) = feature {
-                err.help(format!(
-                    "add `#![feature({})]` to the crate attributes to enable",
-                    feature
-                ));
+                err.help(format!("add `#![feature({feature})]` to the crate attributes to enable"));
             }
         }
 
diff --git a/compiler/rustc_const_eval/src/transform/check_consts/post_drop_elaboration.rs b/compiler/rustc_const_eval/src/transform/check_consts/post_drop_elaboration.rs
index 1f1640fd80a..e3377bd10c6 100644
--- a/compiler/rustc_const_eval/src/transform/check_consts/post_drop_elaboration.rs
+++ b/compiler/rustc_const_eval/src/transform/check_consts/post_drop_elaboration.rs
@@ -5,7 +5,7 @@ use rustc_span::{symbol::sym, Span};
 
 use super::check::Qualifs;
 use super::ops::{self, NonConstOp};
-use super::qualifs::{NeedsNonConstDrop, Qualif};
+use super::qualifs::{NeedsDrop, Qualif};
 use super::ConstCx;
 
 /// Returns `true` if we should use the more precise live drop checker that runs after drop
@@ -82,7 +82,9 @@ impl<'tcx> Visitor<'tcx> for CheckLiveDrops<'_, 'tcx> {
         match &terminator.kind {
             mir::TerminatorKind::Drop { place: dropped_place, .. } => {
                 let dropped_ty = dropped_place.ty(self.body, self.tcx).ty;
-                if !NeedsNonConstDrop::in_any_value_of_ty(self.ccx, dropped_ty) {
+
+                // FIXME(effects) use `NeedsNonConstDrop`
+                if !NeedsDrop::in_any_value_of_ty(self.ccx, dropped_ty) {
                     // Instead of throwing a bug, we just return here. This is because we have to
                     // run custom `const Drop` impls.
                     return;
diff --git a/compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs b/compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs
index 0ef7ace6965..b152644a551 100644
--- a/compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs
+++ b/compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs
@@ -7,6 +7,7 @@ use rustc_hir::LangItem;
 use rustc_infer::infer::TyCtxtInferExt;
 use rustc_middle::mir;
 use rustc_middle::mir::*;
+use rustc_middle::traits::BuiltinImplSource;
 use rustc_middle::ty::{self, AdtDef, GenericArgsRef, Ty};
 use rustc_trait_selection::traits::{
     self, ImplSource, Obligation, ObligationCause, ObligationCtxt, SelectionContext,
@@ -22,7 +23,8 @@ pub fn in_any_value_of_ty<'tcx>(
     ConstQualifs {
         has_mut_interior: HasMutInterior::in_any_value_of_ty(cx, ty),
         needs_drop: NeedsDrop::in_any_value_of_ty(cx, ty),
-        needs_non_const_drop: NeedsNonConstDrop::in_any_value_of_ty(cx, ty),
+        // FIXME(effects)
+        needs_non_const_drop: NeedsDrop::in_any_value_of_ty(cx, ty),
         custom_eq: CustomEq::in_any_value_of_ty(cx, ty),
         tainted_by_errors,
     }
@@ -172,7 +174,8 @@ impl Qualif for NeedsNonConstDrop {
 
         if !matches!(
             impl_src,
-            ImplSource::Builtin(_) | ImplSource::Param(_, ty::BoundConstness::ConstIfConst)
+            ImplSource::Builtin(BuiltinImplSource::Misc, _)
+                | ImplSource::Param(ty::BoundConstness::ConstIfConst, _)
         ) {
             // If our const destruct candidate is not ConstDestruct or implied by the param env,
             // then it's bad
diff --git a/compiler/rustc_const_eval/src/transform/validate.rs b/compiler/rustc_const_eval/src/transform/validate.rs
index ea0d90dbd51..31effadd2c2 100644
--- a/compiler/rustc_const_eval/src/transform/validate.rs
+++ b/compiler/rustc_const_eval/src/transform/validate.rs
@@ -58,11 +58,10 @@ impl<'tcx> MirPass<'tcx> for Validator {
             .iterate_to_fixpoint()
             .into_results_cursor(body);
 
-        let mut checker = TypeChecker {
+        let mut cfg_checker = CfgChecker {
             when: &self.when,
             body,
             tcx,
-            param_env,
             mir_phase,
             unwind_edge_count: 0,
             reachable_blocks: traversal::reachable_as_bitset(body),
@@ -70,13 +69,17 @@ impl<'tcx> MirPass<'tcx> for Validator {
             place_cache: FxHashSet::default(),
             value_cache: FxHashSet::default(),
         };
-        checker.visit_body(body);
-        checker.check_cleanup_control_flow();
+        cfg_checker.visit_body(body);
+        cfg_checker.check_cleanup_control_flow();
+
+        for (location, msg) in validate_types(tcx, self.mir_phase, param_env, body) {
+            cfg_checker.fail(location, msg);
+        }
 
         if let MirPhase::Runtime(_) = body.phase {
             if let ty::InstanceDef::Item(_) = body.source.instance {
                 if body.has_free_regions() {
-                    checker.fail(
+                    cfg_checker.fail(
                         Location::START,
                         format!("Free regions in optimized {} MIR", body.phase.name()),
                     );
@@ -86,11 +89,10 @@ impl<'tcx> MirPass<'tcx> for Validator {
     }
 }
 
-struct TypeChecker<'a, 'tcx> {
+struct CfgChecker<'a, 'tcx> {
     when: &'a str,
     body: &'a Body<'tcx>,
     tcx: TyCtxt<'tcx>,
-    param_env: ParamEnv<'tcx>,
     mir_phase: MirPhase,
     unwind_edge_count: usize,
     reachable_blocks: BitSet<BasicBlock>,
@@ -99,7 +101,7 @@ struct TypeChecker<'a, 'tcx> {
     value_cache: FxHashSet<u128>,
 }
 
-impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
+impl<'a, 'tcx> CfgChecker<'a, 'tcx> {
     #[track_caller]
     fn fail(&self, location: Location, msg: impl AsRef<str>) {
         let span = self.body.source_info(location).span;
@@ -147,7 +149,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
                 }
             }
         } else {
-            self.fail(location, format!("encountered jump to invalid basic block {:?}", bb))
+            self.fail(location, format!("encountered jump to invalid basic block {bb:?}"))
         }
     }
 
@@ -220,8 +222,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
                     self.fail(
                         Location { block: bb, statement_index: 0 },
                         format!(
-                            "Cleanup control flow violation: Cycle involving edge {:?} -> {:?}",
-                            bb, parent,
+                            "Cleanup control flow violation: Cycle involving edge {bb:?} -> {parent:?}",
                         ),
                     );
                     break;
@@ -248,35 +249,14 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
             UnwindAction::Unreachable | UnwindAction::Terminate => (),
         }
     }
-
-    /// Check if src can be assigned into dest.
-    /// This is not precise, it will accept some incorrect assignments.
-    fn mir_assign_valid_types(&self, src: Ty<'tcx>, dest: Ty<'tcx>) -> bool {
-        // Fast path before we normalize.
-        if src == dest {
-            // Equal types, all is good.
-            return true;
-        }
-
-        // We sometimes have to use `defining_opaque_types` for subtyping
-        // to succeed here and figuring out how exactly that should work
-        // is annoying. It is harmless enough to just not validate anything
-        // in that case. We still check this after analysis as all opaque
-        // types have been revealed at this point.
-        if (src, dest).has_opaque_types() {
-            return true;
-        }
-
-        crate::util::is_subtype(self.tcx, self.param_env, src, dest)
-    }
 }
 
-impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
+impl<'a, 'tcx> Visitor<'tcx> for CfgChecker<'a, 'tcx> {
     fn visit_local(&mut self, local: Local, context: PlaceContext, location: Location) {
         if self.body.local_decls.get(local).is_none() {
             self.fail(
                 location,
-                format!("local {:?} has no corresponding declaration in `body.local_decls`", local),
+                format!("local {local:?} has no corresponding declaration in `body.local_decls`"),
             );
         }
 
@@ -291,11 +271,280 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
             self.storage_liveness.seek_after_primary_effect(location);
             let locals_with_storage = self.storage_liveness.get();
             if !locals_with_storage.contains(local) {
-                self.fail(location, format!("use of local {:?}, which has no storage here", local));
+                self.fail(location, format!("use of local {local:?}, which has no storage here"));
             }
         }
     }
 
+    fn visit_statement(&mut self, statement: &Statement<'tcx>, location: Location) {
+        match &statement.kind {
+            StatementKind::Assign(box (dest, rvalue)) => {
+                // FIXME(JakobDegen): Check this for all rvalues, not just this one.
+                if let Rvalue::Use(Operand::Copy(src) | Operand::Move(src)) = rvalue {
+                    // The sides of an assignment must not alias. Currently this just checks whether
+                    // the places are identical.
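+                    // (So e.g. `_1 = move _1` is rejected, while partially overlapping
+                    // projections are not caught by this check yet.)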
+                    if dest == src {
+                        self.fail(
+                            location,
+                            "encountered `Assign` statement with overlapping memory",
+                        );
+                    }
+                }
+            }
+            StatementKind::AscribeUserType(..) => {
+                if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
+                    self.fail(
+                        location,
+                        "`AscribeUserType` should have been removed after drop lowering phase",
+                    );
+                }
+            }
+            StatementKind::FakeRead(..) => {
+                if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
+                    self.fail(
+                        location,
+                        "`FakeRead` should have been removed after drop lowering phase",
+                    );
+                }
+            }
+            StatementKind::SetDiscriminant { .. } => {
+                if self.mir_phase < MirPhase::Runtime(RuntimePhase::Initial) {
+                    self.fail(location, "`SetDiscriminant`is not allowed until deaggregation");
+                }
+            }
+            StatementKind::Deinit(..) => {
+                if self.mir_phase < MirPhase::Runtime(RuntimePhase::Initial) {
+                    self.fail(location, "`Deinit`is not allowed until deaggregation");
+                }
+            }
+            StatementKind::Retag(kind, _) => {
+                // FIXME(JakobDegen) The validator should check that `self.mir_phase <
+                // DropsLowered`. However, this causes ICEs with generation of drop shims, which
+                // seem to fail to set their `MirPhase` correctly.
+                if matches!(kind, RetagKind::Raw | RetagKind::TwoPhase) {
+                    self.fail(location, format!("explicit `{kind:?}` is forbidden"));
+                }
+            }
+            StatementKind::StorageLive(local) => {
+                // We check that the local is not live when entering a `StorageLive` for it.
+                // Technically, violating this restriction is only UB, not an indication of
+                // ill-formed MIR. This means that an optimization which turns MIR that
+                // already has UB into MIR that fails this check is not necessarily wrong. However,
+                // we have no such optimizations at the moment, and so we include this check anyway
+                // to help us catch bugs. If you happen to write an optimization that might cause
+                // this to incorrectly fire, feel free to remove this check.
+                if self.reachable_blocks.contains(location.block) {
+                    self.storage_liveness.seek_before_primary_effect(location);
+                    let locals_with_storage = self.storage_liveness.get();
+                    if locals_with_storage.contains(*local) {
+                        self.fail(
+                            location,
+                            format!("StorageLive({local:?}) which already has storage here"),
+                        );
+                    }
+                }
+            }
+            StatementKind::StorageDead(_)
+            | StatementKind::Intrinsic(_)
+            | StatementKind::Coverage(_)
+            | StatementKind::ConstEvalCounter
+            | StatementKind::PlaceMention(..)
+            | StatementKind::Nop => {}
+        }
+
+        self.super_statement(statement, location);
+    }
+
+    fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, location: Location) {
+        match &terminator.kind {
+            TerminatorKind::Goto { target } => {
+                self.check_edge(location, *target, EdgeKind::Normal);
+            }
+            TerminatorKind::SwitchInt { targets, discr: _ } => {
+                for (_, target) in targets.iter() {
+                    self.check_edge(location, target, EdgeKind::Normal);
+                }
+                self.check_edge(location, targets.otherwise(), EdgeKind::Normal);
+
+                self.value_cache.clear();
+                self.value_cache.extend(targets.iter().map(|(value, _)| value));
+                let has_duplicates = targets.iter().len() != self.value_cache.len();
+                if has_duplicates {
+                    self.fail(
+                        location,
+                        format!(
+                            "duplicated values in `SwitchInt` terminator: {:?}",
+                            terminator.kind,
+                        ),
+                    );
+                }
+            }
+            TerminatorKind::Drop { target, unwind, .. } => {
+                self.check_edge(location, *target, EdgeKind::Normal);
+                self.check_unwind_edge(location, *unwind);
+            }
+            TerminatorKind::Call { args, destination, target, unwind, .. } => {
+                if let Some(target) = target {
+                    self.check_edge(location, *target, EdgeKind::Normal);
+                }
+                self.check_unwind_edge(location, *unwind);
+
+                // The call destination place and any `Operand::Move` place used as an argument
+                // might be passed by reference to the callee. Consequently, they must not
+                // overlap. Currently this simply checks for duplicate places.
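+                // (E.g. passing the same place twice by `move`, or reusing the destination
+                // itself as a `move` argument, is flagged here.)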
+                self.place_cache.clear();
+                self.place_cache.insert(destination.as_ref());
+                let mut has_duplicates = false;
+                for arg in args {
+                    if let Operand::Move(place) = arg {
+                        has_duplicates |= !self.place_cache.insert(place.as_ref());
+                    }
+                }
+
+                if has_duplicates {
+                    self.fail(
+                        location,
+                        format!(
+                            "encountered overlapping memory in `Call` terminator: {:?}",
+                            terminator.kind,
+                        ),
+                    );
+                }
+            }
+            TerminatorKind::Assert { target, unwind, .. } => {
+                self.check_edge(location, *target, EdgeKind::Normal);
+                self.check_unwind_edge(location, *unwind);
+            }
+            TerminatorKind::Yield { resume, drop, .. } => {
+                if self.body.generator.is_none() {
+                    self.fail(location, "`Yield` cannot appear outside generator bodies");
+                }
+                if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
+                    self.fail(location, "`Yield` should have been replaced by generator lowering");
+                }
+                self.check_edge(location, *resume, EdgeKind::Normal);
+                if let Some(drop) = drop {
+                    self.check_edge(location, *drop, EdgeKind::Normal);
+                }
+            }
+            TerminatorKind::FalseEdge { real_target, imaginary_target } => {
+                if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
+                    self.fail(
+                        location,
+                        "`FalseEdge` should have been removed after drop elaboration",
+                    );
+                }
+                self.check_edge(location, *real_target, EdgeKind::Normal);
+                self.check_edge(location, *imaginary_target, EdgeKind::Normal);
+            }
+            TerminatorKind::FalseUnwind { real_target, unwind } => {
+                if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
+                    self.fail(
+                        location,
+                        "`FalseUnwind` should have been removed after drop elaboration",
+                    );
+                }
+                self.check_edge(location, *real_target, EdgeKind::Normal);
+                self.check_unwind_edge(location, *unwind);
+            }
+            TerminatorKind::InlineAsm { destination, unwind, .. } => {
+                if let Some(destination) = destination {
+                    self.check_edge(location, *destination, EdgeKind::Normal);
+                }
+                self.check_unwind_edge(location, *unwind);
+            }
+            TerminatorKind::GeneratorDrop => {
+                if self.body.generator.is_none() {
+                    self.fail(location, "`GeneratorDrop` cannot appear outside generator bodies");
+                }
+                if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
+                    self.fail(
+                        location,
+                        "`GeneratorDrop` should have been replaced by generator lowering",
+                    );
+                }
+            }
+            TerminatorKind::Resume | TerminatorKind::Terminate => {
+                let bb = location.block;
+                if !self.body.basic_blocks[bb].is_cleanup {
+                    self.fail(
+                        location,
+                        "Cannot `Resume` or `Terminate` from non-cleanup basic block",
+                    )
+                }
+            }
+            TerminatorKind::Return => {
+                let bb = location.block;
+                if self.body.basic_blocks[bb].is_cleanup {
+                    self.fail(location, "Cannot `Return` from cleanup basic block")
+                }
+            }
+            TerminatorKind::Unreachable => {}
+        }
+
+        self.super_terminator(terminator, location);
+    }
+
+    fn visit_source_scope(&mut self, scope: SourceScope) {
+        if self.body.source_scopes.get(scope).is_none() {
+            self.tcx.sess.diagnostic().delay_span_bug(
+                self.body.span,
+                format!(
+                    "broken MIR in {:?} ({}):\ninvalid source scope {:?}",
+                    self.body.source.instance, self.when, scope,
+                ),
+            );
+        }
+    }
+}
+
+pub fn validate_types<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    mir_phase: MirPhase,
+    param_env: ty::ParamEnv<'tcx>,
+    body: &Body<'tcx>,
+) -> Vec<(Location, String)> {
+    let mut type_checker = TypeChecker { body, tcx, param_env, mir_phase, failures: Vec::new() };
+    type_checker.visit_body(body);
+    type_checker.failures
+}
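+
+// A hypothetical caller sketch (not from this commit): a pass that wants only the type checks
+// can consume the returned failures itself, e.g.
+//
+//     for (location, msg) in validate_types(tcx, mir_phase, param_env, body) {
+//         // report `msg` at `location` however the caller sees fit
+//     }
+//
+// In this file, `Validator::run_pass` above routes these failures into `CfgChecker::fail`.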
+
+struct TypeChecker<'a, 'tcx> {
+    body: &'a Body<'tcx>,
+    tcx: TyCtxt<'tcx>,
+    param_env: ParamEnv<'tcx>,
+    mir_phase: MirPhase,
+    failures: Vec<(Location, String)>,
+}
+
+impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
+    fn fail(&mut self, location: Location, msg: impl Into<String>) {
+        self.failures.push((location, msg.into()));
+    }
+
+    /// Check if `src` can be assigned into `dest`.
+    /// This is not precise; it will accept some incorrect assignments.
+    fn mir_assign_valid_types(&self, src: Ty<'tcx>, dest: Ty<'tcx>) -> bool {
+        // Fast path before we normalize.
+        if src == dest {
+            // Equal types, all is good.
+            return true;
+        }
+
+        // We sometimes have to use `defining_opaque_types` for subtyping
+        // to succeed here and figuring out how exactly that should work
+        // is annoying. It is harmless enough to just not validate anything
+        // in that case. We still check this after analysis as all opaque
+        // types have been revealed at this point.
+        if (src, dest).has_opaque_types() {
+            return true;
+        }
+
+        crate::util::is_subtype(self.tcx, self.param_env, src, dest)
+    }
+}
+
+impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
     fn visit_operand(&mut self, operand: &Operand<'tcx>, location: Location) {
         // This check is somewhat expensive, so only run it when -Zvalidate-mir is passed.
         if self.tcx.sess.opts.unstable_opts.validate_mir
@@ -306,7 +555,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
                 let ty = place.ty(&self.body.local_decls, self.tcx).ty;
 
                 if !ty.is_copy_modulo_regions(self.tcx, self.param_env) {
-                    self.fail(location, format!("`Operand::Copy` with non-`Copy` type {}", ty));
+                    self.fail(location, format!("`Operand::Copy` with non-`Copy` type {ty}"));
                 }
             }
         }
@@ -325,7 +574,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
             ProjectionElem::Index(index) => {
                 let index_ty = self.body.local_decls[index].ty;
                 if index_ty != self.tcx.types.usize {
-                    self.fail(location, format!("bad index ({:?} != usize)", index_ty))
+                    self.fail(location, format!("bad index ({index_ty:?} != usize)"))
                 }
             }
             ProjectionElem::Deref
@@ -336,22 +585,21 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
                 if base_ty.is_box() {
                     self.fail(
                         location,
-                        format!("{:?} dereferenced after ElaborateBoxDerefs", base_ty),
+                        format!("{base_ty:?} dereferenced after ElaborateBoxDerefs"),
                     )
                 }
             }
             ProjectionElem::Field(f, ty) => {
                 let parent_ty = place_ref.ty(&self.body.local_decls, self.tcx);
-                let fail_out_of_bounds = |this: &Self, location| {
-                    this.fail(location, format!("Out of bounds field {:?} for {:?}", f, parent_ty));
+                let fail_out_of_bounds = |this: &mut Self, location| {
+                    this.fail(location, format!("Out of bounds field {f:?} for {parent_ty:?}"));
                 };
-                let check_equal = |this: &Self, location, f_ty| {
+                let check_equal = |this: &mut Self, location, f_ty| {
                     if !this.mir_assign_valid_types(ty, f_ty) {
                         this.fail(
                             location,
                             format!(
-                                "Field projection `{:?}.{:?}` specified type `{:?}`, but actual type is `{:?}`",
-                                place_ref, f, ty, f_ty
+                                "Field projection `{place_ref:?}.{f:?}` specified type `{ty:?}`, but actual type is `{f_ty:?}`"
                             )
                         )
                     }
@@ -399,7 +647,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
                             let Some(layout) = gen_body.generator_layout() else {
                                 self.fail(
                                     location,
-                                    format!("No generator layout for {:?}", parent_ty),
+                                    format!("No generator layout for {parent_ty:?}"),
                                 );
                                 return;
                             };
@@ -412,7 +660,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
                             let Some(f_ty) = layout.field_tys.get(local) else {
                                 self.fail(
                                     location,
-                                    format!("Out of bounds local {:?} for {:?}", local, parent_ty),
+                                    format!("Out of bounds local {local:?} for {parent_ty:?}"),
                                 );
                                 return;
                             };
@@ -440,9 +688,9 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
     }
 
     fn visit_var_debug_info(&mut self, debuginfo: &VarDebugInfo<'tcx>) {
-        let check_place = |place: Place<'_>| {
+        let check_place = |this: &mut Self, place: Place<'_>| {
             if place.projection.iter().any(|p| !p.can_use_in_debuginfo()) {
-                self.fail(
+                this.fail(
                     START_BLOCK.start_location(),
                     format!("illegal place {:?} in debuginfo for {:?}", place, debuginfo.name),
                 );
@@ -451,21 +699,21 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
         match debuginfo.value {
             VarDebugInfoContents::Const(_) => {}
             VarDebugInfoContents::Place(place) => {
-                check_place(place);
+                check_place(self, place);
                 if debuginfo.references != 0 && place.projection.last() == Some(&PlaceElem::Deref) {
                     self.fail(
                         START_BLOCK.start_location(),
-                        format!("debuginfo {:?}, has both ref and deref", debuginfo),
+                        format!("debuginfo {debuginfo:?}, has both ref and deref"),
                     );
                 }
             }
             VarDebugInfoContents::Composite { ty, ref fragments } => {
                 for f in fragments {
-                    check_place(f.contents);
+                    check_place(self, f.contents);
                     if ty.is_union() || ty.is_enum() {
                         self.fail(
                             START_BLOCK.start_location(),
-                            format!("invalid type {:?} for composite debuginfo", ty),
+                            format!("invalid type {ty:?} for composite debuginfo"),
                         );
                     }
                     if f.projection.iter().any(|p| !matches!(p, PlaceElem::Field(..))) {
@@ -492,7 +740,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
             && cntxt != PlaceContext::NonUse(NonUseContext::VarDebugInfo)
             && place.projection[1..].contains(&ProjectionElem::Deref)
         {
-            self.fail(location, format!("{:?}, has deref at the wrong place", place));
+            self.fail(location, format!("{place:?}, has deref at the wrong place"));
         }
 
         self.super_place(place, cntxt, location);
@@ -552,7 +800,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
                     Offset => {
                         check_kinds!(a, "Cannot offset non-pointer type {:?}", ty::RawPtr(..));
                         if b != self.tcx.types.isize && b != self.tcx.types.usize {
-                            self.fail(location, format!("Cannot offset by non-isize type {:?}", b));
+                            self.fail(location, format!("Cannot offset by non-isize type {b:?}"));
                         }
                     }
                     Eq | Lt | Le | Ne | Ge | Gt => {
@@ -617,13 +865,12 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
                             self.fail(
                                 location,
                                 format!(
-                                    "Cannot perform checked arithmetic on unequal types {:?} and {:?}",
-                                    a, b
+                                    "Cannot perform checked arithmetic on unequal types {a:?} and {b:?}"
                                 ),
                             );
                         }
                     }
-                    _ => self.fail(location, format!("There is no checked version of {:?}", op)),
+                    _ => self.fail(location, format!("There is no checked version of {op:?}")),
                 }
             }
             Rvalue::UnaryOp(op, operand) => {
@@ -718,7 +965,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
                 }
             }
             Rvalue::NullaryOp(NullOp::OffsetOf(fields), container) => {
-                let fail_out_of_bounds = |this: &Self, location, field, ty| {
+                let fail_out_of_bounds = |this: &mut Self, location, field, ty| {
                     this.fail(location, format!("Out of bounds field {field:?} for {ty:?}"));
                 };
 
@@ -828,7 +1075,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
                 if !ty.is_bool() {
                     self.fail(
                         location,
-                        format!("`assume` argument must be `bool`, but got: `{}`", ty),
+                        format!("`assume` argument must be `bool`, but got: `{ty}`"),
                     );
                 }
             }
@@ -841,7 +1088,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
                 } else {
                     self.fail(
                         location,
-                        format!("Expected src to be ptr in copy_nonoverlapping, got: {}", src_ty),
+                        format!("Expected src to be ptr in copy_nonoverlapping, got: {src_ty}"),
                     );
                     return;
                 };
@@ -851,19 +1098,19 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
                 } else {
                     self.fail(
                         location,
-                        format!("Expected dst to be ptr in copy_nonoverlapping, got: {}", dst_ty),
+                        format!("Expected dst to be ptr in copy_nonoverlapping, got: {dst_ty}"),
                     );
                     return;
                 };
                 // since CopyNonOverlapping is parametrized by 1 type,
                 // we only need to check that they are equal and not keep an extra parameter.
                 if !self.mir_assign_valid_types(op_src_ty, op_dst_ty) {
-                    self.fail(location, format!("bad arg ({:?} != {:?})", op_src_ty, op_dst_ty));
+                    self.fail(location, format!("bad arg ({op_src_ty:?} != {op_dst_ty:?})"));
                 }
 
                 let op_cnt_ty = count.ty(&self.body.local_decls, self.tcx);
                 if op_cnt_ty != self.tcx.types.usize {
-                    self.fail(location, format!("bad arg ({:?} != usize)", op_cnt_ty))
+                    self.fail(location, format!("bad arg ({op_cnt_ty:?} != usize)"))
                 }
             }
             StatementKind::SetDiscriminant { place, .. } => {
@@ -875,8 +1122,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
                     self.fail(
                         location,
                         format!(
-                            "`SetDiscriminant` is only allowed on ADTs and generators, not {:?}",
-                            pty
+                            "`SetDiscriminant` is only allowed on ADTs and generators, not {pty:?}"
                         ),
                     );
                 }
@@ -891,29 +1137,11 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
                 // DropsLowered`. However, this causes ICEs with generation of drop shims, which
                 // seem to fail to set their `MirPhase` correctly.
                 if matches!(kind, RetagKind::Raw | RetagKind::TwoPhase) {
-                    self.fail(location, format!("explicit `{:?}` is forbidden", kind));
+                    self.fail(location, format!("explicit `{kind:?}` is forbidden"));
                 }
             }
-            StatementKind::StorageLive(local) => {
-                // We check that the local is not live when entering a `StorageLive` for it.
-                // Technically, violating this restriction is only UB and not actually indicative
-                // of not well-formed MIR. This means that an optimization which turns MIR that
-                // already has UB into MIR that fails this check is not necessarily wrong. However,
-                // we have no such optimizations at the moment, and so we include this check anyway
-                // to help us catch bugs. If you happen to write an optimization that might cause
-                // this to incorrectly fire, feel free to remove this check.
-                if self.reachable_blocks.contains(location.block) {
-                    self.storage_liveness.seek_before_primary_effect(location);
-                    let locals_with_storage = self.storage_liveness.get();
-                    if locals_with_storage.contains(*local) {
-                        self.fail(
-                            location,
-                            format!("StorageLive({local:?}) which already has storage here"),
-                        );
-                    }
-                }
-            }
-            StatementKind::StorageDead(_)
+            StatementKind::StorageLive(_)
+            | StatementKind::StorageDead(_)
             | StatementKind::Coverage(_)
             | StatementKind::ConstEvalCounter
             | StatementKind::PlaceMention(..)
@@ -925,9 +1153,6 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
 
     fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, location: Location) {
         match &terminator.kind {
-            TerminatorKind::Goto { target } => {
-                self.check_edge(location, *target, EdgeKind::Normal);
-            }
             TerminatorKind::SwitchInt { targets, discr } => {
                 let switch_ty = discr.ty(&self.body.local_decls, self.tcx);
 
@@ -941,164 +1166,49 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
                     other => bug!("unhandled type: {:?}", other),
                 });
 
-                for (value, target) in targets.iter() {
+                for (value, _) in targets.iter() {
                     if Scalar::<()>::try_from_uint(value, size).is_none() {
                         self.fail(
                             location,
-                            format!("the value {:#x} is not a proper {:?}", value, switch_ty),
+                            format!("the value {value:#x} is not a proper {switch_ty:?}"),
                         )
                     }
-
-                    self.check_edge(location, target, EdgeKind::Normal);
-                }
-                self.check_edge(location, targets.otherwise(), EdgeKind::Normal);
-
-                self.value_cache.clear();
-                self.value_cache.extend(targets.iter().map(|(value, _)| value));
-                let has_duplicates = targets.iter().len() != self.value_cache.len();
-                if has_duplicates {
-                    self.fail(
-                        location,
-                        format!(
-                            "duplicated values in `SwitchInt` terminator: {:?}",
-                            terminator.kind,
-                        ),
-                    );
                 }
             }
-            TerminatorKind::Drop { target, unwind, .. } => {
-                self.check_edge(location, *target, EdgeKind::Normal);
-                self.check_unwind_edge(location, *unwind);
-            }
-            TerminatorKind::Call { func, args, destination, target, unwind, .. } => {
+            TerminatorKind::Call { func, .. } => {
                 let func_ty = func.ty(&self.body.local_decls, self.tcx);
                 match func_ty.kind() {
                     ty::FnPtr(..) | ty::FnDef(..) => {}
                     _ => self.fail(
                         location,
-                        format!("encountered non-callable type {} in `Call` terminator", func_ty),
+                        format!("encountered non-callable type {func_ty} in `Call` terminator"),
                     ),
                 }
-                if let Some(target) = target {
-                    self.check_edge(location, *target, EdgeKind::Normal);
-                }
-                self.check_unwind_edge(location, *unwind);
-
-                // The call destination place and Operand::Move place used as an argument might be
-                // passed by a reference to the callee. Consequently they must be non-overlapping.
-                // Currently this simply checks for duplicate places.
-                self.place_cache.clear();
-                self.place_cache.insert(destination.as_ref());
-                let mut has_duplicates = false;
-                for arg in args {
-                    if let Operand::Move(place) = arg {
-                        has_duplicates |= !self.place_cache.insert(place.as_ref());
-                    }
-                }
-
-                if has_duplicates {
-                    self.fail(
-                        location,
-                        format!(
-                            "encountered overlapping memory in `Call` terminator: {:?}",
-                            terminator.kind,
-                        ),
-                    );
-                }
             }
-            TerminatorKind::Assert { cond, target, unwind, .. } => {
+            TerminatorKind::Assert { cond, .. } => {
                 let cond_ty = cond.ty(&self.body.local_decls, self.tcx);
                 if cond_ty != self.tcx.types.bool {
                     self.fail(
                         location,
                         format!(
-                            "encountered non-boolean condition of type {} in `Assert` terminator",
-                            cond_ty
+                            "encountered non-boolean condition of type {cond_ty} in `Assert` terminator"
                         ),
                     );
                 }
-                self.check_edge(location, *target, EdgeKind::Normal);
-                self.check_unwind_edge(location, *unwind);
-            }
-            TerminatorKind::Yield { resume, drop, .. } => {
-                if self.body.generator.is_none() {
-                    self.fail(location, "`Yield` cannot appear outside generator bodies");
-                }
-                if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
-                    self.fail(location, "`Yield` should have been replaced by generator lowering");
-                }
-                self.check_edge(location, *resume, EdgeKind::Normal);
-                if let Some(drop) = drop {
-                    self.check_edge(location, *drop, EdgeKind::Normal);
-                }
-            }
-            TerminatorKind::FalseEdge { real_target, imaginary_target } => {
-                if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
-                    self.fail(
-                        location,
-                        "`FalseEdge` should have been removed after drop elaboration",
-                    );
-                }
-                self.check_edge(location, *real_target, EdgeKind::Normal);
-                self.check_edge(location, *imaginary_target, EdgeKind::Normal);
-            }
-            TerminatorKind::FalseUnwind { real_target, unwind } => {
-                if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
-                    self.fail(
-                        location,
-                        "`FalseUnwind` should have been removed after drop elaboration",
-                    );
-                }
-                self.check_edge(location, *real_target, EdgeKind::Normal);
-                self.check_unwind_edge(location, *unwind);
-            }
-            TerminatorKind::InlineAsm { destination, unwind, .. } => {
-                if let Some(destination) = destination {
-                    self.check_edge(location, *destination, EdgeKind::Normal);
-                }
-                self.check_unwind_edge(location, *unwind);
             }
-            TerminatorKind::GeneratorDrop => {
-                if self.body.generator.is_none() {
-                    self.fail(location, "`GeneratorDrop` cannot appear outside generator bodies");
-                }
-                if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
-                    self.fail(
-                        location,
-                        "`GeneratorDrop` should have been replaced by generator lowering",
-                    );
-                }
-            }
-            TerminatorKind::Resume | TerminatorKind::Terminate => {
-                let bb = location.block;
-                if !self.body.basic_blocks[bb].is_cleanup {
-                    self.fail(
-                        location,
-                        "Cannot `Resume` or `Terminate` from non-cleanup basic block",
-                    )
-                }
-            }
-            TerminatorKind::Return => {
-                let bb = location.block;
-                if self.body.basic_blocks[bb].is_cleanup {
-                    self.fail(location, "Cannot `Return` from cleanup basic block")
-                }
-            }
-            TerminatorKind::Unreachable => {}
+            TerminatorKind::Goto { .. }
+            | TerminatorKind::Drop { .. }
+            | TerminatorKind::Yield { .. }
+            | TerminatorKind::FalseEdge { .. }
+            | TerminatorKind::FalseUnwind { .. }
+            | TerminatorKind::InlineAsm { .. }
+            | TerminatorKind::GeneratorDrop
+            | TerminatorKind::Resume
+            | TerminatorKind::Terminate
+            | TerminatorKind::Return
+            | TerminatorKind::Unreachable => {}
         }
 
         self.super_terminator(terminator, location);
     }
-
-    fn visit_source_scope(&mut self, scope: SourceScope) {
-        if self.body.source_scopes.get(scope).is_none() {
-            self.tcx.sess.diagnostic().delay_span_bug(
-                self.body.span,
-                format!(
-                    "broken MIR in {:?} ({}):\ninvalid source scope {:?}",
-                    self.body.source.instance, self.when, scope,
-                ),
-            );
-        }
-    }
 }
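Note that every `check_edge`/`check_unwind_edge` call, the duplicate-value check
for `SwitchInt`, the overlapping-operand check for `Call`, and the
`visit_source_scope` override all leave `TypeChecker` in this diff; the growing
line offsets in the hunk headers suggest these CFG-level checks were relocated
to a separate visitor earlier in the file rather than dropped. For orientation,
the edge check boils down to "the target block exists and cleanup-ness is
consistent across the edge"; a condensed sketch of such a helper, not the
verbatim compiler code:

    fn check_edge(&self, location: Location, bb: BasicBlock, edge_kind: EdgeKind) {
        if bb == START_BLOCK {
            self.fail(location, "start block must not have predecessors");
        }
        let Some(target) = self.body.basic_blocks.get(bb) else {
            return self.fail(location, format!("jump to invalid basic block {bb:?}"));
        };
        let src = &self.body.basic_blocks[location.block];
        match (src.is_cleanup, target.is_cleanup, edge_kind) {
            // Plain edges must connect blocks of matching cleanup-ness...
            (false, false, EdgeKind::Normal) | (true, true, EdgeKind::Normal)
            // ...and unwind edges may only lead from non-cleanup into cleanup.
            | (false, true, EdgeKind::Unwind) => {}
            _ => self.fail(
                location,
                format!("{edge_kind:?} edge to {bb:?} violates cleanup rules"),
            ),
        }
    }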
diff --git a/compiler/rustc_const_eval/src/util/compare_types.rs b/compiler/rustc_const_eval/src/util/compare_types.rs
index d6a2ffb7511..83376c8e992 100644
--- a/compiler/rustc_const_eval/src/util/compare_types.rs
+++ b/compiler/rustc_const_eval/src/util/compare_types.rs
@@ -56,8 +56,16 @@ pub fn is_subtype<'tcx>(
     // With `Reveal::All`, opaque types get normalized away; with `Reveal::UserFacing`
     // we would get unification errors because we're unable to look into opaque types,
     // even if they're constrained in our current function.
-    //
-    // It seems very unlikely that this hides any bugs.
-    let _ = infcx.take_opaque_types();
+    for (key, ty) in infcx.take_opaque_types() {
+        let hidden_ty = tcx.type_of(key.def_id).instantiate(tcx, key.args);
+        if hidden_ty != ty.hidden_type.ty {
+            span_bug!(
+                ty.hidden_type.span,
+                "{}, {}",
+                tcx.type_of(key.def_id).instantiate(tcx, key.args),
+                ty.hidden_type.ty
+            );
+        }
+    }
     errors.is_empty()
 }
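The hardening here replaces a silent `let _ = infcx.take_opaque_types()` with an
assertion that every opaque type constrained during the subtyping check was
inferred to exactly the hidden type the type system already recorded. As for
`is_subtype` itself, in MIR validation the interesting cases are largely about
lifetimes and variance. A runnable surface-Rust illustration of the relation it
decides (the function name is illustrative only):

    // `&'static str` is a subtype of `&'a str` for any shorter lifetime 'a,
    // so the coercion below type-checks; the reverse direction would not.
    fn shorten<'a>(s: &'static str) -> &'a str {
        s
    }

    fn main() {
        let short: &str = shorten("static data");
        println!("{short}");
    }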