author    Ralf Jung <post@ralfj.de>  2023-07-24 11:44:58 +0200
committer Ralf Jung <post@ralfj.de>  2023-07-25 14:30:58 +0200
commit    a2bcafa500fa407fa77716ab78b353b7d7daac5d (patch)
tree      58e593f4394ea240723c4b6aa69a84b2e6ac437a /compiler/rustc_const_eval/src/interpret
parent    a593de4fab309968d305f9c6812c2203d4431464 (diff)
interpret: refactor projection code to work on a common trait, and use that for visitors
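For illustration, here is a standalone sketch of the commit's core idea, under simplified names (the real `Projectable` trait, defined in projection.rs below, is additionally generic over the interpreter machine and returns `InterpResult`): one trait provides the primitive `offset` operation, each value flavor implements it once, and the projection helpers are written a single time on top of it.

    // Simplified, self-contained model of the refactor. `Layout`, `MPlace`,
    // and the numeric addresses are illustrative, not rustc's types.
    #[derive(Clone, Copy, Debug)]
    struct Layout {
        size: u64,
        field_offsets: &'static [u64],
    }

    trait Projectable: Sized {
        fn layout(&self) -> Layout;
        // The one primitive every implementor provides.
        fn offset(&self, offset: u64, layout: Layout) -> Self;
        // Written once for all implementors, like the real trait's default
        // methods: a "transmute" is a zero-offset projection to a same-sized
        // layout.
        fn transmute(&self, layout: Layout) -> Self {
            assert_eq!(self.layout().size, layout.size);
            self.offset(0, layout)
        }
    }

    #[derive(Clone, Copy, Debug)]
    struct MPlace {
        addr: u64,
        layout: Layout,
    }

    impl Projectable for MPlace {
        fn layout(&self) -> Layout {
            self.layout
        }
        fn offset(&self, offset: u64, layout: Layout) -> Self {
            MPlace { addr: self.addr + offset, layout }
        }
    }

    // One generic helper replaces `operand_field`/`place_field`/`mplace_field`.
    fn project_field<P: Projectable>(base: &P, field: usize, field_layout: Layout) -> P {
        base.offset(base.layout().field_offsets[field], field_layout)
    }

    fn main() {
        let pair = Layout { size: 8, field_offsets: &[0, 4] };
        let field = Layout { size: 4, field_offsets: &[] };
        let base = MPlace { addr: 0x1000, layout: pair };
        let second = project_field(&base, 1, field);
        assert_eq!(second.addr, 0x1004);
    }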
Diffstat (limited to 'compiler/rustc_const_eval/src/interpret')
 compiler/rustc_const_eval/src/interpret/cast.rs                       |   4
 compiler/rustc_const_eval/src/interpret/discriminant.rs               |  28
 compiler/rustc_const_eval/src/interpret/intern.rs                     | 127
 compiler/rustc_const_eval/src/interpret/intrinsics.rs                 |   6
 compiler/rustc_const_eval/src/interpret/intrinsics/caller_location.rs |   6
 compiler/rustc_const_eval/src/interpret/mod.rs                        |   3
 compiler/rustc_const_eval/src/interpret/operand.rs                    | 128
 compiler/rustc_const_eval/src/interpret/operator.rs                   |   4
 compiler/rustc_const_eval/src/interpret/place.rs                      | 146
 compiler/rustc_const_eval/src/interpret/projection.rs                 | 424
 compiler/rustc_const_eval/src/interpret/step.rs                       |   4
 compiler/rustc_const_eval/src/interpret/terminator.rs                 |  19
 compiler/rustc_const_eval/src/interpret/validity.rs                   | 116
 compiler/rustc_const_eval/src/interpret/visitor.rs                    | 694
14 files changed, 660 insertions(+), 1049 deletions(-)
diff --git a/compiler/rustc_const_eval/src/interpret/cast.rs b/compiler/rustc_const_eval/src/interpret/cast.rs
index dd7a1fcc165..977e49b6343 100644
--- a/compiler/rustc_const_eval/src/interpret/cast.rs
+++ b/compiler/rustc_const_eval/src/interpret/cast.rs
@@ -420,8 +420,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                     if cast_ty_field.is_zst() {
                         continue;
                     }
-                    let src_field = self.operand_field(src, i)?;
-                    let dst_field = self.place_field(dest, i)?;
+                    let src_field = self.project_field(src, i)?;
+                    let dst_field = self.project_field(dest, i)?;
                     if src_field.layout.ty == cast_ty_field.ty {
                         self.copy_op(&src_field, &dst_field, /*allow_transmute*/ false)?;
                     } else {
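The hunk above is part of the unsizing-cast logic: every sized, non-ZST field is copied verbatim, and only the field whose type changes is coerced. At the source level this corresponds to a coercion like the following (the struct name is illustrative; the built-in unsize coercion applies because only the last field changes type):

    struct Wrapper<T: ?Sized> {
        tag: u32,
        value: T,
    }

    fn main() {
        let sized: Box<Wrapper<[u8; 3]>> = Box::new(Wrapper { tag: 1, value: [1, 2, 3] });
        // Field-wise: `tag` is copied as-is, `value` is unsized.
        let unsized_val: Box<Wrapper<[u8]>> = sized;
        assert_eq!(unsized_val.tag, 1);
        assert_eq!(unsized_val.value.len(), 3);
    }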
diff --git a/compiler/rustc_const_eval/src/interpret/discriminant.rs b/compiler/rustc_const_eval/src/interpret/discriminant.rs
index f23a455c2ca..9ea5e7cb1f9 100644
--- a/compiler/rustc_const_eval/src/interpret/discriminant.rs
+++ b/compiler/rustc_const_eval/src/interpret/discriminant.rs
@@ -22,7 +22,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         // When evaluating we will always error before even getting here, but ConstProp 'executes'
         // dead code, so we cannot ICE here.
         if dest.layout.for_variant(self, variant_index).abi.is_uninhabited() {
-            throw_ub!(UninhabitedEnumVariantWritten)
+            throw_ub!(UninhabitedEnumVariantWritten(variant_index))
         }
 
         match dest.layout.variants {
@@ -47,7 +47,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 let size = tag_layout.size(self);
                 let tag_val = size.truncate(discr_val);
 
-                let tag_dest = self.place_field(dest, tag_field)?;
+                let tag_dest = self.project_field(dest, tag_field)?;
                 self.write_scalar(Scalar::from_uint(tag_val, size), &tag_dest)?;
             }
             abi::Variants::Multiple {
@@ -78,7 +78,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                         &niche_start_val,
                     )?;
                     // Write result.
-                    let niche_dest = self.place_field(dest, tag_field)?;
+                    let niche_dest = self.project_field(dest, tag_field)?;
                     self.write_immediate(*tag_val, &niche_dest)?;
                 }
             }
@@ -106,6 +106,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         // straight-forward (`TagEncoding::Direct`) or with a niche (`TagEncoding::Niche`).
         let (tag_scalar_layout, tag_encoding, tag_field) = match op.layout.variants {
             Variants::Single { index } => {
+                // Hilariously, `Single` is used even for 0-variant enums.
+                // (See https://github.com/rust-lang/rust/issues/89765).
+                if matches!(op.layout.ty.kind(), ty::Adt(def, ..) if def.variants().is_empty()) {
+                    throw_ub!(UninhabitedEnumVariantRead(index))
+                }
                 let discr = match op.layout.ty.discriminant_for_variant(*self.tcx, index) {
                     Some(discr) => {
                         // This type actually has discriminants.
@@ -118,6 +123,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                         Scalar::from_uint(index.as_u32(), discr_layout.size)
                     }
                 };
+                // For consistency with `write_discriminant`, and to make sure that
+                // `project_downcast` cannot fail due to strange layouts, we declare immediate UB
+                // for uninhabited variants.
+                if op.layout.ty.is_enum() && op.layout.for_variant(self, index).abi.is_uninhabited() {
+                    throw_ub!(UninhabitedEnumVariantRead(index))
+                }
                 return Ok((discr, index));
             }
             Variants::Multiple { tag, ref tag_encoding, tag_field, .. } => {
@@ -138,13 +149,13 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         let tag_layout = self.layout_of(tag_scalar_layout.primitive().to_int_ty(*self.tcx))?;
 
         // Read tag and sanity-check `tag_layout`.
-        let tag_val = self.read_immediate(&self.operand_field(op, tag_field)?)?;
+        let tag_val = self.read_immediate(&self.project_field(op, tag_field)?)?;
         assert_eq!(tag_layout.size, tag_val.layout.size);
         assert_eq!(tag_layout.abi.is_signed(), tag_val.layout.abi.is_signed());
         trace!("tag value: {}", tag_val);
 
         // Figure out which discriminant and variant this corresponds to.
-        Ok(match *tag_encoding {
+        let (discr, index) = match *tag_encoding {
             TagEncoding::Direct => {
                 let scalar = tag_val.to_scalar();
                 // Generate a specific error if `tag_val` is not an integer.
@@ -232,6 +243,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 // encoded in the tag.
                 (Scalar::from_uint(variant.as_u32(), discr_layout.size), variant)
             }
-        })
+        };
+        // For consistency with `write_discriminant`, and to make sure that `project_downcast` cannot fail due to strange layouts, we declare immediate UB for uninhabited variants.
+        if op.layout.for_variant(self, index).abi.is_uninhabited() {
+            throw_ub!(UninhabitedEnumVariantRead(index))
+        }
+        Ok((discr, index))
     }
 }
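For background on the two `TagEncoding`s handled in `read_discriminant` above: with `Direct` encoding the discriminant is stored verbatim in a tag field, while with `Niche` encoding it is folded into otherwise-invalid bit patterns of some field. A quick way to observe niche encoding on stable Rust:

    fn main() {
        // `Option<&u8>` needs no separate tag field: `None` is encoded in the
        // niche (the null value) of the reference, so the enum stays pointer-sized.
        assert_eq!(std::mem::size_of::<Option<&u8>>(), std::mem::size_of::<&u8>());
        // A direct tag, by contrast, costs space: `[u8; 8]` has no niche.
        assert!(std::mem::size_of::<Option<[u8; 8]>>() > 8);
    }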
diff --git a/compiler/rustc_const_eval/src/interpret/intern.rs b/compiler/rustc_const_eval/src/interpret/intern.rs
index 107e5bec614..3a7fe8bd478 100644
--- a/compiler/rustc_const_eval/src/interpret/intern.rs
+++ b/compiler/rustc_const_eval/src/interpret/intern.rs
@@ -164,75 +164,6 @@ impl<'rt, 'mir, 'tcx: 'mir, M: CompileTimeMachine<'mir, 'tcx, const_eval::Memory
         &self.ecx
     }
 
-    fn visit_aggregate(
-        &mut self,
-        mplace: &MPlaceTy<'tcx>,
-        fields: impl Iterator<Item = InterpResult<'tcx, Self::V>>,
-    ) -> InterpResult<'tcx> {
-        // We want to walk the aggregate to look for references to intern. While doing that we
-        // also need to take special care of interior mutability.
-        //
-        // As an optimization, however, if the allocation does not contain any references: we don't
-        // need to do the walk. It can be costly for big arrays for example (e.g. issue #93215).
-        let is_walk_needed = |mplace: &MPlaceTy<'tcx>| -> InterpResult<'tcx, bool> {
-            // ZSTs cannot contain pointers, we can avoid the interning walk.
-            if mplace.layout.is_zst() {
-                return Ok(false);
-            }
-
-            // Now, check whether this allocation could contain references.
-            //
-            // Note, this check may sometimes not be cheap, so we only do it when the walk we'd like
-            // to avoid could be expensive: on the potentially larger types, arrays and slices,
-            // rather than on all aggregates unconditionally.
-            if matches!(mplace.layout.ty.kind(), ty::Array(..) | ty::Slice(..)) {
-                let Some((size, align)) = self.ecx.size_and_align_of_mplace(&mplace)? else {
-                    // We do the walk if we can't determine the size of the mplace: we may be
-                    // dealing with extern types here in the future.
-                    return Ok(true);
-                };
-
-                // If there is no provenance in this allocation, it does not contain references
-                // that point to another allocation, and we can avoid the interning walk.
-                if let Some(alloc) = self.ecx.get_ptr_alloc(mplace.ptr, size, align)? {
-                    if !alloc.has_provenance() {
-                        return Ok(false);
-                    }
-                } else {
-                    // We're encountering a ZST here, and can avoid the walk as well.
-                    return Ok(false);
-                }
-            }
-
-            // In the general case, we do the walk.
-            Ok(true)
-        };
-
-        // If this allocation contains no references to intern, we avoid the potentially costly
-        // walk.
-        //
-        // We can do this before the checks for interior mutability below, because only references
-        // are relevant in that situation, and we're checking if there are any here.
-        if !is_walk_needed(mplace)? {
-            return Ok(());
-        }
-
-        if let Some(def) = mplace.layout.ty.ty_adt_def() {
-            if def.is_unsafe_cell() {
-                // We are crossing over an `UnsafeCell`, we can mutate again. This means that
-                // References we encounter inside here are interned as pointing to mutable
-                // allocations.
-                // Remember the `old` value to handle nested `UnsafeCell`.
-                let old = std::mem::replace(&mut self.inside_unsafe_cell, true);
-                let walked = self.walk_aggregate(mplace, fields);
-                self.inside_unsafe_cell = old;
-                return walked;
-            }
-        }
-
-        self.walk_aggregate(mplace, fields)
-    }
-
     fn visit_value(&mut self, mplace: &MPlaceTy<'tcx>) -> InterpResult<'tcx> {
         // Handle Reference types, as these are the only types with provenance supported by const eval.
         // Raw pointers (and boxes) are handled by the `leftover_allocations` logic.
@@ -315,7 +246,63 @@ impl<'rt, 'mir, 'tcx: 'mir, M: CompileTimeMachine<'mir, 'tcx, const_eval::Memory
             }
             Ok(())
         } else {
-            // Not a reference -- proceed recursively.
+            // Not a reference. Check if we want to recurse.
+            let is_walk_needed = |mplace: &MPlaceTy<'tcx>| -> InterpResult<'tcx, bool> {
+                // ZSTs cannot contain pointers, we can avoid the interning walk.
+                if mplace.layout.is_zst() {
+                    return Ok(false);
+                }
+
+                // Now, check whether this allocation could contain references.
+                //
+                // Note, this check may sometimes not be cheap, so we only do it when the walk we'd like
+                // to avoid could be expensive: on the potentially larger types, arrays and slices,
+                // rather than on all aggregates unconditionally.
+                if matches!(mplace.layout.ty.kind(), ty::Array(..) | ty::Slice(..)) {
+                    let Some((size, align)) = self.ecx.size_and_align_of_mplace(&mplace)? else {
+                        // We do the walk if we can't determine the size of the mplace: we may be
+                        // dealing with extern types here in the future.
+                        return Ok(true);
+                    };
+
+                    // If there is no provenance in this allocation, it does not contain references
+                    // that point to another allocation, and we can avoid the interning walk.
+                    if let Some(alloc) = self.ecx.get_ptr_alloc(mplace.ptr, size, align)? {
+                        if !alloc.has_provenance() {
+                            return Ok(false);
+                        }
+                    } else {
+                        // We're encountering a ZST here, and can avoid the walk as well.
+                        return Ok(false);
+                    }
+                }
+
+                // In the general case, we do the walk.
+                Ok(true)
+            };
+
+            // If this allocation contains no references to intern, we avoid the potentially costly
+            // walk.
+            //
+            // We can do this before the checks for interior mutability below, because only references
+            // are relevant in that situation, and we're checking if there are any here.
+            if !is_walk_needed(mplace)? {
+                return Ok(());
+            }
+
+            if let Some(def) = mplace.layout.ty.ty_adt_def() {
+                if def.is_unsafe_cell() {
+                    // We are crossing over an `UnsafeCell`, we can mutate again. This means that
+                    // References we encounter inside here are interned as pointing to mutable
+                    // allocations.
+                    // Remember the `old` value to handle nested `UnsafeCell`.
+                    let old = std::mem::replace(&mut self.inside_unsafe_cell, true);
+                    let walked = self.walk_value(mplace);
+                    self.inside_unsafe_cell = old;
+                    return walked;
+                }
+            }
+
             self.walk_value(mplace)
         }
     }
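A standalone sketch of the walk-avoidance check that this hunk moves into `visit_value`, under a toy allocation model where provenance is tracked as a list of byte offsets that hold pointers (the real check queries the interpreter's allocation for provenance):

    struct Allocation {
        bytes: Vec<u8>,
        provenance: Vec<usize>, // byte offsets that carry pointer provenance
    }

    // Skip the expensive recursive walk when the allocation provably
    // contains no pointers to intern.
    fn is_walk_needed(alloc: &Allocation, is_array_or_slice: bool) -> bool {
        // ZSTs cannot contain pointers.
        if alloc.bytes.is_empty() {
            return false;
        }
        // Only pay for the provenance check where skipping the walk is worth
        // it: the potentially large arrays/slices (cf. issue #93215).
        if is_array_or_slice && alloc.provenance.is_empty() {
            return false;
        }
        // In the general case, do the walk.
        true
    }

    fn main() {
        let a = Allocation { bytes: vec![0; 4096], provenance: vec![] };
        assert!(!is_walk_needed(&a, true)); // big array, no pointers: skip
    }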
diff --git a/compiler/rustc_const_eval/src/interpret/intrinsics.rs b/compiler/rustc_const_eval/src/interpret/intrinsics.rs
index 04cae23f852..5f981c9b918 100644
--- a/compiler/rustc_const_eval/src/interpret/intrinsics.rs
+++ b/compiler/rustc_const_eval/src/interpret/intrinsics.rs
@@ -425,11 +425,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 );
 
                 for i in 0..dest_len {
-                    let place = self.mplace_index(&dest, i)?;
+                    let place = self.project_index(&dest, i)?;
                     let value = if i == index {
                         elem.clone()
                     } else {
-                        self.mplace_index(&input, i)?.into()
+                        self.project_index(&input, i)?.into()
                     };
                     self.copy_op(&value, &place.into(), /*allow_transmute*/ false)?;
                 }
@@ -444,7 +444,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                     input_len
                 );
                 self.copy_op(
-                    &self.mplace_index(&input, index)?.into(),
+                    &self.project_index(&input, index)?.into(),
                     dest,
                     /*allow_transmute*/ false,
                 )?;
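A scalar model of what the two rewritten intrinsic paths compute (the real code works lane-by-lane on interpreter places via `project_index`; the fixed-size arrays here are illustrative):

    // `simd_insert`: copy every lane of `input`, replacing the one at `index`.
    fn simd_insert(input: [u32; 4], index: usize, elem: u32) -> [u32; 4] {
        let mut dest = input;
        dest[index] = elem;
        dest
    }

    // `simd_extract`: project out a single lane.
    fn simd_extract(input: [u32; 4], index: usize) -> u32 {
        input[index]
    }

    fn main() {
        assert_eq!(simd_insert([1, 2, 3, 4], 2, 9), [1, 2, 9, 4]);
        assert_eq!(simd_extract([1, 2, 3, 4], 2), 3);
    }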
diff --git a/compiler/rustc_const_eval/src/interpret/intrinsics/caller_location.rs b/compiler/rustc_const_eval/src/interpret/intrinsics/caller_location.rs
index c4fe293bfac..44a12751743 100644
--- a/compiler/rustc_const_eval/src/interpret/intrinsics/caller_location.rs
+++ b/compiler/rustc_const_eval/src/interpret/intrinsics/caller_location.rs
@@ -101,11 +101,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         let location = self.allocate(loc_layout, MemoryKind::CallerLocation).unwrap();
 
         // Initialize fields.
-        self.write_immediate(file.to_ref(self), &self.mplace_field(&location, 0).unwrap().into())
+        self.write_immediate(file.to_ref(self), &self.project_field(&location, 0).unwrap().into())
             .expect("writing to memory we just allocated cannot fail");
-        self.write_scalar(line, &self.mplace_field(&location, 1).unwrap().into())
+        self.write_scalar(line, &self.project_field(&location, 1).unwrap().into())
             .expect("writing to memory we just allocated cannot fail");
-        self.write_scalar(col, &self.mplace_field(&location, 2).unwrap().into())
+        self.write_scalar(col, &self.project_field(&location, 2).unwrap().into())
             .expect("writing to memory we just allocated cannot fail");
 
         location
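The three fields written above (file, line, column) are exactly what surfaces as `core::panic::Location` on stable Rust:

    #[track_caller]
    fn whereabouts() -> &'static std::panic::Location<'static> {
        // The interpreter materializes this value via the allocation
        // filled in field by field above.
        std::panic::Location::caller()
    }

    fn main() {
        let loc = whereabouts();
        println!("{}:{}:{}", loc.file(), loc.line(), loc.column());
    }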
diff --git a/compiler/rustc_const_eval/src/interpret/mod.rs b/compiler/rustc_const_eval/src/interpret/mod.rs
index f657f954f9c..7974920bc14 100644
--- a/compiler/rustc_const_eval/src/interpret/mod.rs
+++ b/compiler/rustc_const_eval/src/interpret/mod.rs
@@ -26,9 +26,10 @@ pub use self::machine::{compile_time_machine, AllocMap, Machine, MayLeak, StackP
 pub use self::memory::{AllocKind, AllocRef, AllocRefMut, FnVal, Memory, MemoryKind};
 pub use self::operand::{ImmTy, Immediate, OpTy, Operand};
 pub use self::place::{MPlaceTy, MemPlace, MemPlaceMeta, Place, PlaceTy};
+pub use self::projection::Projectable;
 pub use self::terminator::FnArg;
 pub use self::validity::{CtfeValidationMode, RefTracking};
-pub use self::visitor::{MutValueVisitor, Value, ValueVisitor};
+pub use self::visitor::ValueVisitor;
 
 pub(crate) use self::intrinsics::eval_nullary_intrinsic;
 use eval_context::{from_known_layout, mir_assign_valid_types};
diff --git a/compiler/rustc_const_eval/src/interpret/operand.rs b/compiler/rustc_const_eval/src/interpret/operand.rs
index e6d0a73c521..be39e63ab4f 100644
--- a/compiler/rustc_const_eval/src/interpret/operand.rs
+++ b/compiler/rustc_const_eval/src/interpret/operand.rs
@@ -1,6 +1,8 @@
 //! Functions concerning immediate values and operands, and reading from operands.
 //! All high-level functions to read from memory work on operands as sources.
 
+use std::assert_matches::assert_matches;
+
 use either::{Either, Left, Right};
 
 use rustc_hir::def::Namespace;
@@ -14,7 +16,7 @@ use rustc_target::abi::{self, Abi, Align, HasDataLayout, Size};
 use super::{
     alloc_range, from_known_layout, mir_assign_valid_types, AllocId, ConstValue, Frame, GlobalId,
     InterpCx, InterpResult, MPlaceTy, Machine, MemPlace, MemPlaceMeta, PlaceTy, Pointer,
-    Provenance, Scalar,
+    Projectable, Provenance, Scalar,
 };
 
 /// An `Immediate` represents a single immediate self-contained Rust value.
@@ -199,6 +201,20 @@ impl<'tcx, Prov: Provenance> From<ImmTy<'tcx, Prov>> for OpTy<'tcx, Prov> {
     }
 }
 
+impl<'tcx, Prov: Provenance> From<&'_ ImmTy<'tcx, Prov>> for OpTy<'tcx, Prov> {
+    #[inline(always)]
+    fn from(val: &ImmTy<'tcx, Prov>) -> Self {
+        OpTy { op: Operand::Immediate(val.imm), layout: val.layout, align: None }
+    }
+}
+
+impl<'tcx, Prov: Provenance> From<&'_ mut ImmTy<'tcx, Prov>> for OpTy<'tcx, Prov> {
+    #[inline(always)]
+    fn from(val: &mut ImmTy<'tcx, Prov>) -> Self {
+        OpTy { op: Operand::Immediate(val.imm), layout: val.layout, align: None }
+    }
+}
+
 impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
     #[inline]
     pub fn from_scalar(val: Scalar<Prov>, layout: TyAndLayout<'tcx>) -> Self {
@@ -243,12 +259,8 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
 
     /// Compute the "sub-immediate" that is located within the `base` at the given offset with the
     /// given layout.
-    pub(super) fn offset(
-        &self,
-        offset: Size,
-        layout: TyAndLayout<'tcx>,
-        cx: &impl HasDataLayout,
-    ) -> Self {
+    // Not called `offset` to avoid confusion with the trait method.
+    fn offset_(&self, offset: Size, layout: TyAndLayout<'tcx>, cx: &impl HasDataLayout) -> Self {
         // This makes several assumptions about what layouts we will encounter; we match what
         // codegen does as good as we can (see `extract_field` in `rustc_codegen_ssa/src/mir/operand.rs`).
         let inner_val: Immediate<_> = match (**self, self.layout.abi) {
@@ -256,14 +268,28 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
             (Immediate::Uninit, _) => Immediate::Uninit,
             // the field contains no information, can be left uninit
             _ if layout.is_zst() => Immediate::Uninit,
+            // some fieldless enum variants can have non-zero size but still `Aggregate` ABI... try
+            // to detect those here and also give them no data
+            _ if matches!(layout.abi, Abi::Aggregate { .. })
+                && matches!(&layout.fields, abi::FieldsShape::Arbitrary { offsets, .. } if offsets.len() == 0) =>
+            {
+                Immediate::Uninit
+            }
             // the field covers the entire type
             _ if layout.size == self.layout.size => {
-                assert!(match (self.layout.abi, layout.abi) {
-                    (Abi::Scalar(..), Abi::Scalar(..)) => true,
-                    (Abi::ScalarPair(..), Abi::ScalarPair(..)) => true,
-                    _ => false,
-                });
-                assert!(offset.bytes() == 0);
+                assert_eq!(offset.bytes(), 0);
+                assert!(
+                    match (self.layout.abi, layout.abi) {
+                        (Abi::Scalar(..), Abi::Scalar(..)) => true,
+                        (Abi::ScalarPair(..), Abi::ScalarPair(..)) => true,
+                        _ => false,
+                    },
+                    "cannot project into {} immediate with equally-sized field {}\nouter ABI: {:#?}\nfield ABI: {:#?}",
+                    self.layout.ty,
+                    layout.ty,
+                    self.layout.abi,
+                    layout.abi,
+                );
                 **self
             }
             // extract fields from types with `ScalarPair` ABI
@@ -286,8 +312,42 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
     }
 }
 
+impl<'mir, 'tcx: 'mir, Prov: Provenance> Projectable<'mir, 'tcx, Prov> for ImmTy<'tcx, Prov> {
+    #[inline(always)]
+    fn layout(&self) -> TyAndLayout<'tcx> {
+        self.layout
+    }
+
+    fn meta<M: Machine<'mir, 'tcx, Provenance = Prov>>(
+        &self,
+        _ecx: &InterpCx<'mir, 'tcx, M>,
+    ) -> InterpResult<'tcx, MemPlaceMeta<M::Provenance>> {
+        assert!(self.layout.is_sized()); // unsized ImmTy can only exist temporarily and should never reach this here
+        Ok(MemPlaceMeta::None)
+    }
+
+    fn offset_with_meta(
+        &self,
+        offset: Size,
+        meta: MemPlaceMeta<Prov>,
+        layout: TyAndLayout<'tcx>,
+        cx: &impl HasDataLayout,
+    ) -> InterpResult<'tcx, Self> {
+        assert_matches!(meta, MemPlaceMeta::None); // we can't store this anywhere anyway
+        Ok(self.offset_(offset, layout, cx))
+    }
+
+    fn to_op<M: Machine<'mir, 'tcx, Provenance = Prov>>(
+        &self,
+        _ecx: &InterpCx<'mir, 'tcx, M>,
+    ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+        Ok(self.into())
+    }
+}
+
 impl<'tcx, Prov: Provenance> OpTy<'tcx, Prov> {
-    pub(super) fn meta(&self) -> InterpResult<'tcx, MemPlaceMeta<Prov>> {
+    // Provided as inherent method since it doesn't need the `ecx` of `Projectable::meta`.
+    pub fn meta(&self) -> InterpResult<'tcx, MemPlaceMeta<Prov>> {
         Ok(if self.layout.is_unsized() {
             if matches!(self.op, Operand::Immediate(_)) {
                 // Unsized immediate OpTy cannot occur. We create a MemPlace for all unsized locals during argument passing.
@@ -300,15 +360,24 @@ impl<'tcx, Prov: Provenance> OpTy<'tcx, Prov> {
             MemPlaceMeta::None
         })
     }
+}
 
-    pub fn len(&self, cx: &impl HasDataLayout) -> InterpResult<'tcx, u64> {
-        self.meta()?.len(self.layout, cx)
+impl<'mir, 'tcx: 'mir, Prov: Provenance + 'static> Projectable<'mir, 'tcx, Prov>
+    for OpTy<'tcx, Prov>
+{
+    #[inline(always)]
+    fn layout(&self) -> TyAndLayout<'tcx> {
+        self.layout
     }
 
-    /// Offset the operand in memory (if possible) and change its metadata.
-    ///
-    /// This can go wrong very easily if you give the wrong layout for the new place!
-    pub(super) fn offset_with_meta(
+    fn meta<M: Machine<'mir, 'tcx, Provenance = Prov>>(
+        &self,
+        _ecx: &InterpCx<'mir, 'tcx, M>,
+    ) -> InterpResult<'tcx, MemPlaceMeta<M::Provenance>> {
+        self.meta()
+    }
+
+    fn offset_with_meta(
         &self,
         offset: Size,
         meta: MemPlaceMeta<Prov>,
@@ -320,22 +389,16 @@ impl<'tcx, Prov: Provenance> OpTy<'tcx, Prov> {
             Right(imm) => {
                 assert!(!meta.has_meta()); // no place to store metadata here
                 // Every part of an uninit is uninit.
-                Ok(imm.offset(offset, layout, cx).into())
+                Ok(imm.offset(offset, layout, cx)?.into())
             }
         }
     }
 
-    /// Offset the operand in memory (if possible).
-    ///
-    /// This can go wrong very easily if you give the wrong layout for the new place!
-    pub fn offset(
+    fn to_op<M: Machine<'mir, 'tcx, Provenance = Prov>>(
         &self,
-        offset: Size,
-        layout: TyAndLayout<'tcx>,
-        cx: &impl HasDataLayout,
-    ) -> InterpResult<'tcx, Self> {
-        assert!(layout.is_sized());
-        self.offset_with_meta(offset, MemPlaceMeta::None, layout, cx)
+        _ecx: &InterpCx<'mir, 'tcx, M>,
+    ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+        Ok(self.clone())
     }
 }
 
@@ -525,7 +588,6 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
     /// Every place can be read from, so we can turn them into an operand.
     /// This will definitely return `Indirect` if the place is a `Ptr`, i.e., this
     /// will never actually read from memory.
-    #[inline(always)]
     pub fn place_to_op(
         &self,
         place: &PlaceTy<'tcx, M::Provenance>,
@@ -564,7 +626,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         let mut op = self.local_to_op(self.frame(), mir_place.local, layout)?;
         // Using `try_fold` turned out to be bad for performance, hence the loop.
         for elem in mir_place.projection.iter() {
-            op = self.operand_projection(&op, elem)?
+            op = self.project(&op, elem)?
         }
 
         trace!("eval_place_to_op: got {:?}", *op);
diff --git a/compiler/rustc_const_eval/src/interpret/operator.rs b/compiler/rustc_const_eval/src/interpret/operator.rs
index e04764636cc..49c3b152e1d 100644
--- a/compiler/rustc_const_eval/src/interpret/operator.rs
+++ b/compiler/rustc_const_eval/src/interpret/operator.rs
@@ -38,9 +38,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             // With randomized layout, `(int, bool)` might cease to be a `ScalarPair`, so we have to
             // do a component-wise write here. This code path is slower than the above because
             // `place_field` will have to `force_allocate` locals here.
-            let val_field = self.place_field(&dest, 0)?;
+            let val_field = self.project_field(dest, 0)?;
             self.write_scalar(val, &val_field)?;
-            let overflowed_field = self.place_field(&dest, 1)?;
+            let overflowed_field = self.project_field(dest, 1)?;
             self.write_scalar(Scalar::from_bool(overflowed), &overflowed_field)?;
         }
         Ok(())
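The pair written component-wise above is the value/overflow pair that a checked binary operation produces in MIR; at the source level it has the same shape as the standard `overflowing_*` methods:

    fn main() {
        // A checked add yields `(value, overflowed)`; the interpreter code
        // above writes these two components as separate fields.
        let (val, overflowed) = 250u8.overflowing_add(10);
        assert_eq!((val, overflowed), (4, true));
    }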
diff --git a/compiler/rustc_const_eval/src/interpret/place.rs b/compiler/rustc_const_eval/src/interpret/place.rs
index dd07c5fa877..9c492fb8b93 100644
--- a/compiler/rustc_const_eval/src/interpret/place.rs
+++ b/compiler/rustc_const_eval/src/interpret/place.rs
@@ -18,7 +18,7 @@ use rustc_target::abi::{self, Abi, Align, FieldIdx, HasDataLayout, Size, FIRST_V
 use super::{
     alloc_range, mir_assign_valid_types, AllocId, AllocRef, AllocRefMut, CheckInAllocMsg,
     ConstAlloc, ImmTy, Immediate, InterpCx, InterpResult, Machine, MemoryKind, OpTy, Operand,
-    Pointer, Provenance, Scalar,
+    Pointer, Projectable, Provenance, Scalar,
 };
 
 #[derive(Copy, Clone, Hash, PartialEq, Eq, Debug)]
@@ -183,7 +183,8 @@ impl<Prov: Provenance> MemPlace<Prov> {
     }
 
     #[inline]
-    fn offset_with_meta<'tcx>(
+    // Not called `offset_with_meta` to avoid confusion with the trait method.
+    fn offset_with_meta_<'tcx>(
         self,
         offset: Size,
         meta: MemPlaceMeta<Prov>,
@@ -195,11 +196,6 @@ impl<Prov: Provenance> MemPlace<Prov> {
         );
         Ok(MemPlace { ptr: self.ptr.offset(offset, cx)?, meta })
     }
-
-    #[inline]
-    fn offset<'tcx>(&self, offset: Size, cx: &impl HasDataLayout) -> InterpResult<'tcx, Self> {
-        self.offset_with_meta(offset, MemPlaceMeta::None, cx)
-    }
 }
 
 impl<'tcx, Prov: Provenance> MPlaceTy<'tcx, Prov> {
@@ -214,37 +210,6 @@ impl<'tcx, Prov: Provenance> MPlaceTy<'tcx, Prov> {
         MPlaceTy { mplace: MemPlace { ptr, meta: MemPlaceMeta::None }, layout, align }
     }
 
-    /// Offset the place in memory and change its metadata.
-    ///
-    /// This can go wrong very easily if you give the wrong layout for the new place!
-    #[inline]
-    pub(crate) fn offset_with_meta(
-        &self,
-        offset: Size,
-        meta: MemPlaceMeta<Prov>,
-        layout: TyAndLayout<'tcx>,
-        cx: &impl HasDataLayout,
-    ) -> InterpResult<'tcx, Self> {
-        Ok(MPlaceTy {
-            mplace: self.mplace.offset_with_meta(offset, meta, cx)?,
-            align: self.align.restrict_for_offset(offset),
-            layout,
-        })
-    }
-
-    /// Offset the place in memory.
-    ///
-    /// This can go wrong very easily if you give the wrong layout for the new place!
-    pub fn offset(
-        &self,
-        offset: Size,
-        layout: TyAndLayout<'tcx>,
-        cx: &impl HasDataLayout,
-    ) -> InterpResult<'tcx, Self> {
-        assert!(layout.is_sized());
-        self.offset_with_meta(offset, MemPlaceMeta::None, layout, cx)
-    }
-
     #[inline]
     pub fn from_aligned_ptr(ptr: Pointer<Option<Prov>>, layout: TyAndLayout<'tcx>) -> Self {
         MPlaceTy { mplace: MemPlace::from_ptr(ptr), layout, align: layout.align.abi }
@@ -262,10 +227,42 @@ impl<'tcx, Prov: Provenance> MPlaceTy<'tcx, Prov> {
             align: layout.align.abi,
         }
     }
+}
 
-    #[inline]
-    pub(crate) fn len(&self, cx: &impl HasDataLayout) -> InterpResult<'tcx, u64> {
-        self.mplace.meta.len(self.layout, cx)
+impl<'mir, 'tcx: 'mir, Prov: Provenance + 'static> Projectable<'mir, 'tcx, Prov>
+    for MPlaceTy<'tcx, Prov>
+{
+    #[inline(always)]
+    fn layout(&self) -> TyAndLayout<'tcx> {
+        self.layout
+    }
+
+    fn meta<M: Machine<'mir, 'tcx, Provenance = Prov>>(
+        &self,
+        _ecx: &InterpCx<'mir, 'tcx, M>,
+    ) -> InterpResult<'tcx, MemPlaceMeta<M::Provenance>> {
+        Ok(self.meta)
+    }
+
+    fn offset_with_meta(
+        &self,
+        offset: Size,
+        meta: MemPlaceMeta<Prov>,
+        layout: TyAndLayout<'tcx>,
+        cx: &impl HasDataLayout,
+    ) -> InterpResult<'tcx, Self> {
+        Ok(MPlaceTy {
+            mplace: self.mplace.offset_with_meta_(offset, meta, cx)?,
+            align: self.align.restrict_for_offset(offset),
+            layout,
+        })
+    }
+
+    fn to_op<M: Machine<'mir, 'tcx, Provenance = Prov>>(
+        &self,
+        _ecx: &InterpCx<'mir, 'tcx, M>,
+    ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+        Ok(self.into())
     }
 }
 
@@ -293,7 +290,7 @@ impl<'tcx, Prov: Provenance> OpTy<'tcx, Prov> {
     }
 }
 
-impl<'tcx, Prov: Provenance> PlaceTy<'tcx, Prov> {
+impl<'tcx, Prov: Provenance + 'static> PlaceTy<'tcx, Prov> {
     /// A place is either an mplace or some local.
     #[inline]
     pub fn as_mplace_or_local(
@@ -315,11 +312,24 @@ impl<'tcx, Prov: Provenance> PlaceTy<'tcx, Prov> {
             )
         })
     }
+}
 
-    /// Offset the place in memory and change its metadata.
-    ///
-    /// This can go wrong very easily if you give the wrong layout for the new place!
-    pub(crate) fn offset_with_meta(
+impl<'mir, 'tcx: 'mir, Prov: Provenance + 'static> Projectable<'mir, 'tcx, Prov>
+    for PlaceTy<'tcx, Prov>
+{
+    #[inline(always)]
+    fn layout(&self) -> TyAndLayout<'tcx> {
+        self.layout
+    }
+
+    fn meta<M: Machine<'mir, 'tcx, Provenance = Prov>>(
+        &self,
+        ecx: &InterpCx<'mir, 'tcx, M>,
+    ) -> InterpResult<'tcx, MemPlaceMeta<M::Provenance>> {
+        ecx.place_meta(self)
+    }
+
+    fn offset_with_meta(
         &self,
         offset: Size,
         meta: MemPlaceMeta<Prov>,
@@ -346,17 +356,11 @@ impl<'tcx, Prov: Provenance> PlaceTy<'tcx, Prov> {
         })
     }
 
-    /// Offset the place in memory.
-    ///
-    /// This can go wrong very easily if you give the wrong layout for the new place!
-    pub fn offset(
+    fn to_op<M: Machine<'mir, 'tcx, Provenance = Prov>>(
         &self,
-        offset: Size,
-        layout: TyAndLayout<'tcx>,
-        cx: &impl HasDataLayout,
-    ) -> InterpResult<'tcx, Self> {
-        assert!(layout.is_sized());
-        self.offset_with_meta(offset, MemPlaceMeta::None, layout, cx)
+        ecx: &InterpCx<'mir, 'tcx, M>,
+    ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+        ecx.place_to_op(self)
     }
 }
 
@@ -506,7 +510,7 @@ where
         let mut place = self.local_to_place(self.frame_idx(), mir_place.local)?;
         // Using `try_fold` turned out to be bad for performance, hence the loop.
         for elem in mir_place.projection.iter() {
-            place = self.place_projection(&place, elem)?
+            place = self.project(&place, elem)?
         }
 
         trace!("{:?}", self.dump_place(place.place));
@@ -849,7 +853,7 @@ where
                     &mut Operand::Indirect(mplace) => mplace, // this already was an indirect local
                 };
                 if let Some(offset) = offset {
-                    whole_local.offset(offset, self)?
+                    whole_local.offset_with_meta_(offset, MemPlaceMeta::None, self)?
                 } else {
                     // Preserve wide place metadata, do not call `offset`.
                     whole_local
@@ -902,7 +906,7 @@ where
         self.write_uninit(&dest)?;
         let (variant_index, variant_dest, active_field_index) = match *kind {
             mir::AggregateKind::Adt(_, variant_index, _, _, active_field_index) => {
-                let variant_dest = self.place_downcast(&dest, variant_index)?;
+                let variant_dest = self.project_downcast(dest, variant_index)?;
                 (variant_index, variant_dest, active_field_index)
             }
             _ => (FIRST_VARIANT, dest.clone(), None),
@@ -912,7 +916,7 @@ where
         }
         for (field_index, operand) in operands.iter_enumerated() {
             let field_index = active_field_index.unwrap_or(field_index);
-            let field_dest = self.place_field(&variant_dest, field_index.as_usize())?;
+            let field_dest = self.project_field(&variant_dest, field_index.as_usize())?;
             let op = self.eval_operand(operand, Some(field_dest.layout))?;
             self.copy_op(&op, &field_dest, /*allow_transmute*/ false)?;
         }
@@ -952,22 +956,24 @@ where
         Ok((mplace, vtable))
     }
 
-    /// Turn an operand with a `dyn* Trait` type into an operand with the actual dynamic type.
-    /// Aso returns the vtable.
-    pub(super) fn unpack_dyn_star(
+    /// Turn a `dyn* Trait` type into a value with the actual dynamic type.
+    /// Also returns the vtable.
+    pub(super) fn unpack_dyn_star<P: Projectable<'mir, 'tcx, M::Provenance>>(
         &self,
-        op: &OpTy<'tcx, M::Provenance>,
-    ) -> InterpResult<'tcx, (OpTy<'tcx, M::Provenance>, Pointer<Option<M::Provenance>>)> {
+        val: &P,
+    ) -> InterpResult<'tcx, (P, Pointer<Option<M::Provenance>>)> {
         assert!(
-            matches!(op.layout.ty.kind(), ty::Dynamic(_, _, ty::DynStar)),
+            matches!(val.layout().ty.kind(), ty::Dynamic(_, _, ty::DynStar)),
             "`unpack_dyn_star` only makes sense on `dyn*` types"
         );
-        let data = self.operand_field(&op, 0)?;
-        let vtable = self.operand_field(&op, 1)?;
-        let vtable = self.read_pointer(&vtable)?;
+        let data = self.project_field(val, 0)?;
+        let vtable = self.project_field(val, 1)?;
+        let vtable = self.read_pointer(&vtable.to_op(self)?)?;
         let (ty, _) = self.get_ptr_vtable(vtable)?;
         let layout = self.layout_of(ty)?;
-        let data = data.offset(Size::ZERO, layout, self)?;
+        // `data` is already the right thing but has the wrong type. So we transmute it, by
+        // projecting with offset 0.
+        let data = data.transmute(layout, self)?;
         Ok((data, vtable))
     }
 }
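For orientation: a `dyn* Trait` value is a word-sized data field plus a vtable pointer, which is why `unpack_dyn_star` projects field 0 and field 1 and then re-types the data with a zero-offset "transmute". A conceptual standalone model (the struct and its field types are illustrative, not the real layout):

    // Conceptual shape of a `dyn* Trait` value.
    struct DynStar {
        data: usize,   // field 0: the value itself, packed into one word
        vtable: usize, // field 1: pointer to the vtable naming the real type
    }

    fn unpack(v: &DynStar) -> (usize, usize) {
        // `unpack_dyn_star` does this projection generically, then re-types
        // `data` to the layout recorded behind the vtable.
        (v.data, v.vtable)
    }

    fn main() {
        let v = DynStar { data: 42, vtable: 0xdead };
        assert_eq!(unpack(&v), (42, 0xdead));
    }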
diff --git a/compiler/rustc_const_eval/src/interpret/projection.rs b/compiler/rustc_const_eval/src/interpret/projection.rs
index 5a3f19e5dc9..ddcbc8350aa 100644
--- a/compiler/rustc_const_eval/src/interpret/projection.rs
+++ b/compiler/rustc_const_eval/src/interpret/projection.rs
@@ -11,12 +11,67 @@ use rustc_middle::mir;
 use rustc_middle::ty;
 use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
 use rustc_middle::ty::Ty;
+use rustc_middle::ty::TyCtxt;
+use rustc_target::abi::HasDataLayout;
 use rustc_target::abi::Size;
 use rustc_target::abi::{self, VariantIdx};
 
-use super::{
-    InterpCx, InterpResult, MPlaceTy, Machine, MemPlaceMeta, OpTy, PlaceTy, Provenance, Scalar,
-};
+use super::MPlaceTy;
+use super::{InterpCx, InterpResult, Machine, MemPlaceMeta, OpTy, Provenance, Scalar};
+
+/// A thing that we can project into, and that has a layout.
+pub trait Projectable<'mir, 'tcx: 'mir, Prov: Provenance>: Sized {
+    /// Get the layout.
+    fn layout(&self) -> TyAndLayout<'tcx>;
+
+    /// Get the metadata of a wide value.
+    fn meta<M: Machine<'mir, 'tcx, Provenance = Prov>>(
+        &self,
+        ecx: &InterpCx<'mir, 'tcx, M>,
+    ) -> InterpResult<'tcx, MemPlaceMeta<M::Provenance>>;
+
+    fn len<M: Machine<'mir, 'tcx, Provenance = Prov>>(
+        &self,
+        ecx: &InterpCx<'mir, 'tcx, M>,
+    ) -> InterpResult<'tcx, u64> {
+        self.meta(ecx)?.len(self.layout(), ecx)
+    }
+
+    /// Offset the value by the given amount, replacing the layout and metadata.
+    fn offset_with_meta(
+        &self,
+        offset: Size,
+        meta: MemPlaceMeta<Prov>,
+        layout: TyAndLayout<'tcx>,
+        cx: &impl HasDataLayout,
+    ) -> InterpResult<'tcx, Self>;
+
+    fn offset(
+        &self,
+        offset: Size,
+        layout: TyAndLayout<'tcx>,
+        cx: &impl HasDataLayout,
+    ) -> InterpResult<'tcx, Self> {
+        assert!(layout.is_sized());
+        self.offset_with_meta(offset, MemPlaceMeta::None, layout, cx)
+    }
+
+    fn transmute(
+        &self,
+        layout: TyAndLayout<'tcx>,
+        cx: &impl HasDataLayout,
+    ) -> InterpResult<'tcx, Self> {
+        assert_eq!(self.layout().size, layout.size);
+        self.offset_with_meta(Size::ZERO, MemPlaceMeta::None, layout, cx)
+    }
+
+    /// Convert this to an `OpTy`. This might be an irreversible transformation, but is useful for
+    /// reading from this thing.
+    fn to_op<M: Machine<'mir, 'tcx, Provenance = Prov>>(
+        &self,
+        ecx: &InterpCx<'mir, 'tcx, M>,
+    ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>>;
+}
 
 // FIXME: Working around https://github.com/rust-lang/rust/issues/54385
 impl<'mir, 'tcx: 'mir, Prov, M> InterpCx<'mir, 'tcx, M>
@@ -24,24 +79,33 @@ where
     Prov: Provenance + 'static,
     M: Machine<'mir, 'tcx, Provenance = Prov>,
 {
-    //# Field access
-
-    fn project_field(
+    /// Offset a pointer to project to a field of a struct/union. This is always
+    /// possible without allocating, so it can take `&self`. Also returns the field's layout.
+    /// This supports both struct and array fields, but not slices!
+    ///
+    /// This also works for arrays, but then the `usize` index type is restricting.
+    /// For indexing into arrays, use `project_index`.
+    pub fn project_field<P: Projectable<'mir, 'tcx, M::Provenance>>(
         &self,
-        base_layout: TyAndLayout<'tcx>,
-        base_meta: MemPlaceMeta<M::Provenance>,
+        base: &P,
         field: usize,
-    ) -> InterpResult<'tcx, (Size, MemPlaceMeta<M::Provenance>, TyAndLayout<'tcx>)> {
-        let offset = base_layout.fields.offset(field);
-        let field_layout = base_layout.field(self, field);
+    ) -> InterpResult<'tcx, P> {
+        // Slices nominally have length 0, so they will panic somewhere in `fields.offset`.
+        debug_assert!(
+            !matches!(base.layout().ty.kind(), ty::Slice(..)),
+            "`field` projection called on a slice -- call `index` projection instead"
+        );
+        let offset = base.layout().fields.offset(field);
+        let field_layout = base.layout().field(self, field);
 
         // Offset may need adjustment for unsized fields.
         let (meta, offset) = if field_layout.is_unsized() {
-            if base_layout.is_sized() {
+            if base.layout().is_sized() {
                 // An unsized field of a sized type? Sure...
                 // But const-prop actually feeds us such nonsense MIR!
                 throw_inval!(ConstPropNonsense);
             }
+            let base_meta = base.meta(self)?;
             // Re-use parent metadata to determine dynamic field layout.
             // With custom DSTS, this *will* execute user-defined code, but the same
             // happens at run-time so that's okay.
@@ -60,189 +124,68 @@ where
             (MemPlaceMeta::None, offset)
         };
 
-        Ok((offset, meta, field_layout))
-    }
-
-    /// Offset a pointer to project to a field of a struct/union. Unlike `place_field`, this is
-    /// always possible without allocating, so it can take `&self`. Also return the field's layout.
-    /// This supports both struct and array fields.
-    ///
-    /// This also works for arrays, but then the `usize` index type is restricting.
-    /// For indexing into arrays, use `mplace_index`.
-    pub fn mplace_field(
-        &self,
-        base: &MPlaceTy<'tcx, M::Provenance>,
-        field: usize,
-    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
-        let (offset, meta, field_layout) = self.project_field(base.layout, base.meta, field)?;
-
-        // We do not look at `base.layout.align` nor `field_layout.align`, unlike
-        // codegen -- mostly to see if we can get away with that
         base.offset_with_meta(offset, meta, field_layout, self)
     }
 
-    /// Gets the place of a field inside the place, and also the field's type.
-    pub fn place_field(
+    /// Downcasting to an enum variant.
+    pub fn project_downcast<P: Projectable<'mir, 'tcx, M::Provenance>>(
         &self,
-        base: &PlaceTy<'tcx, M::Provenance>,
-        field: usize,
-    ) -> InterpResult<'tcx, PlaceTy<'tcx, M::Provenance>> {
-        let (offset, meta, field_layout) =
-            self.project_field(base.layout, self.place_meta(base)?, field)?;
-        base.offset_with_meta(offset, meta, field_layout, self)
-    }
-
-    pub fn operand_field(
-        &self,
-        base: &OpTy<'tcx, M::Provenance>,
-        field: usize,
-    ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
-        let (offset, meta, field_layout) = self.project_field(base.layout, base.meta()?, field)?;
-        base.offset_with_meta(offset, meta, field_layout, self)
-    }
-
-    //# Downcasting
-
-    pub fn mplace_downcast(
-        &self,
-        base: &MPlaceTy<'tcx, M::Provenance>,
+        base: &P,
         variant: VariantIdx,
-    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
+    ) -> InterpResult<'tcx, P> {
+        assert!(!base.meta(self)?.has_meta());
         // Downcasts only change the layout.
         // (In particular, no check about whether this is even the active variant -- that's by design,
         // see https://github.com/rust-lang/rust/issues/93688#issuecomment-1032929496.)
-        assert!(!base.meta.has_meta());
-        let mut base = *base;
-        base.layout = base.layout.for_variant(self, variant);
-        Ok(base)
-    }
-
-    pub fn place_downcast(
-        &self,
-        base: &PlaceTy<'tcx, M::Provenance>,
-        variant: VariantIdx,
-    ) -> InterpResult<'tcx, PlaceTy<'tcx, M::Provenance>> {
-        // Downcast just changes the layout
-        let mut base = base.clone();
-        base.layout = base.layout.for_variant(self, variant);
-        Ok(base)
-    }
-
-    pub fn operand_downcast(
-        &self,
-        base: &OpTy<'tcx, M::Provenance>,
-        variant: VariantIdx,
-    ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
-        // Downcast just changes the layout
-        let mut base = base.clone();
-        base.layout = base.layout.for_variant(self, variant);
-        Ok(base)
+        // So we just "offset" by 0.
+        let layout = base.layout().for_variant(self, variant);
+        if layout.abi.is_uninhabited() {
+            // `read_discriminant` should have excluded uninhabited variants... but ConstProp calls
+            // us on dead code.
+            throw_inval!(ConstPropNonsense)
+        }
+        // This cannot be `transmute` as variants *can* have a smaller size than the entire enum.
+        base.offset(Size::ZERO, layout, self)
     }
 
-    //# Slice and array indexing
-
     /// Compute the offset and field layout for accessing the given index.
-    fn project_index(
+    pub fn project_index<P: Projectable<'mir, 'tcx, M::Provenance>>(
         &self,
-        base_layout: TyAndLayout<'tcx>,
-        base_meta: MemPlaceMeta<M::Provenance>,
+        base: &P,
         index: u64,
-    ) -> InterpResult<'tcx, (Size, TyAndLayout<'tcx>)> {
+    ) -> InterpResult<'tcx, P> {
         // Not using the layout method because we want to compute on u64
-        match base_layout.fields {
+        let (offset, field_layout) = match base.layout().fields {
             abi::FieldsShape::Array { stride, count: _ } => {
                 // `count` is nonsense for slices, use the dynamic length instead.
-                let len = base_meta.len(base_layout, self)?;
+                let len = base.len(self)?;
                 if index >= len {
                     // This can only be reached in ConstProp and non-rustc-MIR.
                     throw_ub!(BoundsCheckFailed { len, index });
                 }
                 let offset = stride * index; // `Size` multiplication
                 // All fields have the same layout.
-                let field_layout = base_layout.field(self, 0);
-                Ok((offset, field_layout))
+                let field_layout = base.layout().field(self, 0);
+                (offset, field_layout)
             }
             _ => span_bug!(
                 self.cur_span(),
                 "`mplace_index` called on non-array type {:?}",
-                base_layout.ty
+                base.layout().ty
             ),
-        }
-    }
-
-    #[inline(always)]
-    pub fn operand_index(
-        &self,
-        base: &OpTy<'tcx, M::Provenance>,
-        index: u64,
-    ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
-        let (offset, field_layout) = self.project_index(base.layout, base.meta()?, index)?;
-        base.offset(offset, field_layout, self)
-    }
-
-    /// Index into an array.
-    pub fn mplace_index(
-        &self,
-        base: &MPlaceTy<'tcx, M::Provenance>,
-        index: u64,
-    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
-        let (offset, field_layout) = self.project_index(base.layout, base.meta, index)?;
-        base.offset(offset, field_layout, self)
-    }
-
-    pub fn place_index(
-        &self,
-        base: &PlaceTy<'tcx, M::Provenance>,
-        index: u64,
-    ) -> InterpResult<'tcx, PlaceTy<'tcx, M::Provenance>> {
-        let (offset, field_layout) =
-            self.project_index(base.layout, self.place_meta(base)?, index)?;
-        base.offset(offset, field_layout, self)
-    }
-
-    /// Iterates over all fields of an array. Much more efficient than doing the
-    /// same by repeatedly calling `operand_index`.
-    pub fn operand_array_fields<'a>(
-        &self,
-        base: &'a OpTy<'tcx, Prov>,
-    ) -> InterpResult<'tcx, impl Iterator<Item = InterpResult<'tcx, OpTy<'tcx, Prov>>> + 'a> {
-        let abi::FieldsShape::Array { stride, .. } = base.layout.fields else {
-            span_bug!(self.cur_span(), "operand_array_fields: expected an array layout");
         };
-        let len = base.len(self)?;
-        let field_layout = base.layout.field(self, 0);
-        let dl = &self.tcx.data_layout;
-        // `Size` multiplication
-        Ok((0..len).map(move |i| base.offset(stride * i, field_layout, dl)))
-    }
 
-    /// Iterates over all fields of an array. Much more efficient than doing the
-    /// same by repeatedly calling `place_index`.
-    pub fn place_array_fields<'a>(
-        &self,
-        base: &'a PlaceTy<'tcx, Prov>,
-    ) -> InterpResult<'tcx, impl Iterator<Item = InterpResult<'tcx, PlaceTy<'tcx, Prov>>> + 'a> {
-        let abi::FieldsShape::Array { stride, .. } = base.layout.fields else {
-            span_bug!(self.cur_span(), "place_array_fields: expected an array layout");
-        };
-        let len = self.place_meta(base)?.len(base.layout, self)?;
-        let field_layout = base.layout.field(self, 0);
-        let dl = &self.tcx.data_layout;
-        // `Size` multiplication
-        Ok((0..len).map(move |i| base.offset(stride * i, field_layout, dl)))
+        base.offset(offset, field_layout, self)
     }
 
-    //# ConstantIndex support
-
-    fn project_constant_index(
+    fn project_constant_index<P: Projectable<'mir, 'tcx, M::Provenance>>(
         &self,
-        base_layout: TyAndLayout<'tcx>,
-        base_meta: MemPlaceMeta<M::Provenance>,
+        base: &P,
         offset: u64,
         min_length: u64,
         from_end: bool,
-    ) -> InterpResult<'tcx, (Size, TyAndLayout<'tcx>)> {
-        let n = base_meta.len(base_layout, self)?;
+    ) -> InterpResult<'tcx, P> {
+        let n = base.len(self)?;
         if n < min_length {
             // This can only be reached in ConstProp and non-rustc-MIR.
             throw_ub!(BoundsCheckFailed { len: min_length, index: n });
@@ -256,49 +199,39 @@ where
             offset
         };
 
-        self.project_index(base_layout, base_meta, index)
-    }
-
-    fn operand_constant_index(
-        &self,
-        base: &OpTy<'tcx, M::Provenance>,
-        offset: u64,
-        min_length: u64,
-        from_end: bool,
-    ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
-        let (offset, layout) =
-            self.project_constant_index(base.layout, base.meta()?, offset, min_length, from_end)?;
-        base.offset(offset, layout, self)
+        self.project_index(base, index)
     }
 
-    fn place_constant_index(
+    /// Iterates over all fields of an array. Much more efficient than doing the
+    /// same by repeatedly calling `project_index`.
+    pub fn project_array_fields<'a, P: Projectable<'mir, 'tcx, M::Provenance>>(
         &self,
-        base: &PlaceTy<'tcx, M::Provenance>,
-        offset: u64,
-        min_length: u64,
-        from_end: bool,
-    ) -> InterpResult<'tcx, PlaceTy<'tcx, M::Provenance>> {
-        let (offset, layout) = self.project_constant_index(
-            base.layout,
-            self.place_meta(base)?,
-            offset,
-            min_length,
-            from_end,
-        )?;
-        base.offset(offset, layout, self)
+        base: &'a P,
+    ) -> InterpResult<'tcx, impl Iterator<Item = InterpResult<'tcx, P>> + 'a>
+    where
+        'tcx: 'a,
+    {
+        let abi::FieldsShape::Array { stride, .. } = base.layout().fields else {
+            span_bug!(self.cur_span(), "operand_array_fields: expected an array layout");
+        };
+        let len = base.len(self)?;
+        let field_layout = base.layout().field(self, 0);
+        let tcx: TyCtxt<'tcx> = *self.tcx;
+        // `Size` multiplication
+        Ok((0..len).map(move |i| {
+            base.offset_with_meta(stride * i, MemPlaceMeta::None, field_layout, &tcx)
+        }))
     }
 
-    //# Subslicing
-
-    fn project_subslice(
+    /// Subslicing
+    fn project_subslice<P: Projectable<'mir, 'tcx, M::Provenance>>(
         &self,
-        base_layout: TyAndLayout<'tcx>,
-        base_meta: MemPlaceMeta<M::Provenance>,
+        base: &P,
         from: u64,
         to: u64,
         from_end: bool,
-    ) -> InterpResult<'tcx, (Size, MemPlaceMeta<M::Provenance>, TyAndLayout<'tcx>)> {
-        let len = base_meta.len(base_layout, self)?; // also asserts that we have a type where this makes sense
+    ) -> InterpResult<'tcx, P> {
+        let len = base.len(self)?; // also asserts that we have a type where this makes sense
         let actual_to = if from_end {
             if from.checked_add(to).map_or(true, |to| to > len) {
                 // This can only be reached in ConstProp and non-rustc-MIR.
@@ -311,16 +244,20 @@ where
 
         // Not using layout method because that works with usize, and does not work with slices
         // (that have count 0 in their layout).
-        let from_offset = match base_layout.fields {
+        let from_offset = match base.layout().fields {
             abi::FieldsShape::Array { stride, .. } => stride * from, // `Size` multiplication is checked
             _ => {
-                span_bug!(self.cur_span(), "unexpected layout of index access: {:#?}", base_layout)
+                span_bug!(
+                    self.cur_span(),
+                    "unexpected layout of index access: {:#?}",
+                    base.layout()
+                )
             }
         };
 
         // Compute meta and new layout
         let inner_len = actual_to.checked_sub(from).unwrap();
-        let (meta, ty) = match base_layout.ty.kind() {
+        let (meta, ty) = match base.layout().ty.kind() {
             // It is not nice to match on the type, but that seems to be the only way to
             // implement this.
             ty::Array(inner, _) => {
@@ -328,98 +265,45 @@ where
             }
             ty::Slice(..) => {
                 let len = Scalar::from_target_usize(inner_len, self);
-                (MemPlaceMeta::Meta(len), base_layout.ty)
+                (MemPlaceMeta::Meta(len), base.layout().ty)
             }
             _ => {
-                span_bug!(self.cur_span(), "cannot subslice non-array type: `{:?}`", base_layout.ty)
+                span_bug!(
+                    self.cur_span(),
+                    "cannot subslice non-array type: `{:?}`",
+                    base.layout().ty
+                )
             }
         };
         let layout = self.layout_of(ty)?;
-        Ok((from_offset, meta, layout))
-    }
-
-    fn operand_subslice(
-        &self,
-        base: &OpTy<'tcx, M::Provenance>,
-        from: u64,
-        to: u64,
-        from_end: bool,
-    ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
-        let (from_offset, meta, layout) =
-            self.project_subslice(base.layout, base.meta()?, from, to, from_end)?;
-        base.offset_with_meta(from_offset, meta, layout, self)
-    }
 
-    pub fn place_subslice(
-        &self,
-        base: &PlaceTy<'tcx, M::Provenance>,
-        from: u64,
-        to: u64,
-        from_end: bool,
-    ) -> InterpResult<'tcx, PlaceTy<'tcx, M::Provenance>> {
-        let (from_offset, meta, layout) =
-            self.project_subslice(base.layout, self.place_meta(base)?, from, to, from_end)?;
         base.offset_with_meta(from_offset, meta, layout, self)
     }
 
-    //# Applying a general projection
-
-    /// Projects into a place.
-    #[instrument(skip(self), level = "trace")]
-    pub fn place_projection(
-        &self,
-        base: &PlaceTy<'tcx, M::Provenance>,
-        proj_elem: mir::PlaceElem<'tcx>,
-    ) -> InterpResult<'tcx, PlaceTy<'tcx, M::Provenance>> {
-        use rustc_middle::mir::ProjectionElem::*;
-        Ok(match proj_elem {
-            OpaqueCast(ty) => {
-                let mut place = base.clone();
-                place.layout = self.layout_of(ty)?;
-                place
-            }
-            Field(field, _) => self.place_field(base, field.index())?,
-            Downcast(_, variant) => self.place_downcast(base, variant)?,
-            Deref => self.deref_operand(&self.place_to_op(base)?)?.into(),
-            Index(local) => {
-                let layout = self.layout_of(self.tcx.types.usize)?;
-                let n = self.local_to_op(self.frame(), local, Some(layout))?;
-                let n = self.read_target_usize(&n)?;
-                self.place_index(base, n)?
-            }
-            ConstantIndex { offset, min_length, from_end } => {
-                self.place_constant_index(base, offset, min_length, from_end)?
-            }
-            Subslice { from, to, from_end } => self.place_subslice(base, from, to, from_end)?,
-        })
-    }
-
+    /// Applies a general projection.
     #[instrument(skip(self), level = "trace")]
-    pub fn operand_projection(
-        &self,
-        base: &OpTy<'tcx, M::Provenance>,
-        proj_elem: mir::PlaceElem<'tcx>,
-    ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+    pub fn project<P>(&self, base: &P, proj_elem: mir::PlaceElem<'tcx>) -> InterpResult<'tcx, P>
+    where
+        P: Projectable<'mir, 'tcx, M::Provenance>
+            + From<MPlaceTy<'tcx, M::Provenance>>
+            + std::fmt::Debug,
+    {
         use rustc_middle::mir::ProjectionElem::*;
         Ok(match proj_elem {
-            OpaqueCast(ty) => {
-                let mut op = base.clone();
-                op.layout = self.layout_of(ty)?;
-                op
-            }
-            Field(field, _) => self.operand_field(base, field.index())?,
-            Downcast(_, variant) => self.operand_downcast(base, variant)?,
-            Deref => self.deref_operand(base)?.into(),
+            OpaqueCast(ty) => base.transmute(self.layout_of(ty)?, self)?,
+            Field(field, _) => self.project_field(base, field.index())?,
+            Downcast(_, variant) => self.project_downcast(base, variant)?,
+            Deref => self.deref_operand(&base.to_op(self)?)?.into(),
             Index(local) => {
                 let layout = self.layout_of(self.tcx.types.usize)?;
                 let n = self.local_to_op(self.frame(), local, Some(layout))?;
                 let n = self.read_target_usize(&n)?;
-                self.operand_index(base, n)?
+                self.project_index(base, n)?
             }
             ConstantIndex { offset, min_length, from_end } => {
-                self.operand_constant_index(base, offset, min_length, from_end)?
+                self.project_constant_index(base, offset, min_length, from_end)?
             }
-            Subslice { from, to, from_end } => self.operand_subslice(base, from, to, from_end)?,
+            Subslice { from, to, from_end } => self.project_subslice(base, from, to, from_end)?,
         })
     }
 }
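
For illustration, a minimal sketch (hypothetical helper, not in this patch) of what the
unified `project` buys its callers: one generic loop now applies a whole MIR projection
chain to an `OpTy`, `PlaceTy`, or `MPlaceTy` alike, assuming only the bounds visible in
the signature above.

    // Hypothetical helper: fold a sequence of MIR projection elements over
    // any projectable base value.
    fn apply_projections<'mir, 'tcx: 'mir, M, P>(
        ecx: &InterpCx<'mir, 'tcx, M>,
        mut base: P,
        elems: &[mir::PlaceElem<'tcx>],
    ) -> InterpResult<'tcx, P>
    where
        M: Machine<'mir, 'tcx>,
        P: Projectable<'mir, 'tcx, M::Provenance>
            + From<MPlaceTy<'tcx, M::Provenance>>
            + std::fmt::Debug,
    {
        for elem in elems {
            base = ecx.project(&base, *elem)?; // same code path for operands and places
        }
        Ok(base)
    }
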
diff --git a/compiler/rustc_const_eval/src/interpret/step.rs b/compiler/rustc_const_eval/src/interpret/step.rs
index 619da8abb7d..319c422134c 100644
--- a/compiler/rustc_const_eval/src/interpret/step.rs
+++ b/compiler/rustc_const_eval/src/interpret/step.rs
@@ -8,7 +8,7 @@ use rustc_middle::mir;
 use rustc_middle::mir::interpret::{InterpResult, Scalar};
 use rustc_middle::ty::layout::LayoutOf;
 
-use super::{ImmTy, InterpCx, Machine};
+use super::{ImmTy, InterpCx, Machine, Projectable};
 use crate::util;
 
 impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
@@ -197,7 +197,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                     self.get_place_alloc_mut(&dest)?;
                 } else {
                     // Write the src to the first element.
-                    let first = self.mplace_field(&dest, 0)?;
+                    let first = self.project_index(&dest, 0)?;
                     self.copy_op(&src, &first.into(), /*allow_transmute*/ false)?;
 
                     // This is performance-sensitive code for big static/const arrays! So we
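
As an aside on the `project_index` API used in this hunk: a naive way to finish the
`Repeat` initialization would be the element-wise loop sketched below (hypothetical
code, continuing with `dest` and `self` from the hunk); the actual code that follows
instead bulk-copies the already-written first element, precisely because this is
performance-sensitive for big arrays.

    // Naive sketch only; the real code avoids this loop.
    let len = dest.len(self)?; // run-time length via the new `Projectable::len`
    let first_op: OpTy<'tcx, M::Provenance> = self.project_index(&dest, 0)?.into();
    for i in 1..len {
        let elem = self.project_index(&dest, i)?;
        self.copy_op(&first_op, &elem.into(), /*allow_transmute*/ false)?;
    }
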
diff --git a/compiler/rustc_const_eval/src/interpret/terminator.rs b/compiler/rustc_const_eval/src/interpret/terminator.rs
index f5e38570330..f934cca2517 100644
--- a/compiler/rustc_const_eval/src/interpret/terminator.rs
+++ b/compiler/rustc_const_eval/src/interpret/terminator.rs
@@ -65,8 +65,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         field: usize,
     ) -> InterpResult<'tcx, FnArg<'tcx, M::Provenance>> {
         Ok(match arg {
-            FnArg::Copy(op) => FnArg::Copy(self.operand_field(op, field)?),
-            FnArg::InPlace(place) => FnArg::InPlace(self.place_field(place, field)?),
+            FnArg::Copy(op) => FnArg::Copy(self.project_field(op, field)?),
+            FnArg::InPlace(place) => FnArg::InPlace(self.project_field(place, field)?),
         })
     }
 
@@ -382,8 +382,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             // This all has to be in memory, there are no immediate unsized values.
             let src = caller_arg_copy.assert_mem_place();
             // The destination cannot be one of these "spread args".
-            let (dest_frame, dest_local, dest_offset) =
-                callee_arg.as_mplace_or_local().right().expect("calee fn arguments must be locals");
+            let (dest_frame, dest_local, dest_offset) = callee_arg
+                .as_mplace_or_local()
+                .right()
+                .expect("callee fn arguments must be locals");
             // We are just initializing things, so there can't be anything here yet.
             assert!(matches!(
                 *self.local_to_op(&self.stack()[dest_frame], dest_local, None)?,
@@ -597,7 +599,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                         if Some(local) == body.spread_arg {
                             // Must be a tuple
                             for i in 0..dest.layout.fields.count() {
-                                let dest = self.place_field(&dest, i)?;
+                                let dest = self.project_field(&dest, i)?;
                                 let callee_abi = callee_args_abis.next().unwrap();
                                 self.pass_argument(&mut caller_args, callee_abi, &dest)?;
                             }
@@ -679,7 +681,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                             // Not there yet, search for the only non-ZST field.
                             let mut non_zst_field = None;
                             for i in 0..receiver.layout.fields.count() {
-                                let field = self.operand_field(&receiver, i)?;
+                                let field = self.project_field(&receiver, i)?;
                                 let zst =
                                     field.layout.is_zst() && field.layout.align.abi.bytes() == 1;
                                 if !zst {
@@ -705,12 +707,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 let (vptr, dyn_ty, adjusted_receiver) = if let ty::Dynamic(data, _, ty::DynStar) =
                     receiver_place.layout.ty.kind()
                 {
-                    let (recv, vptr) = self.unpack_dyn_star(&receiver_place.into())?;
+                    let (recv, vptr) = self.unpack_dyn_star(&receiver_place)?;
                     let (dyn_ty, dyn_trait) = self.get_ptr_vtable(vptr)?;
                     if dyn_trait != data.principal() {
                         throw_ub_custom!(fluent::const_eval_dyn_star_call_vtable_mismatch);
                     }
-                    let recv = recv.assert_mem_place(); // we passed an MPlaceTy to `unpack_dyn_star` so we definitely still have one
 
                     (vptr, dyn_ty, recv.ptr)
                 } else {
@@ -838,7 +839,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             }
             ty::Dynamic(_, _, ty::DynStar) => {
                 // Dropping a `dyn*`. Need to find actual drop fn.
-                self.unpack_dyn_star(&place.into())?.0.assert_mem_place()
+                self.unpack_dyn_star(&place)?.0
             }
             _ => {
                 debug_assert_eq!(
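
The two `unpack_dyn_star` call sites above no longer need `assert_mem_place` because the
function apparently no longer round-trips through `OpTy`. Judging from its call sites in
this patch (an `MPlaceTy` here, a generic `Self::V` in visitor.rs), its signature is
presumably now generic over `Projectable`, roughly as in this reconstruction:

    impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
        // Assumed shape, reconstructed from call sites; not shown in this excerpt.
        pub(super) fn unpack_dyn_star<P: Projectable<'mir, 'tcx, M::Provenance>>(
            &self,
            val: &P,
        ) -> InterpResult<'tcx, (P, Pointer<Option<M::Provenance>>)> {
            // body elided: splits the `dyn*` pair into data and vtable pointer
            unimplemented!()
        }
    }
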
diff --git a/compiler/rustc_const_eval/src/interpret/validity.rs b/compiler/rustc_const_eval/src/interpret/validity.rs
index 21c655988a0..6618c70ac75 100644
--- a/compiler/rustc_const_eval/src/interpret/validity.rs
+++ b/compiler/rustc_const_eval/src/interpret/validity.rs
@@ -29,7 +29,7 @@ use std::hash::Hash;
 use super::UndefinedBehaviorInfo::*;
 use super::{
     AllocId, CheckInAllocMsg, GlobalAlloc, ImmTy, Immediate, InterpCx, InterpResult, MPlaceTy,
-    Machine, MemPlaceMeta, OpTy, Pointer, Scalar, ValueVisitor,
+    Machine, MemPlaceMeta, OpTy, Pointer, Projectable, Scalar, ValueVisitor,
 };
 
 macro_rules! throw_validation_failure {
@@ -462,6 +462,8 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
 
     /// Check if this is a value of primitive type, and if yes check the validity of the value
     /// at that type. Return `true` if the type is indeed primitive.
+    ///
+    /// Note that not all of these have `FieldsShape::Primitive`, e.g. wide references.
     fn try_visit_primitive(
         &mut self,
         value: &OpTy<'tcx, M::Provenance>,
@@ -655,15 +657,14 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
     ) -> InterpResult<'tcx, VariantIdx> {
         self.with_elem(PathElem::EnumTag, move |this| {
             Ok(try_validation!(
-                this.ecx.read_discriminant(op),
+                this.ecx.read_discriminant(op).map(|(_, idx)| idx),
                 this.path,
                 InvalidTag(val) => InvalidEnumTag {
                     value: format!("{val:x}"),
                 },
-
+                UninhabitedEnumVariantRead(_) => UninhabitedEnumTag,
                 InvalidUninitBytes(None) => UninitEnumTag,
-            )
-            .1)
+            ))
         })
     }
 
@@ -733,60 +734,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
             }
         }
 
-        // Recursively walk the value at its type.
-        self.walk_value(op)?;
-
-        // *After* all of this, check the ABI. We need to check the ABI to handle
-        // types like `NonNull` where the `Scalar` info is more restrictive than what
-        // the fields say (`rustc_layout_scalar_valid_range_start`).
-        // But in most cases, this will just propagate what the fields say,
-        // and then we want the error to point at the field -- so, first recurse,
-        // then check ABI.
-        //
-        // FIXME: We could avoid some redundant checks here. For newtypes wrapping
-        // scalars, we do the same check on every "level" (e.g., first we check
-        // MyNewtype and then the scalar in there).
-        match op.layout.abi {
-            Abi::Uninhabited => {
-                let ty = op.layout.ty;
-                throw_validation_failure!(self.path, UninhabitedVal { ty });
-            }
-            Abi::Scalar(scalar_layout) => {
-                if !scalar_layout.is_uninit_valid() {
-                    // There is something to check here.
-                    let scalar = self.read_scalar(op, ExpectedKind::InitScalar)?;
-                    self.visit_scalar(scalar, scalar_layout)?;
-                }
-            }
-            Abi::ScalarPair(a_layout, b_layout) => {
-                // We can only proceed if *both* scalars need to be initialized.
-                // FIXME: find a way to also check ScalarPair when one side can be uninit but
-                // the other must be init.
-                if !a_layout.is_uninit_valid() && !b_layout.is_uninit_valid() {
-                    let (a, b) =
-                        self.read_immediate(op, ExpectedKind::InitScalar)?.to_scalar_pair();
-                    self.visit_scalar(a, a_layout)?;
-                    self.visit_scalar(b, b_layout)?;
-                }
-            }
-            Abi::Vector { .. } => {
-                // No checks here, we assume layout computation gets this right.
-                // (This is harder to check since Miri does not represent these as `Immediate`. We
-                // also cannot use field projections since this might be a newtype around a vector.)
-            }
-            Abi::Aggregate { .. } => {
-                // Nothing to do.
-            }
-        }
-
-        Ok(())
-    }
-
-    fn visit_aggregate(
-        &mut self,
-        op: &OpTy<'tcx, M::Provenance>,
-        fields: impl Iterator<Item = InterpResult<'tcx, Self::V>>,
-    ) -> InterpResult<'tcx> {
+        // Recursively walk the value at its type. Apply optimizations for some large types.
         match op.layout.ty.kind() {
             ty::Str => {
                 let mplace = op.assert_mem_place(); // strings are unsized and hence never immediate
@@ -874,12 +822,58 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
             // ZST type, so either validation fails for all elements or none.
             ty::Array(tys, ..) | ty::Slice(tys) if self.ecx.layout_of(*tys)?.is_zst() => {
                 // Validate just the first element (if any).
-                self.walk_aggregate(op, fields.take(1))?
+                if op.len(self.ecx)? > 0 {
+                    self.visit_field(op, 0, &self.ecx.project_index(op, 0)?)?;
+                }
             }
             _ => {
-                self.walk_aggregate(op, fields)? // default handler
+                self.walk_value(op)?; // default handler
             }
         }
+
+        // *After* all of this, check the ABI. We need to check the ABI to handle
+        // types like `NonNull` where the `Scalar` info is more restrictive than what
+        // the fields say (`rustc_layout_scalar_valid_range_start`).
+        // But in most cases, this will just propagate what the fields say,
+        // and then we want the error to point at the field -- so, first recurse,
+        // then check ABI.
+        //
+        // FIXME: We could avoid some redundant checks here. For newtypes wrapping
+        // scalars, we do the same check on every "level" (e.g., first we check
+        // MyNewtype and then the scalar in there).
+        match op.layout.abi {
+            Abi::Uninhabited => {
+                let ty = op.layout.ty;
+                throw_validation_failure!(self.path, UninhabitedVal { ty });
+            }
+            Abi::Scalar(scalar_layout) => {
+                if !scalar_layout.is_uninit_valid() {
+                    // There is something to check here.
+                    let scalar = self.read_scalar(op, ExpectedKind::InitScalar)?;
+                    self.visit_scalar(scalar, scalar_layout)?;
+                }
+            }
+            Abi::ScalarPair(a_layout, b_layout) => {
+                // We can only proceed if *both* scalars need to be initialized.
+                // FIXME: find a way to also check ScalarPair when one side can be uninit but
+                // the other must be init.
+                if !a_layout.is_uninit_valid() && !b_layout.is_uninit_valid() {
+                    let (a, b) =
+                        self.read_immediate(op, ExpectedKind::InitScalar)?.to_scalar_pair();
+                    self.visit_scalar(a, a_layout)?;
+                    self.visit_scalar(b, b_layout)?;
+                }
+            }
+            Abi::Vector { .. } => {
+                // No checks here, we assume layout computation gets this right.
+                // (This is harder to check since Miri does not represent these as `Immediate`. We
+                // also cannot use field projections since this might be a newtype around a vector.)
+            }
+            Abi::Aggregate { .. } => {
+                // Nothing to do.
+            }
+        }
+
         Ok(())
     }
 }
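
To make the `NonNull` remark in the moved comment concrete (illustrative example, not
from the patch): the restrictive valid range lives on the outer layout, not on the
raw-pointer field inside, so the field recursion alone would accept a null pointer and
only the post-recursion `Abi::Scalar` check rejects it.

    use core::{mem, ptr::NonNull};

    // The field walk sees only a raw pointer (any initialized value is fine);
    // the ABI check then enforces `rustc_layout_scalar_valid_range_start(1)`,
    // so const evaluation rejects this with an out-of-range-scalar error.
    const BAD: NonNull<u8> = unsafe { mem::transmute(0usize) };
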
diff --git a/compiler/rustc_const_eval/src/interpret/visitor.rs b/compiler/rustc_const_eval/src/interpret/visitor.rs
index 8d7c1953abe..a50233fa3de 100644
--- a/compiler/rustc_const_eval/src/interpret/visitor.rs
+++ b/compiler/rustc_const_eval/src/interpret/visitor.rs
@@ -1,544 +1,204 @@
 //! Visitor for a run-time value with a given layout: Traverse enums, structs and other compound
 //! types until we arrive at the leaves, with custom handling for primitive types.
 
+use rustc_index::IndexVec;
 use rustc_middle::mir::interpret::InterpResult;
 use rustc_middle::ty;
-use rustc_middle::ty::layout::TyAndLayout;
+use rustc_target::abi::FieldIdx;
 use rustc_target::abi::{FieldsShape, VariantIdx, Variants};
 
 use std::num::NonZeroUsize;
 
-use super::{InterpCx, MPlaceTy, Machine, OpTy, PlaceTy};
+use super::{InterpCx, MPlaceTy, Machine, Projectable};
 
-/// A thing that we can project into, and that has a layout.
-/// This wouldn't have to depend on `Machine` but with the current type inference,
-/// that's just more convenient to work with (avoids repeating all the `Machine` bounds).
-pub trait Value<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>>: Sized {
-    /// Gets this value's layout.
-    fn layout(&self) -> TyAndLayout<'tcx>;
+/// How to traverse a value and what to do when we are at the leaves.
+pub trait ValueVisitor<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>>: Sized {
+    type V: Projectable<'mir, 'tcx, M::Provenance>
+        + From<MPlaceTy<'tcx, M::Provenance>>
+        + std::fmt::Debug;
 
-    /// Makes this into an `OpTy`, in a cheap way that is good for reading.
-    fn to_op_for_read(
-        &self,
-        ecx: &InterpCx<'mir, 'tcx, M>,
-    ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>>;
-
-    /// Makes this into an `OpTy`, in a potentially more expensive way that is good for projections.
-    fn to_op_for_proj(
-        &self,
-        ecx: &InterpCx<'mir, 'tcx, M>,
-    ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
-        self.to_op_for_read(ecx)
-    }
-
-    /// Creates this from an `OpTy`.
-    ///
-    /// If `to_op_for_proj` only ever produces `Indirect` operands, then this one is definitely `Indirect`.
-    fn from_op(op: &OpTy<'tcx, M::Provenance>) -> Self;
-
-    /// Projects to the given enum variant.
-    fn project_downcast(
-        &self,
-        ecx: &InterpCx<'mir, 'tcx, M>,
-        variant: VariantIdx,
-    ) -> InterpResult<'tcx, Self>;
-
-    /// Projects to the n-th field.
-    fn project_field(
-        &self,
-        ecx: &InterpCx<'mir, 'tcx, M>,
-        field: usize,
-    ) -> InterpResult<'tcx, Self>;
-}
-
-/// A thing that we can project into given *mutable* access to `ecx`, and that has a layout.
-/// This wouldn't have to depend on `Machine` but with the current type inference,
-/// that's just more convenient to work with (avoids repeating all the `Machine` bounds).
-pub trait ValueMut<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>>: Sized {
-    /// Gets this value's layout.
-    fn layout(&self) -> TyAndLayout<'tcx>;
-
-    /// Makes this into an `OpTy`, in a cheap way that is good for reading.
-    fn to_op_for_read(
-        &self,
-        ecx: &InterpCx<'mir, 'tcx, M>,
-    ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>>;
-
-    /// Makes this into an `OpTy`, in a potentially more expensive way that is good for projections.
-    fn to_op_for_proj(
-        &self,
-        ecx: &mut InterpCx<'mir, 'tcx, M>,
-    ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>>;
-
-    /// Creates this from an `OpTy`.
-    ///
-    /// If `to_op_for_proj` only ever produces `Indirect` operands, then this one is definitely `Indirect`.
-    fn from_op(op: &OpTy<'tcx, M::Provenance>) -> Self;
-
-    /// Projects to the given enum variant.
-    fn project_downcast(
-        &self,
-        ecx: &InterpCx<'mir, 'tcx, M>,
-        variant: VariantIdx,
-    ) -> InterpResult<'tcx, Self>;
-
-    /// Projects to the n-th field.
-    fn project_field(
-        &self,
-        ecx: &InterpCx<'mir, 'tcx, M>,
-        field: usize,
-    ) -> InterpResult<'tcx, Self>;
-}
-
-// We cannot have a general impl which shows that Value implies ValueMut. (When we do, it says we
-// cannot `impl ValueMut for PlaceTy` because some downstream crate could `impl Value for PlaceTy`.)
-// So we have some copy-paste here. (We could have a macro but since we only have 2 types with this
-// double-impl, that would barely make the code shorter, if at all.)
-
-impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> Value<'mir, 'tcx, M> for OpTy<'tcx, M::Provenance> {
-    #[inline(always)]
-    fn layout(&self) -> TyAndLayout<'tcx> {
-        self.layout
-    }
-
-    #[inline(always)]
-    fn to_op_for_read(
-        &self,
-        _ecx: &InterpCx<'mir, 'tcx, M>,
-    ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
-        Ok(self.clone())
-    }
-
-    #[inline(always)]
-    fn from_op(op: &OpTy<'tcx, M::Provenance>) -> Self {
-        op.clone()
-    }
-
-    #[inline(always)]
-    fn project_downcast(
-        &self,
-        ecx: &InterpCx<'mir, 'tcx, M>,
-        variant: VariantIdx,
-    ) -> InterpResult<'tcx, Self> {
-        ecx.operand_downcast(self, variant)
-    }
-
-    #[inline(always)]
-    fn project_field(
-        &self,
-        ecx: &InterpCx<'mir, 'tcx, M>,
-        field: usize,
-    ) -> InterpResult<'tcx, Self> {
-        ecx.operand_field(self, field)
-    }
-}
-
-impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueMut<'mir, 'tcx, M>
-    for OpTy<'tcx, M::Provenance>
-{
-    #[inline(always)]
-    fn layout(&self) -> TyAndLayout<'tcx> {
-        self.layout
-    }
-
-    #[inline(always)]
-    fn to_op_for_read(
-        &self,
-        _ecx: &InterpCx<'mir, 'tcx, M>,
-    ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
-        Ok(self.clone())
-    }
-
-    #[inline(always)]
-    fn to_op_for_proj(
-        &self,
-        _ecx: &mut InterpCx<'mir, 'tcx, M>,
-    ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
-        Ok(self.clone())
-    }
-
-    #[inline(always)]
-    fn from_op(op: &OpTy<'tcx, M::Provenance>) -> Self {
-        op.clone()
-    }
-
-    #[inline(always)]
-    fn project_downcast(
-        &self,
-        ecx: &InterpCx<'mir, 'tcx, M>,
-        variant: VariantIdx,
-    ) -> InterpResult<'tcx, Self> {
-        ecx.operand_downcast(self, variant)
-    }
-
-    #[inline(always)]
-    fn project_field(
-        &self,
-        ecx: &InterpCx<'mir, 'tcx, M>,
-        field: usize,
-    ) -> InterpResult<'tcx, Self> {
-        ecx.operand_field(self, field)
-    }
-}
-
-impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> Value<'mir, 'tcx, M>
-    for MPlaceTy<'tcx, M::Provenance>
-{
-    #[inline(always)]
-    fn layout(&self) -> TyAndLayout<'tcx> {
-        self.layout
-    }
-
-    #[inline(always)]
-    fn to_op_for_read(
-        &self,
-        _ecx: &InterpCx<'mir, 'tcx, M>,
-    ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
-        Ok(self.into())
-    }
-
-    #[inline(always)]
-    fn from_op(op: &OpTy<'tcx, M::Provenance>) -> Self {
-        // assert is justified because our `to_op_for_read` only ever produces `Indirect` operands.
-        op.assert_mem_place()
-    }
-
-    #[inline(always)]
-    fn project_downcast(
-        &self,
-        ecx: &InterpCx<'mir, 'tcx, M>,
-        variant: VariantIdx,
-    ) -> InterpResult<'tcx, Self> {
-        ecx.mplace_downcast(self, variant)
-    }
-
-    #[inline(always)]
-    fn project_field(
-        &self,
-        ecx: &InterpCx<'mir, 'tcx, M>,
-        field: usize,
-    ) -> InterpResult<'tcx, Self> {
-        ecx.mplace_field(self, field)
-    }
-}
-
-impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueMut<'mir, 'tcx, M>
-    for MPlaceTy<'tcx, M::Provenance>
-{
-    #[inline(always)]
-    fn layout(&self) -> TyAndLayout<'tcx> {
-        self.layout
-    }
-
-    #[inline(always)]
-    fn to_op_for_read(
-        &self,
-        _ecx: &InterpCx<'mir, 'tcx, M>,
-    ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
-        Ok(self.into())
-    }
-
-    #[inline(always)]
-    fn to_op_for_proj(
-        &self,
-        _ecx: &mut InterpCx<'mir, 'tcx, M>,
-    ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
-        Ok(self.into())
-    }
-
-    #[inline(always)]
-    fn from_op(op: &OpTy<'tcx, M::Provenance>) -> Self {
-        // assert is justified because our `to_op_for_proj` only ever produces `Indirect` operands.
-        op.assert_mem_place()
-    }
-
-    #[inline(always)]
-    fn project_downcast(
-        &self,
-        ecx: &InterpCx<'mir, 'tcx, M>,
-        variant: VariantIdx,
-    ) -> InterpResult<'tcx, Self> {
-        ecx.mplace_downcast(self, variant)
-    }
-
-    #[inline(always)]
-    fn project_field(
-        &self,
-        ecx: &InterpCx<'mir, 'tcx, M>,
-        field: usize,
-    ) -> InterpResult<'tcx, Self> {
-        ecx.mplace_field(self, field)
-    }
-}
-
-impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueMut<'mir, 'tcx, M>
-    for PlaceTy<'tcx, M::Provenance>
-{
-    #[inline(always)]
-    fn layout(&self) -> TyAndLayout<'tcx> {
-        self.layout
-    }
-
-    #[inline(always)]
-    fn to_op_for_read(
-        &self,
-        ecx: &InterpCx<'mir, 'tcx, M>,
-    ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
-        // No need for `force_allocation` since we are just going to read from this.
-        ecx.place_to_op(self)
-    }
+    /// The visitor must have an `InterpCx` in it.
+    fn ecx(&self) -> &InterpCx<'mir, 'tcx, M>;
 
+    /// `read_discriminant` can be hooked for better error messages.
     #[inline(always)]
-    fn to_op_for_proj(
-        &self,
-        ecx: &mut InterpCx<'mir, 'tcx, M>,
-    ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
-        // We `force_allocation` here so that `from_op` below can work.
-        Ok(ecx.force_allocation(self)?.into())
+    fn read_discriminant(&mut self, v: &Self::V) -> InterpResult<'tcx, VariantIdx> {
+        Ok(self.ecx().read_discriminant(&v.to_op(self.ecx())?)?.1)
     }
 
-    #[inline(always)]
-    fn from_op(op: &OpTy<'tcx, M::Provenance>) -> Self {
-        // assert is justified because our `to_op` only ever produces `Indirect` operands.
-        op.assert_mem_place().into()
-    }
-
-    #[inline(always)]
-    fn project_downcast(
-        &self,
-        ecx: &InterpCx<'mir, 'tcx, M>,
-        variant: VariantIdx,
-    ) -> InterpResult<'tcx, Self> {
-        ecx.place_downcast(self, variant)
-    }
-
-    #[inline(always)]
-    fn project_field(
-        &self,
-        ecx: &InterpCx<'mir, 'tcx, M>,
-        field: usize,
-    ) -> InterpResult<'tcx, Self> {
-        ecx.place_field(self, field)
-    }
-}
-
-macro_rules! make_value_visitor {
-    ($visitor_trait:ident, $value_trait:ident, $($mutability:ident)?) => {
-        /// How to traverse a value and what to do when we are at the leaves.
-        pub trait $visitor_trait<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>>: Sized {
-            type V: $value_trait<'mir, 'tcx, M>;
-
-            /// The visitor must have an `InterpCx` in it.
-            fn ecx(&$($mutability)? self)
-                -> &$($mutability)? InterpCx<'mir, 'tcx, M>;
-
-            /// `read_discriminant` can be hooked for better error messages.
-            #[inline(always)]
-            fn read_discriminant(
-                &mut self,
-                op: &OpTy<'tcx, M::Provenance>,
-            ) -> InterpResult<'tcx, VariantIdx> {
-                Ok(self.ecx().read_discriminant(op)?.1)
-            }
-
-            // Recursive actions, ready to be overloaded.
-            /// Visits the given value, dispatching as appropriate to more specialized visitors.
-            #[inline(always)]
-            fn visit_value(&mut self, v: &Self::V) -> InterpResult<'tcx>
-            {
-                self.walk_value(v)
-            }
-            /// Visits the given value as a union. No automatic recursion can happen here.
-            #[inline(always)]
-            fn visit_union(&mut self, _v: &Self::V, _fields: NonZeroUsize) -> InterpResult<'tcx>
-            {
-                Ok(())
-            }
-            /// Visits the given value as the pointer of a `Box`. There is nothing to recurse into.
-            /// The type of `v` will be a raw pointer, but this is a field of `Box<T>` and the
-            /// pointee type is the actual `T`.
-            #[inline(always)]
-            fn visit_box(&mut self, _v: &Self::V) -> InterpResult<'tcx>
-            {
-                Ok(())
+    /// This function provides the chance to change the order in which fields are visited for
+    /// `FieldsShape::Arbitrary`: the order of fields will be
+    /// `(0..num_fields).map(aggregate_field_order)`.
+    ///
+    /// The default means we iterate in source declaration order; alternatively this can do an
+    /// inverse lookup in `memory_index` to use memory field order instead.
+    #[inline(always)]
+    fn aggregate_field_order(_memory_index: &IndexVec<FieldIdx, u32>, idx: usize) -> usize {
+        idx
+    }
+
+    // Recursive actions, ready to be overloaded.
+    /// Visits the given value, dispatching as appropriate to more specialized visitors.
+    #[inline(always)]
+    fn visit_value(&mut self, v: &Self::V) -> InterpResult<'tcx> {
+        self.walk_value(v)
+    }
+    /// Visits the given value as a union. No automatic recursion can happen here.
+    #[inline(always)]
+    fn visit_union(&mut self, _v: &Self::V, _fields: NonZeroUsize) -> InterpResult<'tcx> {
+        Ok(())
+    }
+    /// Visits the given value as the pointer of a `Box`. There is nothing to recurse into.
+    /// The type of `v` will be a raw pointer, but this is a field of `Box<T>` and the
+    /// pointee type is the actual `T`.
+    #[inline(always)]
+    fn visit_box(&mut self, _v: &Self::V) -> InterpResult<'tcx> {
+        Ok(())
+    }
+
+    /// Called each time we recurse down to a field of a "product-like" aggregate
+    /// (structs, tuples, arrays and the like, but not enums), passing in old (outer)
+    /// and new (inner) value.
+    /// This gives the visitor the chance to track the stack of nested fields that
+    /// we are descending through.
+    #[inline(always)]
+    fn visit_field(
+        &mut self,
+        _old_val: &Self::V,
+        _field: usize,
+        new_val: &Self::V,
+    ) -> InterpResult<'tcx> {
+        self.visit_value(new_val)
+    }
+    /// Called when recursing into an enum variant.
+    /// This gives the visitor the chance to track the stack of nested fields that
+    /// we are descending through.
+    #[inline(always)]
+    fn visit_variant(
+        &mut self,
+        _old_val: &Self::V,
+        _variant: VariantIdx,
+        new_val: &Self::V,
+    ) -> InterpResult<'tcx> {
+        self.visit_value(new_val)
+    }
+
+    fn walk_value(&mut self, v: &Self::V) -> InterpResult<'tcx> {
+        let ty = v.layout().ty;
+        trace!("walk_value: type: {ty}");
+
+        // Special treatment for special types, where the (static) layout is not sufficient.
+        match *ty.kind() {
+            // If it is a trait object, switch to the real type that was used to create it.
+            ty::Dynamic(_, _, ty::Dyn) => {
+                // Dyn types. This is unsized, and the actual dynamic type of the data is given by the
+                // vtable stored in the place metadata.
+                // unsized values are never immediate, so we can assert_mem_place
+                let op = v.to_op(self.ecx())?;
+                let dest = op.assert_mem_place();
+                let inner_mplace = self.ecx().unpack_dyn_trait(&dest)?.0;
+                trace!("walk_value: dyn object layout: {:#?}", inner_mplace.layout);
+                // recurse with the inner type
+                return self.visit_field(&v, 0, &inner_mplace.into());
             }
-            /// Visits this value as an aggregate, you are getting an iterator yielding
-            /// all the fields (still in an `InterpResult`, you have to do error handling yourself).
-            /// Recurses into the fields.
-            #[inline(always)]
-            fn visit_aggregate(
-                &mut self,
-                v: &Self::V,
-                fields: impl Iterator<Item=InterpResult<'tcx, Self::V>>,
-            ) -> InterpResult<'tcx> {
-                self.walk_aggregate(v, fields)
+            ty::Dynamic(_, _, ty::DynStar) => {
+                // DynStar types. Very different from a dyn type (but strangely part of the
+                // same variant in `TyKind`): These are pairs where the 2nd component is the
+                // vtable, and the first component is the data (which must be ptr-sized).
+                let data = self.ecx().unpack_dyn_star(v)?.0;
+                return self.visit_field(&v, 0, &data);
             }
-
-            /// Called each time we recurse down to a field of a "product-like" aggregate
-            /// (structs, tuples, arrays and the like, but not enums), passing in old (outer)
-            /// and new (inner) value.
-            /// This gives the visitor the chance to track the stack of nested fields that
-            /// we are descending through.
-            #[inline(always)]
-            fn visit_field(
-                &mut self,
-                _old_val: &Self::V,
-                _field: usize,
-                new_val: &Self::V,
-            ) -> InterpResult<'tcx> {
-                self.visit_value(new_val)
+            // Slices do not need special handling here: they have `Array` field
+            // placement with length 0, so we enter the `Array` case below which
+            // indirectly uses the metadata to determine the actual length.
+
+            // However, `Box`... let's talk about `Box`.
+            ty::Adt(def, ..) if def.is_box() => {
+                // `Box` is a hybrid primitive-library-defined type that on the one hand is
+                // a dereferenceable pointer, on the other hand has *basically arbitrary
+                // user-defined layout* since the user controls the 'allocator' field. So it
+                // cannot be treated like a normal pointer, since it does not fit into an
+                // `Immediate`. Yeah, it is quite terrible. But many visitors want to do
+                // something with "all boxed pointers", so we handle this mess for them.
+                //
+                // When we hit a `Box`, we do not do the usual field recursion; instead,
+                // we (a) call `visit_box` on the pointer value, and (b) recurse on the
+                // allocator field. We also assert tons of things to ensure we do not miss
+                // any other fields.
+
+                // `Box` has two fields: the pointer we care about, and the allocator.
+                assert_eq!(v.layout().fields.count(), 2, "`Box` must have exactly 2 fields");
+                let (unique_ptr, alloc) =
+                    (self.ecx().project_field(v, 0)?, self.ecx().project_field(v, 1)?);
+                // Unfortunately there is some type junk in the way here: `unique_ptr` is a `Unique`...
+                // (which means another 2 fields, the second of which is a `PhantomData`)
+                assert_eq!(unique_ptr.layout().fields.count(), 2);
+                let (nonnull_ptr, phantom) = (
+                    self.ecx().project_field(&unique_ptr, 0)?,
+                    self.ecx().project_field(&unique_ptr, 1)?,
+                );
+                assert!(
+                    phantom.layout().ty.ty_adt_def().is_some_and(|adt| adt.is_phantom_data()),
+                    "2nd field of `Unique` should be PhantomData but is {:?}",
+                    phantom.layout().ty,
+                );
+                // ... that contains a `NonNull`... (gladly, only a single field here)
+                assert_eq!(nonnull_ptr.layout().fields.count(), 1);
+                let raw_ptr = self.ecx().project_field(&nonnull_ptr, 0)?; // the actual raw ptr
+                // ... whose only field finally is a raw ptr we can dereference.
+                self.visit_box(&raw_ptr)?;
+
+                // The second `Box` field is the allocator, which we recursively check for validity
+                // like in regular structs.
+                self.visit_field(v, 1, &alloc)?;
+
+                // We visited all parts of this one.
+                return Ok(());
             }
-            /// Called when recursing into an enum variant.
-            /// This gives the visitor the chance to track the stack of nested fields that
-            /// we are descending through.
-            #[inline(always)]
-            fn visit_variant(
-                &mut self,
-                _old_val: &Self::V,
-                _variant: VariantIdx,
-                new_val: &Self::V,
-            ) -> InterpResult<'tcx> {
-                self.visit_value(new_val)
+            _ => {}
+        };
+
+        // Visit the fields of this value.
+        match &v.layout().fields {
+            FieldsShape::Primitive => {}
+            &FieldsShape::Union(fields) => {
+                self.visit_union(v, fields)?;
             }
-
-            // Default recursors. Not meant to be overloaded.
-            fn walk_aggregate(
-                &mut self,
-                v: &Self::V,
-                fields: impl Iterator<Item=InterpResult<'tcx, Self::V>>,
-            ) -> InterpResult<'tcx> {
-                // Now iterate over it.
-                for (idx, field_val) in fields.enumerate() {
-                    self.visit_field(v, idx, &field_val?)?;
+            FieldsShape::Arbitrary { offsets, memory_index } => {
+                for idx in 0..offsets.len() {
+                    let idx = Self::aggregate_field_order(memory_index, idx);
+                    let field = self.ecx().project_field(v, idx)?;
+                    self.visit_field(v, idx, &field)?;
                 }
-                Ok(())
             }
-            fn walk_value(&mut self, v: &Self::V) -> InterpResult<'tcx>
-            {
-                let ty = v.layout().ty;
-                trace!("walk_value: type: {ty}");
-
-                // Special treatment for special types, where the (static) layout is not sufficient.
-                match *ty.kind() {
-                    // If it is a trait object, switch to the real type that was used to create it.
-                    ty::Dynamic(_, _, ty::Dyn) => {
-                        // Dyn types. This is unsized, and the actual dynamic type of the data is given by the
-                        // vtable stored in the place metadata.
-                        // unsized values are never immediate, so we can assert_mem_place
-                        let op = v.to_op_for_read(self.ecx())?;
-                        let dest = op.assert_mem_place();
-                        let inner_mplace = self.ecx().unpack_dyn_trait(&dest)?.0;
-                        trace!("walk_value: dyn object layout: {:#?}", inner_mplace.layout);
-                        // recurse with the inner type
-                        return self.visit_field(&v, 0, &$value_trait::from_op(&inner_mplace.into()));
-                    },
-                    ty::Dynamic(_, _, ty::DynStar) => {
-                        // DynStar types. Very different from a dyn type (but strangely part of the
-                        // same variant in `TyKind`): These are pairs where the 2nd component is the
-                        // vtable, and the first component is the data (which must be ptr-sized).
-                        let op = v.to_op_for_proj(self.ecx())?;
-                        let data = self.ecx().unpack_dyn_star(&op)?.0;
-                        return self.visit_field(&v, 0, &$value_trait::from_op(&data));
-                    }
-                    // Slices do not need special handling here: they have `Array` field
-                    // placement with length 0, so we enter the `Array` case below which
-                    // indirectly uses the metadata to determine the actual length.
-
-                    // However, `Box`... let's talk about `Box`.
-                    ty::Adt(def, ..) if def.is_box() => {
-                        // `Box` is a hybrid primitive-library-defined type that one the one hand is
-                        // a dereferenceable pointer, on the other hand has *basically arbitrary
-                        // user-defined layout* since the user controls the 'allocator' field. So it
-                        // cannot be treated like a normal pointer, since it does not fit into an
-                        // `Immediate`. Yeah, it is quite terrible. But many visitors want to do
-                        // something with "all boxed pointers", so we handle this mess for them.
-                        //
-                        // When we hit a `Box`, we do not do the usual `visit_aggregate`; instead,
-                        // we (a) call `visit_box` on the pointer value, and (b) recurse on the
-                        // allocator field. We also assert tons of things to ensure we do not miss
-                        // any other fields.
-
-                        // `Box` has two fields: the pointer we care about, and the allocator.
-                        assert_eq!(v.layout().fields.count(), 2, "`Box` must have exactly 2 fields");
-                        let (unique_ptr, alloc) =
-                            (v.project_field(self.ecx(), 0)?, v.project_field(self.ecx(), 1)?);
-                        // Unfortunately there is some type junk in the way here: `unique_ptr` is a `Unique`...
-                        // (which means another 2 fields, the second of which is a `PhantomData`)
-                        assert_eq!(unique_ptr.layout().fields.count(), 2);
-                        let (nonnull_ptr, phantom) = (
-                            unique_ptr.project_field(self.ecx(), 0)?,
-                            unique_ptr.project_field(self.ecx(), 1)?,
-                        );
-                        assert!(
-                            phantom.layout().ty.ty_adt_def().is_some_and(|adt| adt.is_phantom_data()),
-                            "2nd field of `Unique` should be PhantomData but is {:?}",
-                            phantom.layout().ty,
-                        );
-                        // ... that contains a `NonNull`... (gladly, only a single field here)
-                        assert_eq!(nonnull_ptr.layout().fields.count(), 1);
-                        let raw_ptr = nonnull_ptr.project_field(self.ecx(), 0)?; // the actual raw ptr
-                        // ... whose only field finally is a raw ptr we can dereference.
-                        self.visit_box(&raw_ptr)?;
-
-                        // The second `Box` field is the allocator, which we recursively check for validity
-                        // like in regular structs.
-                        self.visit_field(v, 1, &alloc)?;
-
-                        // We visited all parts of this one.
-                        return Ok(());
-                    }
-                    _ => {},
-                };
-
-                // Visit the fields of this value.
-                match &v.layout().fields {
-                    FieldsShape::Primitive => {}
-                    &FieldsShape::Union(fields) => {
-                        self.visit_union(v, fields)?;
-                    }
-                    FieldsShape::Arbitrary { offsets, .. } => {
-                        // FIXME: We collect in a vec because otherwise there are lifetime
-                        // errors: Projecting to a field needs access to `ecx`.
-                        let fields: Vec<InterpResult<'tcx, Self::V>> =
-                            (0..offsets.len()).map(|i| {
-                                v.project_field(self.ecx(), i)
-                            })
-                            .collect();
-                        self.visit_aggregate(v, fields.into_iter())?;
-                    }
-                    FieldsShape::Array { .. } => {
-                        // Let's get an mplace (or immediate) first.
-                        // FIXME: This might `force_allocate` if `v` is a `PlaceTy`!
-                        let op = v.to_op_for_proj(self.ecx())?;
-                        // Now we can go over all the fields.
-                        // This uses the *run-time length*, i.e., if we are a slice,
-                        // the dynamic info from the metadata is used.
-                        let iter = self.ecx().operand_array_fields(&op)?
-                            .map(|f| f.and_then(|f| {
-                                Ok($value_trait::from_op(&f))
-                            }));
-                        self.visit_aggregate(v, iter)?;
-                    }
+            FieldsShape::Array { .. } => {
+                for (idx, field) in self.ecx().project_array_fields(v)?.enumerate() {
+                    self.visit_field(v, idx, &field?)?;
                 }
+            }
+        }
 
-                match v.layout().variants {
-                    // If this is a multi-variant layout, find the right variant and proceed
-                    // with *its* fields.
-                    Variants::Multiple { .. } => {
-                        let op = v.to_op_for_read(self.ecx())?;
-                        let idx = self.read_discriminant(&op)?;
-                        let inner = v.project_downcast(self.ecx(), idx)?;
-                        trace!("walk_value: variant layout: {:#?}", inner.layout());
-                        // recurse with the inner type
-                        self.visit_variant(v, idx, &inner)
-                    }
-                    // For single-variant layouts, we already did anything there is to do.
-                    Variants::Single { .. } => Ok(())
-                }
+        match v.layout().variants {
+            // If this is a multi-variant layout, find the right variant and proceed
+            // with *its* fields.
+            Variants::Multiple { .. } => {
+                let idx = self.read_discriminant(v)?;
+                // There are 3 cases where downcasts can turn a Scalar/ScalarPair into a different ABI which
+                // could be a problem for `ImmTy` (see layout_sanity_check):
+                // - variant.size == Size::ZERO: works fine because `ImmTy::offset` has a special case for
+                //   zero-sized layouts.
+                // - variant.fields.count() == 0: works fine because `ImmTy::offset` has a special case for
+                //   zero-field aggregates.
+                // - variant.abi.is_uninhabited(): triggers UB in `read_discriminant` so we never get here.
+                let inner = self.ecx().project_downcast(v, idx)?;
+                trace!("walk_value: variant layout: {:#?}", inner.layout());
+                // recurse with the inner type
+                self.visit_variant(v, idx, &inner)?;
             }
+            // For single-variant layouts, we already did anything there is to do.
+            Variants::Single { .. } => {}
         }
+
+        Ok(())
     }
 }
-
-make_value_visitor!(ValueVisitor, Value,);
-make_value_visitor!(MutValueVisitor, ValueMut, mut);
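
As a closing illustration of the new single-trait API (hypothetical visitor, not part of
the patch): a read-only traversal now implements one `ValueVisitor`, picks `OpTy` as its
value type, and overrides only the hooks it needs.

    // Hypothetical visitor counting raw-pointer leaves; everything else uses
    // the default recursion provided by `walk_value`.
    struct CountRawPtrs<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> {
        ecx: &'a InterpCx<'mir, 'tcx, M>,
        count: usize,
    }

    impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
        for CountRawPtrs<'a, 'mir, 'tcx, M>
    {
        type V = OpTy<'tcx, M::Provenance>;

        fn ecx(&self) -> &InterpCx<'mir, 'tcx, M> {
            self.ecx
        }

        fn visit_value(&mut self, v: &Self::V) -> InterpResult<'tcx> {
            if v.layout().ty.is_unsafe_ptr() {
                self.count += 1; // leaf of interest
            }
            self.walk_value(v) // default traversal into fields and variants
        }
    }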