Diffstat (limited to 'compiler/rustc_mir/src')
-rw-r--r--  compiler/rustc_mir/src/const_eval/mod.rs        |   2
-rw-r--r--  compiler/rustc_mir/src/interpret/intrinsics.rs  |  18
-rw-r--r--  compiler/rustc_mir/src/interpret/machine.rs     |  43
-rw-r--r--  compiler/rustc_mir/src/interpret/memory.rs      | 359
-rw-r--r--  compiler/rustc_mir/src/interpret/mod.rs         |   2
-rw-r--r--  compiler/rustc_mir/src/interpret/operand.rs     |  26
-rw-r--r--  compiler/rustc_mir/src/interpret/place.rs       |  90
-rw-r--r--  compiler/rustc_mir/src/interpret/step.rs        |  42
-rw-r--r--  compiler/rustc_mir/src/interpret/traits.rs      |  71
-rw-r--r--  compiler/rustc_mir/src/interpret/validity.rs    |  60
-rw-r--r--  compiler/rustc_mir/src/lib.rs                   |   2
11 files changed, 440 insertions, 275 deletions
diff --git a/compiler/rustc_mir/src/const_eval/mod.rs b/compiler/rustc_mir/src/const_eval/mod.rs
index 3f14efc920f..6a514e9f62f 100644
--- a/compiler/rustc_mir/src/const_eval/mod.rs
+++ b/compiler/rustc_mir/src/const_eval/mod.rs
@@ -181,7 +181,7 @@ pub(crate) fn deref_const<'tcx>(
     let mplace = ecx.deref_operand(&op).unwrap();
     if let Scalar::Ptr(ptr) = mplace.ptr {
         assert_eq!(
-            ecx.memory.get_raw(ptr.alloc_id).unwrap().mutability,
+            tcx.get_global_alloc(ptr.alloc_id).unwrap().unwrap_memory().mutability,
             Mutability::Not,
             "deref_const cannot be used with mutable allocations as \
             that could allow pattern matching to observe mutable statics",
diff --git a/compiler/rustc_mir/src/interpret/intrinsics.rs b/compiler/rustc_mir/src/interpret/intrinsics.rs
index 69ab50fa86e..99622fb310a 100644
--- a/compiler/rustc_mir/src/interpret/intrinsics.rs
+++ b/compiler/rustc_mir/src/interpret/intrinsics.rs
@@ -14,7 +14,7 @@ use rustc_middle::ty;
 use rustc_middle::ty::subst::SubstsRef;
 use rustc_middle::ty::{Ty, TyCtxt};
 use rustc_span::symbol::{sym, Symbol};
-use rustc_target::abi::{Abi, LayoutOf as _, Primitive, Size};
+use rustc_target::abi::{Abi, Align, LayoutOf as _, Primitive, Size};
 
 use super::{
     util::ensure_monomorphic_enough, CheckInAllocMsg, ImmTy, InterpCx, Machine, OpTy, PlaceTy,
@@ -525,7 +525,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         self.memory.check_ptr_access_align(
             min_ptr,
             Size::from_bytes(size),
-            None,
+            Align::ONE,
             CheckInAllocMsg::PointerArithmeticTest,
         )?;
         Ok(offset_ptr)
@@ -549,17 +549,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             )
         })?;
 
-        // Make sure we check both pointers for an access of the total size and aligment,
-        // *even if* the total size is 0.
-        let src =
-            self.memory.check_ptr_access(self.read_scalar(&src)?.check_init()?, size, align)?;
+        let src = self.read_scalar(&src)?.check_init()?;
+        let dst = self.read_scalar(&dst)?.check_init()?;
 
-        let dst =
-            self.memory.check_ptr_access(self.read_scalar(&dst)?.check_init()?, size, align)?;
-
-        if let (Some(src), Some(dst)) = (src, dst) {
-            self.memory.copy(src, dst, size, nonoverlapping)?;
-        }
-        Ok(())
+        self.memory.copy(src, align, dst, align, size, nonoverlapping)
     }
 }
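
To summarize the calling-convention change in this hunk (an illustrative sketch, not additional patch content; `src`, `dst`, `align`, `size`, and `nonoverlapping` stand for whatever the intrinsic evaluation computed): the intrinsic no longer bounds-checks the two pointers itself, and the zero-size handling moves into `Memory::copy`.

    // Previously the caller had to pre-check both pointers and skip the copy
    // for zero-sized accesses:
    //     let src = self.memory.check_ptr_access(src, size, align)?;
    //     let dst = self.memory.check_ptr_access(dst, size, align)?;
    //     if let (Some(src), Some(dst)) = (src, dst) {
    //         self.memory.copy(src, dst, size, nonoverlapping)?;
    //     }
    // With this patch, `copy` takes the raw `Scalar`s plus an alignment for
    // each side and does the bounds/alignment checks (and the machine access
    // hooks) itself:
    self.memory.copy(src, align, dst, align, size, nonoverlapping)?;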
diff --git a/compiler/rustc_mir/src/interpret/machine.rs b/compiler/rustc_mir/src/interpret/machine.rs
index 52baf1a6330..e7d7c38cc8f 100644
--- a/compiler/rustc_mir/src/interpret/machine.rs
+++ b/compiler/rustc_mir/src/interpret/machine.rs
@@ -13,8 +13,8 @@ use rustc_target::abi::Size;
 use rustc_target::spec::abi::Abi;
 
 use super::{
-    AllocId, Allocation, AllocationExtra, CheckInAllocMsg, Frame, ImmTy, InterpCx, InterpResult,
-    LocalValue, MemPlace, Memory, MemoryKind, OpTy, Operand, PlaceTy, Pointer, Scalar,
+    AllocId, Allocation, CheckInAllocMsg, Frame, ImmTy, InterpCx, InterpResult, LocalValue,
+    MemPlace, Memory, MemoryKind, OpTy, Operand, PlaceTy, Pointer, Scalar,
 };
 
 /// Data returned by Machine::stack_pop,
@@ -105,7 +105,7 @@ pub trait Machine<'mir, 'tcx>: Sized {
     type MemoryExtra;
 
     /// Extra data stored in every allocation.
-    type AllocExtra: AllocationExtra<Self::PointerTag> + 'static;
+    type AllocExtra: Debug + Clone + 'static;
 
     /// Memory's allocation map
     type MemoryMap: AllocMap<
@@ -305,10 +305,39 @@ pub trait Machine<'mir, 'tcx>: Sized {
         kind: Option<MemoryKind<Self::MemoryKind>>,
     ) -> (Cow<'b, Allocation<Self::PointerTag, Self::AllocExtra>>, Self::PointerTag);
 
-    /// Called to notify the machine before a deallocation occurs.
-    fn before_deallocation(
+    /// Hook for performing extra checks on a memory read access.
+    ///
+    /// Takes read-only access to the allocation so that we can keep all the memory read
+    /// operations taking `&self`. Use a `RefCell` in `AllocExtra` if you
+    /// need to mutate.
+    #[inline(always)]
+    fn memory_read(
+        _memory_extra: &Self::MemoryExtra,
+        _alloc_extra: &Self::AllocExtra,
+        _ptr: Pointer<Self::PointerTag>,
+        _size: Size,
+    ) -> InterpResult<'tcx> {
+        Ok(())
+    }
+
+    /// Hook for performing extra checks on a memory write access.
+    #[inline(always)]
+    fn memory_written(
         _memory_extra: &mut Self::MemoryExtra,
-        _id: AllocId,
+        _alloc_extra: &mut Self::AllocExtra,
+        _ptr: Pointer<Self::PointerTag>,
+        _size: Size,
+    ) -> InterpResult<'tcx> {
+        Ok(())
+    }
+
+    /// Hook for performing extra operations on a memory deallocation.
+    #[inline(always)]
+    fn memory_deallocated(
+        _memory_extra: &mut Self::MemoryExtra,
+        _alloc_extra: &mut Self::AllocExtra,
+        _ptr: Pointer<Self::PointerTag>,
+        _size: Size,
     ) -> InterpResult<'tcx> {
         Ok(())
     }
@@ -322,7 +351,7 @@ pub trait Machine<'mir, 'tcx>: Sized {
         Ok(())
     }
 
-    /// Executes a retagging operation
+    /// Executes a retagging operation.
     #[inline]
     fn retag(
         _ecx: &mut InterpCx<'mir, 'tcx, Self>,
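
To illustrate the new per-access hooks (a minimal sketch, not part of the patch; the `AccessCounts` type and the surrounding machine are hypothetical), a machine can keep interior-mutable statistics in its `AllocExtra`, since `memory_read` only receives a shared reference as noted in the doc comment above:

    // Hypothetical per-allocation extra data; only the hook signature below is
    // defined by this patch.
    #[derive(Debug, Clone)]
    struct AccessCounts {
        reads: std::cell::Cell<u64>,
    }

    // Inside `impl<'mir, 'tcx> Machine<'mir, 'tcx> for MyMachine`, with
    // `type AllocExtra = AccessCounts;`:
    fn memory_read(
        _memory_extra: &Self::MemoryExtra,
        alloc_extra: &Self::AllocExtra,
        _ptr: Pointer<Self::PointerTag>,
        _size: Size,
    ) -> InterpResult<'tcx> {
        // Read hooks only get `&Self::AllocExtra`, so mutation goes through
        // interior mutability (a `Cell` here; a `RefCell` works just as well).
        alloc_extra.reads.set(alloc_extra.reads.get() + 1);
        Ok(())
    }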
diff --git a/compiler/rustc_mir/src/interpret/memory.rs b/compiler/rustc_mir/src/interpret/memory.rs
index 5200e4aa90d..7fb7c51b0b5 100644
--- a/compiler/rustc_mir/src/interpret/memory.rs
+++ b/compiler/rustc_mir/src/interpret/memory.rs
@@ -18,8 +18,8 @@ use rustc_middle::ty::{Instance, ParamEnv, TyCtxt};
 use rustc_target::abi::{Align, HasDataLayout, Size, TargetDataLayout};
 
 use super::{
-    AllocId, AllocMap, Allocation, AllocationExtra, CheckInAllocMsg, GlobalAlloc, InterpResult,
-    Machine, MayLeak, Pointer, PointerArithmetic, Scalar,
+    alloc_range, AllocId, AllocMap, AllocRange, Allocation, CheckInAllocMsg, GlobalAlloc,
+    InterpResult, Machine, MayLeak, Pointer, PointerArithmetic, Scalar, ScalarMaybeUninit,
 };
 use crate::util::pretty;
 
@@ -125,6 +125,24 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> HasDataLayout for Memory<'mir, 'tcx, M>
     }
 }
 
+/// A reference to some allocation that was already bounds-checked for the given region
+/// and had the on-access machine hooks run.
+#[derive(Copy, Clone)]
+pub struct AllocRef<'a, 'tcx, Tag, Extra> {
+    alloc: &'a Allocation<Tag, Extra>,
+    range: AllocRange,
+    tcx: TyCtxt<'tcx>,
+    alloc_id: AllocId,
+}
+/// A reference to some allocation that was already bounds-checked for the given region
+/// and had the on-access machine hooks run.
+pub struct AllocRefMut<'a, 'tcx, Tag, Extra> {
+    alloc: &'a mut Allocation<Tag, Extra>,
+    range: AllocRange,
+    tcx: TyCtxt<'tcx>,
+    alloc_id: AllocId,
+}
+
 impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
     pub fn new(tcx: TyCtxt<'tcx>, extra: M::MemoryExtra) -> Self {
         Memory {
@@ -246,7 +264,15 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
             Some((size, _align)) => size,
             None => self.get_raw(ptr.alloc_id)?.size(),
         };
-        self.copy(ptr, new_ptr, old_size.min(new_size), /*nonoverlapping*/ true)?;
+        // This will also call the access hooks.
+        self.copy(
+            ptr.into(),
+            Align::ONE,
+            new_ptr.into(),
+            Align::ONE,
+            old_size.min(new_size),
+            /*nonoverlapping*/ true,
+        )?;
         self.deallocate(ptr, old_size_and_align, kind)?;
 
         Ok(new_ptr)
@@ -278,8 +304,6 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
             );
         }
 
-        M::before_deallocation(&mut self.extra, ptr.alloc_id)?;
-
         let (alloc_kind, mut alloc) = match self.alloc_map.remove(&ptr.alloc_id) {
             Some(alloc) => alloc,
             None => {
@@ -320,10 +344,10 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
 
         // Let the machine take some extra action
         let size = alloc.size();
-        AllocationExtra::memory_deallocated(&mut alloc, ptr, size)?;
+        M::memory_deallocated(&mut self.extra, &mut alloc.extra, ptr, size)?;
 
         // Don't forget to remember size and align of this now-dead allocation
-        let old = self.dead_alloc_map.insert(ptr.alloc_id, (alloc.size(), alloc.align));
+        let old = self.dead_alloc_map.insert(ptr.alloc_id, (size, alloc.align));
         if old.is_some() {
             bug!("Nothing can be deallocated twice");
         }
@@ -331,40 +355,53 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
         Ok(())
     }
 
-    /// Check if the given scalar is allowed to do a memory access of given `size`
-    /// and `align`. On success, returns `None` for zero-sized accesses (where
-    /// nothing else is left to do) and a `Pointer` to use for the actual access otherwise.
-    /// Crucially, if the input is a `Pointer`, we will test it for liveness
-    /// *even if* the size is 0.
-    ///
-    /// Everyone accessing memory based on a `Scalar` should use this method to get the
-    /// `Pointer` they need. And even if you already have a `Pointer`, call this method
-    /// to make sure it is sufficiently aligned and not dangling.  Not doing that may
-    /// cause ICEs.
-    ///
-    /// Most of the time you should use `check_mplace_access`, but when you just have a pointer,
-    /// this method is still appropriate.
+    /// Internal helper function for APIs that offer memory access based on `Scalar` pointers.
     #[inline(always)]
-    pub fn check_ptr_access(
+    pub(super) fn check_ptr_access(
         &self,
         sptr: Scalar<M::PointerTag>,
         size: Size,
         align: Align,
     ) -> InterpResult<'tcx, Option<Pointer<M::PointerTag>>> {
         let align = M::enforce_alignment(&self.extra).then_some(align);
-        self.check_ptr_access_align(sptr, size, align, CheckInAllocMsg::MemoryAccessTest)
+        self.check_and_deref_ptr(sptr, size, align, CheckInAllocMsg::MemoryAccessTest, |ptr| {
+            let (size, align) =
+                self.get_size_and_align(ptr.alloc_id, AllocCheck::Dereferenceable)?;
+            Ok((size, align, ptr))
+        })
     }
 
-    /// Like `check_ptr_access`, but *definitely* checks alignment when `align`
-    /// is `Some` (overriding `M::enforce_alignment`). Also lets the caller control
-    /// the error message for the out-of-bounds case.
+    /// Check if the given scalar is allowed to do a memory access of given `size` and `align`
+    /// (ignoring `M::enforce_alignment`). The caller can control the error message for the
+    /// out-of-bounds case.
+    #[inline(always)]
     pub fn check_ptr_access_align(
         &self,
         sptr: Scalar<M::PointerTag>,
         size: Size,
+        align: Align,
+        msg: CheckInAllocMsg,
+    ) -> InterpResult<'tcx> {
+        self.check_and_deref_ptr(sptr, size, Some(align), msg, |ptr| {
+            let (size, align) =
+                self.get_size_and_align(ptr.alloc_id, AllocCheck::Dereferenceable)?;
+            Ok((size, align, ()))
+        })?;
+        Ok(())
+    }
+
+    /// Low-level helper function to check if a ptr is in-bounds and potentially return a reference
+    /// to the allocation it points to. Supports both shared and mutable references, as the actual
+    /// checking is offloaded to a helper closure. `align` defines whether and which alignment check
+    /// is done. Returns `None` for size 0, and otherwise `Some` of what `alloc_size` returned.
+    fn check_and_deref_ptr<T>(
+        &self,
+        sptr: Scalar<M::PointerTag>,
+        size: Size,
         align: Option<Align>,
         msg: CheckInAllocMsg,
-    ) -> InterpResult<'tcx, Option<Pointer<M::PointerTag>>> {
+        alloc_size: impl FnOnce(Pointer<M::PointerTag>) -> InterpResult<'tcx, (Size, Align, T)>,
+    ) -> InterpResult<'tcx, Option<T>> {
         fn check_offset_align(offset: u64, align: Align) -> InterpResult<'static> {
             if offset % align.bytes() == 0 {
                 Ok(())
@@ -402,8 +439,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
                 None
             }
             Err(ptr) => {
-                let (allocation_size, alloc_align) =
-                    self.get_size_and_align(ptr.alloc_id, AllocCheck::Dereferenceable)?;
+                let (allocation_size, alloc_align, ret_val) = alloc_size(ptr)?;
                 // Test bounds. This also ensures non-null.
                 // It is sufficient to check this for the end pointer. The addition
                 // checks for overflow.
@@ -431,7 +467,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
 
                 // We can still be zero-sized in this branch, in which case we have to
                 // return `None`.
-                if size.bytes() == 0 { None } else { Some(ptr) }
+                if size.bytes() == 0 { None } else { Some(ret_val) }
             }
         })
     }
@@ -502,8 +538,8 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
     }
 
     /// Gives raw access to the `Allocation`, without bounds or alignment checks.
-    /// Use the higher-level, `PlaceTy`- and `OpTy`-based APIs in `InterpCx` instead!
-    pub fn get_raw(
+    /// The caller is responsible for calling the access hooks!
+    fn get_raw(
         &self,
         id: AllocId,
     ) -> InterpResult<'tcx, &Allocation<M::PointerTag, M::AllocExtra>> {
@@ -537,14 +573,54 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
         }
     }
 
+    /// "Safe" (bounds and align-checked) allocation access.
+    pub fn get<'a>(
+        &'a self,
+        sptr: Scalar<M::PointerTag>,
+        size: Size,
+        align: Align,
+    ) -> InterpResult<'tcx, Option<AllocRef<'a, 'tcx, M::PointerTag, M::AllocExtra>>> {
+        let align = M::enforce_alignment(&self.extra).then_some(align);
+        let ptr_and_alloc = self.check_and_deref_ptr(
+            sptr,
+            size,
+            align,
+            CheckInAllocMsg::MemoryAccessTest,
+            |ptr| {
+                let alloc = self.get_raw(ptr.alloc_id)?;
+                Ok((alloc.size(), alloc.align, (ptr, alloc)))
+            },
+        )?;
+        if let Some((ptr, alloc)) = ptr_and_alloc {
+            M::memory_read(&self.extra, &alloc.extra, ptr, size)?;
+            let range = alloc_range(ptr.offset, size);
+            Ok(Some(AllocRef { alloc, range, tcx: self.tcx, alloc_id: ptr.alloc_id }))
+        } else {
+            // Even in this branch we have to be sure that we actually access the allocation, in
+            // order to ensure that `static FOO: Type = FOO;` causes a cycle error instead of
+            // magically pulling *any* ZST value from the ether. However, the `get_raw` above is
+            // always called when `sptr` is truly a `Pointer`, so we are good.
+            Ok(None)
+        }
+    }
+
+    /// Return the `extra` field of the given allocation.
+    pub fn get_alloc_extra<'a>(&'a self, id: AllocId) -> InterpResult<'tcx, &'a M::AllocExtra> {
+        Ok(&self.get_raw(id)?.extra)
+    }
+
     /// Gives raw mutable access to the `Allocation`, without bounds or alignment checks.
-    /// Use the higher-level, `PlaceTy`- and `OpTy`-based APIs in `InterpCx` instead!
-    pub fn get_raw_mut(
+    /// The caller is responsible for calling the access hooks!
+    ///
+    /// Also returns a mutable reference to `self.extra` so that the caller can use it
+    /// in parallel with the allocation.
+    fn get_raw_mut(
         &mut self,
         id: AllocId,
-    ) -> InterpResult<'tcx, &mut Allocation<M::PointerTag, M::AllocExtra>> {
+    ) -> InterpResult<'tcx, (&mut Allocation<M::PointerTag, M::AllocExtra>, &mut M::MemoryExtra)>
+    {
         let tcx = self.tcx;
-        let memory_extra = &self.extra;
+        let memory_extra = &mut self.extra;
         let a = self.alloc_map.get_mut_or(id, || {
             // Need to make a copy, even if `get_global_alloc` is able
             // to give us a cheap reference.
@@ -567,11 +643,40 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
                 if a.mutability == Mutability::Not {
                     throw_ub!(WriteToReadOnly(id))
                 }
-                Ok(a)
+                Ok((a, memory_extra))
             }
         }
     }
 
+    /// "Safe" (bounds and align-checked) allocation access.
+    pub fn get_mut<'a>(
+        &'a mut self,
+        sptr: Scalar<M::PointerTag>,
+        size: Size,
+        align: Align,
+    ) -> InterpResult<'tcx, Option<AllocRefMut<'a, 'tcx, M::PointerTag, M::AllocExtra>>> {
+        let ptr = self.check_ptr_access(sptr, size, align)?;
+        if let Some(ptr) = ptr {
+            let tcx = self.tcx;
+            // FIXME: can we somehow avoid looking up the allocation twice here?
+            // We cannot call `get_raw_mut` inside `check_and_deref_ptr` as that would duplicate `&mut self`.
+            let (alloc, extra) = self.get_raw_mut(ptr.alloc_id)?;
+            M::memory_written(extra, &mut alloc.extra, ptr, size)?;
+            let range = alloc_range(ptr.offset, size);
+            Ok(Some(AllocRefMut { alloc, range, tcx, alloc_id: ptr.alloc_id }))
+        } else {
+            Ok(None)
+        }
+    }
+
+    /// Return the `extra` field of the given allocation.
+    pub fn get_alloc_extra_mut<'a>(
+        &'a mut self,
+        id: AllocId,
+    ) -> InterpResult<'tcx, &'a mut M::AllocExtra> {
+        Ok(&mut self.get_raw_mut(id)?.0.extra)
+    }
+
     /// Obtain the size and alignment of an allocation, even if that allocation has
     /// been deallocated.
     ///
@@ -596,7 +701,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
                 // The caller requested no function pointers.
                 throw_ub!(DerefFunctionPointer(id))
             } else {
-                Ok((Size::ZERO, Align::from_bytes(1).unwrap()))
+                Ok((Size::ZERO, Align::ONE))
             };
         }
 
@@ -658,7 +763,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
     }
 
     pub fn mark_immutable(&mut self, id: AllocId) -> InterpResult<'tcx> {
-        self.get_raw_mut(id)?.mutability = Mutability::Not;
+        self.get_raw_mut(id)?.0.mutability = Mutability::Not;
         Ok(())
     }
 
@@ -792,16 +897,62 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> std::fmt::Debug for DumpAllocs<'a,
 }
 
 /// Reading and writing.
+impl<'tcx, 'a, Tag: Copy, Extra> AllocRefMut<'a, 'tcx, Tag, Extra> {
+    pub fn write_scalar(
+        &mut self,
+        range: AllocRange,
+        val: ScalarMaybeUninit<Tag>,
+    ) -> InterpResult<'tcx> {
+        Ok(self
+            .alloc
+            .write_scalar(&self.tcx, self.range.subrange(range), val)
+            .map_err(|e| e.to_interp_error(self.alloc_id))?)
+    }
+
+    pub fn write_ptr_sized(
+        &mut self,
+        offset: Size,
+        val: ScalarMaybeUninit<Tag>,
+    ) -> InterpResult<'tcx> {
+        self.write_scalar(alloc_range(offset, self.tcx.data_layout().pointer_size), val)
+    }
+}
+
+impl<'tcx, 'a, Tag: Copy, Extra> AllocRef<'a, 'tcx, Tag, Extra> {
+    pub fn read_scalar(&self, range: AllocRange) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
+        Ok(self
+            .alloc
+            .read_scalar(&self.tcx, self.range.subrange(range))
+            .map_err(|e| e.to_interp_error(self.alloc_id))?)
+    }
+
+    pub fn read_ptr_sized(&self, offset: Size) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
+        self.read_scalar(alloc_range(offset, self.tcx.data_layout().pointer_size))
+    }
+
+    pub fn check_bytes(&self, range: AllocRange, allow_uninit_and_ptr: bool) -> InterpResult<'tcx> {
+        Ok(self
+            .alloc
+            .check_bytes(&self.tcx, self.range.subrange(range), allow_uninit_and_ptr)
+            .map_err(|e| e.to_interp_error(self.alloc_id))?)
+    }
+}
+
 impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
     /// Reads the given number of bytes from memory. Returns them as a slice.
     ///
     /// Performs appropriate bounds checks.
-    pub fn read_bytes(&self, ptr: Scalar<M::PointerTag>, size: Size) -> InterpResult<'tcx, &[u8]> {
-        let ptr = match self.check_ptr_access(ptr, size, Align::from_bytes(1).unwrap())? {
-            Some(ptr) => ptr,
+    pub fn read_bytes(&self, sptr: Scalar<M::PointerTag>, size: Size) -> InterpResult<'tcx, &[u8]> {
+        let alloc_ref = match self.get(sptr, size, Align::ONE)? {
+            Some(a) => a,
             None => return Ok(&[]), // zero-sized access
         };
-        self.get_raw(ptr.alloc_id)?.get_bytes(self, ptr, size)
+        // Side-step AllocRef and directly access the underlying bytes more efficiently.
+        // (We are staying inside the bounds here so all is good.)
+        Ok(alloc_ref
+            .alloc
+            .get_bytes(&alloc_ref.tcx, alloc_ref.range)
+            .map_err(|e| e.to_interp_error(alloc_ref.alloc_id))?)
     }
 
     /// Writes the given stream of bytes into memory.
@@ -809,14 +960,17 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
     /// Performs appropriate bounds checks.
     pub fn write_bytes(
         &mut self,
-        ptr: Scalar<M::PointerTag>,
+        sptr: Scalar<M::PointerTag>,
         src: impl IntoIterator<Item = u8>,
     ) -> InterpResult<'tcx> {
         let mut src = src.into_iter();
-        let size = Size::from_bytes(src.size_hint().0);
-        // `write_bytes` checks that this lower bound `size` matches the upper bound and reality.
-        let ptr = match self.check_ptr_access(ptr, size, Align::from_bytes(1).unwrap())? {
-            Some(ptr) => ptr,
+        let (lower, upper) = src.size_hint();
+        let len = upper.expect("can only write bounded iterators");
+        assert_eq!(lower, len, "can only write iterators with a precise length");
+
+        let size = Size::from_bytes(len);
+        let alloc_ref = match self.get_mut(sptr, size, Align::ONE)? {
+            Some(alloc_ref) => alloc_ref,
             None => {
                 // zero-sized access
                 assert_matches!(
@@ -827,56 +981,88 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
                 return Ok(());
             }
         };
-        let tcx = self.tcx;
-        self.get_raw_mut(ptr.alloc_id)?.write_bytes(&tcx, ptr, src)
+
+        // Side-step AllocRef and directly access the underlying bytes more efficiently.
+        // (We are staying inside the bounds here so all is good.)
+        let bytes = alloc_ref.alloc.get_bytes_mut(&alloc_ref.tcx, alloc_ref.range);
+        // `zip` would stop when the first iterator ends; we want to definitely
+        // cover all of `bytes`.
+        for dest in bytes {
+            *dest = src.next().expect("iterator was shorter than it said it would be");
+        }
+        assert_matches!(src.next(), None, "iterator was longer than it said it would be");
+        Ok(())
     }
 
-    /// Expects the caller to have checked bounds and alignment.
     pub fn copy(
         &mut self,
-        src: Pointer<M::PointerTag>,
-        dest: Pointer<M::PointerTag>,
+        src: Scalar<M::PointerTag>,
+        src_align: Align,
+        dest: Scalar<M::PointerTag>,
+        dest_align: Align,
         size: Size,
         nonoverlapping: bool,
     ) -> InterpResult<'tcx> {
-        self.copy_repeatedly(src, dest, size, 1, nonoverlapping)
+        self.copy_repeatedly(src, src_align, dest, dest_align, size, 1, nonoverlapping)
     }
 
-    /// Expects the caller to have checked bounds and alignment.
     pub fn copy_repeatedly(
         &mut self,
-        src: Pointer<M::PointerTag>,
-        dest: Pointer<M::PointerTag>,
+        src: Scalar<M::PointerTag>,
+        src_align: Align,
+        dest: Scalar<M::PointerTag>,
+        dest_align: Align,
         size: Size,
-        length: u64,
+        num_copies: u64,
         nonoverlapping: bool,
     ) -> InterpResult<'tcx> {
+        let tcx = self.tcx;
+        // We need to do our own bounds-checks.
+        let src = self.check_ptr_access(src, size, src_align)?;
+        let dest = self.check_ptr_access(dest, size * num_copies, dest_align)?; // `Size` multiplication
+
+        // FIXME: we look up both allocations twice here, once before for the `check_ptr_access`
+        // and once below to get the underlying `&[mut] Allocation`.
+
+        // Source alloc preparations and access hooks.
+        let src = match src {
+            None => return Ok(()), // Zero-sized *source*, which means the destination is also zero-sized and we have nothing to do.
+            Some(src_ptr) => src_ptr,
+        };
+        let src_alloc = self.get_raw(src.alloc_id)?;
+        M::memory_read(&self.extra, &src_alloc.extra, src, size)?;
+        // We need the `dest` ptr for the next operation, so we get it now.
+        // We already did the source checks and called the hooks so we are good to return early.
+        let dest = match dest {
+            None => return Ok(()), // Zero-sized *destination*.
+            Some(dest_ptr) => dest_ptr,
+        };
+
         // first copy the relocations to a temporary buffer, because
         // `get_bytes_mut` will clear the relocations, which is correct,
         // since we don't want to keep any relocations at the target.
         // (`get_bytes_with_uninit_and_ptr` below checks that there are no
         // relocations overlapping the edges; those would not be handled correctly).
-        let relocations =
-            self.get_raw(src.alloc_id)?.prepare_relocation_copy(self, src, size, dest, length);
-
-        let tcx = self.tcx;
-
-        // This checks relocation edges on the src.
-        let src_bytes =
-            self.get_raw(src.alloc_id)?.get_bytes_with_uninit_and_ptr(&tcx, src, size)?.as_ptr();
-        let dest_bytes =
-            self.get_raw_mut(dest.alloc_id)?.get_bytes_mut(&tcx, dest, size * length)?; // `Size` multiplication
-
-        // If `dest_bytes` is empty we just optimize to not run anything for zsts.
-        // See #67539
-        if dest_bytes.is_empty() {
-            return Ok(());
-        }
-
-        let dest_bytes = dest_bytes.as_mut_ptr();
-
+        let relocations = src_alloc.prepare_relocation_copy(
+            self,
+            alloc_range(src.offset, size),
+            dest.offset,
+            num_copies,
+        );
         // Prepare a copy of the initialization mask.
-        let compressed = self.get_raw(src.alloc_id)?.compress_uninit_range(src, size);
+        let compressed = src_alloc.compress_uninit_range(src, size);
+        // This checks relocation edges on the src.
+        let src_bytes = src_alloc
+            .get_bytes_with_uninit_and_ptr(&tcx, alloc_range(src.offset, size))
+            .map_err(|e| e.to_interp_error(src.alloc_id))?
+            .as_ptr(); // raw ptr, so we can also get a ptr to the destination allocation
+
+        // Destination alloc preparations and access hooks.
+        let (dest_alloc, extra) = self.get_raw_mut(dest.alloc_id)?;
+        M::memory_written(extra, &mut dest_alloc.extra, dest, size * num_copies)?;
+        let dest_bytes = dest_alloc
+            .get_bytes_mut_ptr(&tcx, alloc_range(dest.offset, size * num_copies))
+            .as_mut_ptr();
 
         if compressed.no_bytes_init() {
             // Fast path: If all bytes are `uninit` then there is nothing to copy. The target range
@@ -885,8 +1071,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
             // This also avoids writing to the target bytes so that the backing allocation is never
             // touched if the bytes stay uninitialized for the whole interpreter execution. On contemporary
             // operating system this can avoid physically allocating the page.
-            let dest_alloc = self.get_raw_mut(dest.alloc_id)?;
-            dest_alloc.mark_init(dest, size * length, false); // `Size` multiplication
+            dest_alloc.mark_init(alloc_range(dest.offset, size * num_copies), false); // `Size` multiplication
             dest_alloc.mark_relocation_range(relocations);
             return Ok(());
         }
@@ -907,7 +1092,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
                     }
                 }
 
-                for i in 0..length {
+                for i in 0..num_copies {
                     ptr::copy(
                         src_bytes,
                         dest_bytes.add((size * i).bytes_usize()), // `Size` multiplication
@@ -915,7 +1100,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
                     );
                 }
             } else {
-                for i in 0..length {
+                for i in 0..num_copies {
                     ptr::copy_nonoverlapping(
                         src_bytes,
                         dest_bytes.add((size * i).bytes_usize()), // `Size` multiplication
@@ -925,16 +1110,10 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
             }
         }
 
-        // now fill in all the data
-        self.get_raw_mut(dest.alloc_id)?.mark_compressed_init_range(
-            &compressed,
-            dest,
-            size,
-            length,
-        );
-
+        // now fill in all the "init" data
+        dest_alloc.mark_compressed_init_range(&compressed, dest, size, num_copies);
         // copy the relocations to the destination
-        self.get_raw_mut(dest.alloc_id)?.mark_relocation_range(relocations);
+        dest_alloc.mark_relocation_range(relocations);
 
         Ok(())
     }
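
Putting the new bounds-checked access path together (a sketch only; `ecx`, `ptr`, and `val` are placeholders, with `val` being a `ScalarMaybeUninit`), a typical read and write through `AllocRef`/`AllocRefMut` now looks like this:

    let size = Size::from_bytes(8); // example access size
    // Read side: `get` bounds/align-checks `ptr`, runs `M::memory_read`, and
    // returns `None` for a zero-sized access.
    if let Some(alloc) = ecx.memory.get(ptr, size, Align::ONE)? {
        let _old = alloc.read_scalar(alloc_range(Size::ZERO, size))?;
    }
    // Write side: `get_mut` additionally runs `M::memory_written` and refuses
    // to hand out read-only (global) allocations.
    if let Some(mut alloc) = ecx.memory.get_mut(ptr, size, Align::ONE)? {
        alloc.write_scalar(alloc_range(Size::ZERO, size), val)?;
    }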
diff --git a/compiler/rustc_mir/src/interpret/mod.rs b/compiler/rustc_mir/src/interpret/mod.rs
index a29ef117ace..9b95f691167 100644
--- a/compiler/rustc_mir/src/interpret/mod.rs
+++ b/compiler/rustc_mir/src/interpret/mod.rs
@@ -21,7 +21,7 @@ pub use rustc_middle::mir::interpret::*; // have all the `interpret` symbols in
 pub use self::eval_context::{Frame, FrameInfo, InterpCx, LocalState, LocalValue, StackPopCleanup};
 pub use self::intern::{intern_const_alloc_recursive, InternKind};
 pub use self::machine::{compile_time_machine, AllocMap, Machine, MayLeak, StackPopJump};
-pub use self::memory::{AllocCheck, FnVal, Memory, MemoryKind};
+pub use self::memory::{AllocCheck, AllocRef, AllocRefMut, FnVal, Memory, MemoryKind};
 pub use self::operand::{ImmTy, Immediate, OpTy, Operand};
 pub use self::place::{MPlaceTy, MemPlace, MemPlaceMeta, Place, PlaceTy};
 pub use self::validity::{CtfeValidationMode, RefTracking};
diff --git a/compiler/rustc_mir/src/interpret/operand.rs b/compiler/rustc_mir/src/interpret/operand.rs
index e5bc9320260..06432a8b902 100644
--- a/compiler/rustc_mir/src/interpret/operand.rs
+++ b/compiler/rustc_mir/src/interpret/operand.rs
@@ -15,8 +15,8 @@ use rustc_target::abi::{Abi, HasDataLayout, LayoutOf, Size, TagEncoding};
 use rustc_target::abi::{VariantIdx, Variants};
 
 use super::{
-    from_known_layout, mir_assign_valid_types, ConstValue, GlobalId, InterpCx, InterpResult,
-    MPlaceTy, Machine, MemPlace, Place, PlaceTy, Pointer, Scalar, ScalarMaybeUninit,
+    alloc_range, from_known_layout, mir_assign_valid_types, ConstValue, GlobalId, InterpCx,
+    InterpResult, MPlaceTy, Machine, MemPlace, Place, PlaceTy, Pointer, Scalar, ScalarMaybeUninit,
 };
 
 /// An `Immediate` represents a single immediate self-contained Rust value.
@@ -249,19 +249,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             return Ok(None);
         }
 
-        let ptr = match self
-            .check_mplace_access(mplace, None)
-            .expect("places should be checked on creation")
-        {
+        let alloc = match self.get_alloc(mplace)? {
             Some(ptr) => ptr,
             None => {
-                if let Scalar::Ptr(ptr) = mplace.ptr {
-                    // We may be reading from a static.
-                    // In order to ensure that `static FOO: Type = FOO;` causes a cycle error
-                    // instead of magically pulling *any* ZST value from the ether, we need to
-                    // actually access the referenced allocation.
-                    self.memory.get_raw(ptr.alloc_id)?;
-                }
                 return Ok(Some(ImmTy {
                     // zero-sized type
                     imm: Scalar::ZST.into(),
@@ -270,11 +260,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             }
         };
 
-        let alloc = self.memory.get_raw(ptr.alloc_id)?;
-
         match mplace.layout.abi {
             Abi::Scalar(..) => {
-                let scalar = alloc.read_scalar(self, ptr, mplace.layout.size)?;
+                let scalar = alloc.read_scalar(alloc_range(Size::ZERO, mplace.layout.size))?;
                 Ok(Some(ImmTy { imm: scalar.into(), layout: mplace.layout }))
             }
             Abi::ScalarPair(ref a, ref b) => {
@@ -283,12 +271,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 // which `ptr.offset(b_offset)` cannot possibly fail to satisfy.
                 let (a, b) = (&a.value, &b.value);
                 let (a_size, b_size) = (a.size(self), b.size(self));
-                let a_ptr = ptr;
                 let b_offset = a_size.align_to(b.align(self).abi);
                 assert!(b_offset.bytes() > 0); // we later use the offset to tell apart the fields
-                let b_ptr = ptr.offset(b_offset, self)?;
-                let a_val = alloc.read_scalar(self, a_ptr, a_size)?;
-                let b_val = alloc.read_scalar(self, b_ptr, b_size)?;
+                let a_val = alloc.read_scalar(alloc_range(Size::ZERO, a_size))?;
+                let b_val = alloc.read_scalar(alloc_range(b_offset, b_size))?;
                 Ok(Some(ImmTy { imm: Immediate::ScalarPair(a_val, b_val), layout: mplace.layout }))
             }
             _ => Ok(None),
diff --git a/compiler/rustc_mir/src/interpret/place.rs b/compiler/rustc_mir/src/interpret/place.rs
index d7c11aee21f..ef603b51554 100644
--- a/compiler/rustc_mir/src/interpret/place.rs
+++ b/compiler/rustc_mir/src/interpret/place.rs
@@ -14,9 +14,9 @@ use rustc_target::abi::{Abi, Align, FieldsShape, TagEncoding};
 use rustc_target::abi::{HasDataLayout, LayoutOf, Size, VariantIdx, Variants};
 
 use super::{
-    mir_assign_valid_types, AllocId, AllocMap, Allocation, AllocationExtra, ConstAlloc, ImmTy,
-    Immediate, InterpCx, InterpResult, LocalValue, Machine, MemoryKind, OpTy, Operand, Pointer,
-    PointerArithmetic, Scalar, ScalarMaybeUninit,
+    alloc_range, mir_assign_valid_types, AllocId, AllocMap, AllocRef, AllocRefMut, Allocation,
+    ConstAlloc, ImmTy, Immediate, InterpCx, InterpResult, LocalValue, Machine, MemoryKind, OpTy,
+    Operand, Pointer, PointerArithmetic, Scalar, ScalarMaybeUninit,
 };
 
 #[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, HashStable)]
@@ -293,7 +293,6 @@ where
     M: Machine<'mir, 'tcx, PointerTag = Tag>,
     // FIXME: Working around https://github.com/rust-lang/rust/issues/24159
     M::MemoryMap: AllocMap<AllocId, (MemoryKind<M::MemoryKind>, Allocation<Tag, M::AllocExtra>)>,
-    M::AllocExtra: AllocationExtra<Tag>,
 {
     /// Take a value, which represents a (thin or wide) reference, and make it a place.
     /// Alignment is just based on the type.  This is the inverse of `MemPlace::to_ref()`.
@@ -339,24 +338,26 @@ where
         self.mplace_access_checked(place, None)
     }
 
-    /// Check if the given place is good for memory access with the given
-    /// size, falling back to the layout's size if `None` (in the latter case,
-    /// this must be a statically sized type).
-    ///
-    /// On success, returns `None` for zero-sized accesses (where nothing else is
-    /// left to do) and a `Pointer` to use for the actual access otherwise.
     #[inline]
-    pub(super) fn check_mplace_access(
+    pub(super) fn get_alloc(
         &self,
         place: &MPlaceTy<'tcx, M::PointerTag>,
-        size: Option<Size>,
-    ) -> InterpResult<'tcx, Option<Pointer<M::PointerTag>>> {
-        let size = size.unwrap_or_else(|| {
-            assert!(!place.layout.is_unsized());
-            assert!(!place.meta.has_meta());
-            place.layout.size
-        });
-        self.memory.check_ptr_access(place.ptr, size, place.align)
+    ) -> InterpResult<'tcx, Option<AllocRef<'_, 'tcx, M::PointerTag, M::AllocExtra>>> {
+        assert!(!place.layout.is_unsized());
+        assert!(!place.meta.has_meta());
+        let size = place.layout.size;
+        self.memory.get(place.ptr, size, place.align)
+    }
+
+    #[inline]
+    pub(super) fn get_alloc_mut(
+        &mut self,
+        place: &MPlaceTy<'tcx, M::PointerTag>,
+    ) -> InterpResult<'tcx, Option<AllocRefMut<'_, 'tcx, M::PointerTag, M::AllocExtra>>> {
+        assert!(!place.layout.is_unsized());
+        assert!(!place.meta.has_meta());
+        let size = place.layout.size;
+        self.memory.get_mut(place.ptr, size, place.align)
     }
 
     /// Return the "access-checked" version of this `MPlace`, where for non-ZST
@@ -373,10 +374,11 @@ where
             .size_and_align_of_mplace(&place)?
             .unwrap_or((place.layout.size, place.layout.align.abi));
         assert!(place.mplace.align <= align, "dynamic alignment less strict than static one?");
-        // Check (stricter) dynamic alignment, unless forced otherwise.
-        place.mplace.align = force_align.unwrap_or(align);
+        let align = force_align.unwrap_or(align);
+        // Record new (stricter, unless forced) alignment requirement in place.
+        place.mplace.align = align;
         // When dereferencing a pointer, it must be non-null, aligned, and live.
-        if let Some(ptr) = self.check_mplace_access(&place, Some(size))? {
+        if let Some(ptr) = self.memory.check_ptr_access(place.ptr, size, align)? {
             place.mplace.ptr = ptr.into();
         }
         Ok(place)
@@ -786,12 +788,12 @@ where
         // wrong type.
 
         // Invalid places are a thing: the return place of a diverging function
-        let ptr = match self.check_mplace_access(dest, None)? {
-            Some(ptr) => ptr,
+        let tcx = *self.tcx;
+        let mut alloc = match self.get_alloc_mut(dest)? {
+            Some(a) => a,
             None => return Ok(()), // zero-sized access
         };
 
-        let tcx = *self.tcx;
         // FIXME: We should check that there are dest.layout.size many bytes available in
         // memory.  The code below is not sufficient, with enough padding it might not
         // cover all the bytes!
@@ -805,12 +807,7 @@ where
                         dest.layout
                     ),
                 }
-                self.memory.get_raw_mut(ptr.alloc_id)?.write_scalar(
-                    &tcx,
-                    ptr,
-                    scalar,
-                    dest.layout.size,
-                )
+                alloc.write_scalar(alloc_range(Size::ZERO, dest.layout.size), scalar)
             }
             Immediate::ScalarPair(a_val, b_val) => {
                 // We checked `ptr_align` above, so all fields will have the alignment they need.
@@ -824,16 +821,15 @@ where
                         dest.layout
                     ),
                 };
-                let (a_size, b_size) = (a.size(self), b.size(self));
-                let b_offset = a_size.align_to(b.align(self).abi);
-                let b_ptr = ptr.offset(b_offset, self)?;
+                let (a_size, b_size) = (a.size(&tcx), b.size(&tcx));
+                let b_offset = a_size.align_to(b.align(&tcx).abi);
 
                 // It is tempting to verify `b_offset` against `layout.fields.offset(1)`,
                 // but that does not work: We could be a newtype around a pair, then the
                 // fields do not match the `ScalarPair` components.
 
-                self.memory.get_raw_mut(ptr.alloc_id)?.write_scalar(&tcx, ptr, a_val, a_size)?;
-                self.memory.get_raw_mut(b_ptr.alloc_id)?.write_scalar(&tcx, b_ptr, b_val, b_size)
+                alloc.write_scalar(alloc_range(Size::ZERO, a_size), a_val)?;
+                alloc.write_scalar(alloc_range(b_offset, b_size), b_val)
             }
         }
     }
@@ -902,19 +898,8 @@ where
         });
         assert_eq!(src.meta, dest.meta, "Can only copy between equally-sized instances");
 
-        let src = self
-            .check_mplace_access(&src, Some(size))
-            .expect("places should be checked on creation");
-        let dest = self
-            .check_mplace_access(&dest, Some(size))
-            .expect("places should be checked on creation");
-        let (src_ptr, dest_ptr) = match (src, dest) {
-            (Some(src_ptr), Some(dest_ptr)) => (src_ptr, dest_ptr),
-            (None, None) => return Ok(()), // zero-sized copy
-            _ => bug!("The pointers should both be Some or both None"),
-        };
-
-        self.memory.copy(src_ptr, dest_ptr, size, /*nonoverlapping*/ true)
+        self.memory
+            .copy(src.ptr, src.align, dest.ptr, dest.align, size, /*nonoverlapping*/ true)
     }
 
     /// Copies the data from an operand to a place. The layouts may disagree, but they must
@@ -1047,11 +1032,8 @@ where
     ) -> MPlaceTy<'tcx, M::PointerTag> {
         let ptr = self.memory.allocate_bytes(str.as_bytes(), kind);
         let meta = Scalar::from_machine_usize(u64::try_from(str.len()).unwrap(), self);
-        let mplace = MemPlace {
-            ptr: ptr.into(),
-            align: Align::from_bytes(1).unwrap(),
-            meta: MemPlaceMeta::Meta(meta),
-        };
+        let mplace =
+            MemPlace { ptr: ptr.into(), align: Align::ONE, meta: MemPlaceMeta::Meta(meta) };
 
         let layout = self.layout_of(self.tcx.mk_static_str()).unwrap();
         MPlaceTy { mplace, layout }
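
To make the `ScalarPair` offset arithmetic in the rewritten immediate-write path concrete (the sizes below are an illustrative assumption, not from the patch): for a pair of a 1-byte scalar and a 4-byte scalar with 4-byte alignment,

    // a_size = 1, b_size = 4, b_align = 4
    // b_offset = a_size.align_to(b_align) = 4, so the two halves land at byte
    // ranges 0..1 and 4..8 of the same bounds-checked allocation:
    alloc.write_scalar(alloc_range(Size::ZERO, a_size), a_val)?; // bytes 0..1
    alloc.write_scalar(alloc_range(b_offset, b_size), b_val)?;   // bytes 4..8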
diff --git a/compiler/rustc_mir/src/interpret/step.rs b/compiler/rustc_mir/src/interpret/step.rs
index 5a10ffe6d61..129dd8f8e01 100644
--- a/compiler/rustc_mir/src/interpret/step.rs
+++ b/compiler/rustc_mir/src/interpret/step.rs
@@ -222,28 +222,34 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             }
 
             Repeat(ref operand, _) => {
-                let op = self.eval_operand(operand, None)?;
+                let src = self.eval_operand(operand, None)?;
+                assert!(!src.layout.is_unsized());
                 let dest = self.force_allocation(&dest)?;
                 let length = dest.len(self)?;
 
-                if let Some(first_ptr) = self.check_mplace_access(&dest, None)? {
-                    // Write the first.
+                if length == 0 {
+                    // Nothing to copy... but let's still make sure that `dest` as a place is valid.
+                    self.get_alloc_mut(&dest)?;
+                } else {
+                    // Write the src to the first element.
                     let first = self.mplace_field(&dest, 0)?;
-                    self.copy_op(&op, &first.into())?;
-
-                    if length > 1 {
-                        let elem_size = first.layout.size;
-                        // Copy the rest. This is performance-sensitive code
-                        // for big static/const arrays!
-                        let rest_ptr = first_ptr.offset(elem_size, self)?;
-                        self.memory.copy_repeatedly(
-                            first_ptr,
-                            rest_ptr,
-                            elem_size,
-                            length - 1,
-                            /*nonoverlapping:*/ true,
-                        )?;
-                    }
+                    self.copy_op(&src, &first.into())?;
+
+                    // This is performance-sensitive code for big static/const arrays! So we
+                    // avoid writing each operand individually and instead just make many copies
+                    // of the first element.
+                    let elem_size = first.layout.size;
+                    let first_ptr = first.ptr;
+                    let rest_ptr = first_ptr.ptr_offset(elem_size, self)?;
+                    self.memory.copy_repeatedly(
+                        first_ptr,
+                        first.align,
+                        rest_ptr,
+                        first.align,
+                        elem_size,
+                        length - 1,
+                        /*nonoverlapping:*/ true,
+                    )?;
                 }
             }
 
diff --git a/compiler/rustc_mir/src/interpret/traits.rs b/compiler/rustc_mir/src/interpret/traits.rs
index 50603bdd45b..11f8d388820 100644
--- a/compiler/rustc_mir/src/interpret/traits.rs
+++ b/compiler/rustc_mir/src/interpret/traits.rs
@@ -62,32 +62,32 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         let drop = Instance::resolve_drop_in_place(tcx, ty);
         let drop = self.memory.create_fn_alloc(FnVal::Instance(drop));
 
+        // Prepare the fn ptrs we will write into the vtable later.
+        let fn_ptrs = methods
+            .iter()
+            .enumerate() // remember the original position
+            .filter_map(|(i, method)| {
+                if let Some((def_id, substs)) = method { Some((i, def_id, substs)) } else { None }
+            })
+            .map(|(i, def_id, substs)| {
+                let instance =
+                    ty::Instance::resolve_for_vtable(tcx, self.param_env, *def_id, substs)
+                        .ok_or_else(|| err_inval!(TooGeneric))?;
+                Ok((i, self.memory.create_fn_alloc(FnVal::Instance(instance))))
+            })
+            .collect::<InterpResult<'tcx, Vec<(usize, Pointer<M::PointerTag>)>>>()?;
+
         // No need to do any alignment checks on the memory accesses below, because we know the
         // allocation is correctly aligned as we created it above. Also we're only offsetting by
         // multiples of `ptr_align`, which means that it will stay aligned to `ptr_align`.
-        let vtable_alloc = self.memory.get_raw_mut(vtable.alloc_id)?;
-        vtable_alloc.write_ptr_sized(&tcx, vtable, drop.into())?;
-
-        let size_ptr = vtable.offset(ptr_size, &tcx)?;
-        vtable_alloc.write_ptr_sized(&tcx, size_ptr, Scalar::from_uint(size, ptr_size).into())?;
-        let align_ptr = vtable.offset(ptr_size * 2, &tcx)?;
-        vtable_alloc.write_ptr_sized(&tcx, align_ptr, Scalar::from_uint(align, ptr_size).into())?;
-
-        for (i, method) in methods.iter().enumerate() {
-            if let Some((def_id, substs)) = *method {
-                // resolve for vtable: insert shims where needed
-                let instance =
-                    ty::Instance::resolve_for_vtable(tcx, self.param_env, def_id, substs)
-                        .ok_or_else(|| err_inval!(TooGeneric))?;
-                let fn_ptr = self.memory.create_fn_alloc(FnVal::Instance(instance));
-                // We cannot use `vtable_allic` as we are creating fn ptrs in this loop.
-                let method_ptr = vtable.offset(ptr_size * (3 + i as u64), &tcx)?;
-                self.memory.get_raw_mut(vtable.alloc_id)?.write_ptr_sized(
-                    &tcx,
-                    method_ptr,
-                    fn_ptr.into(),
-                )?;
-            }
+        let mut vtable_alloc =
+            self.memory.get_mut(vtable.into(), vtable_size, ptr_align)?.expect("not a ZST");
+        vtable_alloc.write_ptr_sized(ptr_size * 0, drop.into())?;
+        vtable_alloc.write_ptr_sized(ptr_size * 1, Scalar::from_uint(size, ptr_size).into())?;
+        vtable_alloc.write_ptr_sized(ptr_size * 2, Scalar::from_uint(align, ptr_size).into())?;
+
+        for (i, fn_ptr) in fn_ptrs.into_iter() {
+            vtable_alloc.write_ptr_sized(ptr_size * (3 + i as u64), fn_ptr.into())?;
         }
 
         M::after_static_mem_initialized(self, vtable, vtable_size)?;
@@ -111,13 +111,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         let vtable_slot = vtable.ptr_offset(ptr_size * idx.checked_add(3).unwrap(), self)?;
         let vtable_slot = self
             .memory
-            .check_ptr_access(vtable_slot, ptr_size, self.tcx.data_layout.pointer_align.abi)?
+            .get(vtable_slot, ptr_size, self.tcx.data_layout.pointer_align.abi)?
             .expect("cannot be a ZST");
-        let fn_ptr = self
-            .memory
-            .get_raw(vtable_slot.alloc_id)?
-            .read_ptr_sized(self, vtable_slot)?
-            .check_init()?;
+        let fn_ptr = vtable_slot.read_ptr_sized(Size::ZERO)?.check_init()?;
         self.memory.get_fn(fn_ptr)
     }
 
@@ -129,14 +125,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         // We don't care about the pointee type; we just want a pointer.
         let vtable = self
             .memory
-            .check_ptr_access(
-                vtable,
-                self.tcx.data_layout.pointer_size,
-                self.tcx.data_layout.pointer_align.abi,
-            )?
+            .get(vtable, self.tcx.data_layout.pointer_size, self.tcx.data_layout.pointer_align.abi)?
             .expect("cannot be a ZST");
-        let drop_fn =
-            self.memory.get_raw(vtable.alloc_id)?.read_ptr_sized(self, vtable)?.check_init()?;
+        let drop_fn = vtable.read_ptr_sized(Size::ZERO)?.check_init()?;
         // We *need* an instance here, no other kind of function value, to be able
         // to determine the type.
         let drop_instance = self.memory.get_fn(drop_fn)?.as_instance()?;
@@ -161,13 +152,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         // the size, and the align (which we read below).
         let vtable = self
             .memory
-            .check_ptr_access(vtable, 3 * pointer_size, self.tcx.data_layout.pointer_align.abi)?
+            .get(vtable, 3 * pointer_size, self.tcx.data_layout.pointer_align.abi)?
             .expect("cannot be a ZST");
-        let alloc = self.memory.get_raw(vtable.alloc_id)?;
-        let size = alloc.read_ptr_sized(self, vtable.offset(pointer_size, self)?)?.check_init()?;
+        let size = vtable.read_ptr_sized(pointer_size)?.check_init()?;
         let size = u64::try_from(self.force_bits(size, pointer_size)?).unwrap();
-        let align =
-            alloc.read_ptr_sized(self, vtable.offset(pointer_size * 2, self)?)?.check_init()?;
+        let align = vtable.read_ptr_sized(pointer_size * 2)?.check_init()?;
         let align = u64::try_from(self.force_bits(align, pointer_size)?).unwrap();
 
         if size >= self.tcx.data_layout.obj_size_bound() {
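
For orientation, the vtable layout that the rewritten writer above produces (the layout itself is unchanged by this patch) and that `get_vtable_slot`, `read_drop_type_from_vtable`, and `read_size_and_align_from_vtable` read back, in pointer-sized slots:

    // slot 0     : drop_in_place fn pointer
    // slot 1     : size of the concrete type
    // slot 2     : alignment of the concrete type
    // slot 3 + i : fn pointer for the i-th resolvable trait method
    //
    // e.g. reading the size back through the new `AllocRef`-based accessor:
    let size = vtable.read_ptr_sized(pointer_size)?.check_init()?;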
diff --git a/compiler/rustc_mir/src/interpret/validity.rs b/compiler/rustc_mir/src/interpret/validity.rs
index 83b0d0528f7..fb165a991bc 100644
--- a/compiler/rustc_mir/src/interpret/validity.rs
+++ b/compiler/rustc_mir/src/interpret/validity.rs
@@ -15,13 +15,13 @@ use rustc_middle::mir::interpret::InterpError;
 use rustc_middle::ty;
 use rustc_middle::ty::layout::TyAndLayout;
 use rustc_span::symbol::{sym, Symbol};
-use rustc_target::abi::{Abi, LayoutOf, Scalar, Size, VariantIdx, Variants};
+use rustc_target::abi::{Abi, LayoutOf, Scalar as ScalarAbi, Size, VariantIdx, Variants};
 
 use std::hash::Hash;
 
 use super::{
-    CheckInAllocMsg, GlobalAlloc, InterpCx, InterpResult, MPlaceTy, Machine, MemPlaceMeta, OpTy,
-    ScalarMaybeUninit, ValueVisitor,
+    alloc_range, CheckInAllocMsg, GlobalAlloc, InterpCx, InterpResult, MPlaceTy, Machine,
+    MemPlaceMeta, OpTy, Scalar, ScalarMaybeUninit, ValueVisitor,
 };
 
 macro_rules! throw_validation_failure {
@@ -329,7 +329,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
                     self.ecx.memory.check_ptr_access_align(
                         vtable,
                         3 * self.ecx.tcx.data_layout.pointer_size, // drop, size, align
-                        Some(self.ecx.tcx.data_layout.pointer_align.abi),
+                        self.ecx.tcx.data_layout.pointer_align.abi,
                         CheckInAllocMsg::InboundsTest, // will anyway be replaced by validity message
                     ),
                     self.path,
@@ -411,11 +411,11 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
             // alignment should take attributes into account).
             .unwrap_or_else(|| (place.layout.size, place.layout.align.abi));
         // Direct call to `check_ptr_access_align` checks alignment even on CTFE machines.
-        let ptr: Option<_> = try_validation!(
+        try_validation!(
             self.ecx.memory.check_ptr_access_align(
                 place.ptr,
                 size,
-                Some(align),
+                align,
                 CheckInAllocMsg::InboundsTest, // will anyway be replaced by validity message
             ),
             self.path,
@@ -441,9 +441,18 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
         );
         // Recursive checking
         if let Some(ref mut ref_tracking) = self.ref_tracking {
-            if let Some(ptr) = ptr {
+            // Proceed recursively even for ZST, no reason to skip them!
+            // `!` is a ZST and we want to validate it.
+            // Normalize before handing `place` to tracking because that will
+            // check for duplicates.
+            let place = if size.bytes() > 0 {
+                self.ecx.force_mplace_ptr(place).expect("we already bounds-checked")
+            } else {
+                place
+            };
+            // Skip validation entirely for some external statics
+            if let Scalar::Ptr(ptr) = place.ptr {
                 // not a ZST
-                // Skip validation entirely for some external statics
                 let alloc_kind = self.ecx.tcx.get_global_alloc(ptr.alloc_id);
                 if let Some(GlobalAlloc::Static(did)) = alloc_kind {
                     assert!(!self.ecx.tcx.is_thread_local_static(did));
@@ -473,15 +482,6 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
                     return Ok(());
                 }
             }
-            // Proceed recursively even for ZST, no reason to skip them!
-            // `!` is a ZST and we want to validate it.
-            // Normalize before handing `place` to tracking because that will
-            // check for duplicates.
-            let place = if size.bytes() > 0 {
-                self.ecx.force_mplace_ptr(place).expect("we already bounds-checked")
-            } else {
-                place
-            };
             let path = &self.path;
             ref_tracking.track(place, || {
                 // We need to clone the path anyway, make sure it gets created
@@ -638,7 +638,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
     fn visit_scalar(
         &mut self,
         op: &OpTy<'tcx, M::PointerTag>,
-        scalar_layout: &Scalar,
+        scalar_layout: &ScalarAbi,
     ) -> InterpResult<'tcx> {
         let value = self.read_scalar(op)?;
         let valid_range = &scalar_layout.valid_range;
@@ -851,16 +851,10 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
                 let mplace = op.assert_mem_place(self.ecx);
                 // This is the length of the array/slice.
                 let len = mplace.len(self.ecx)?;
-                // Zero length slices have nothing to be checked.
-                if len == 0 {
-                    return Ok(());
-                }
                 // This is the element type size.
                 let layout = self.ecx.layout_of(tys)?;
                 // This is the size in bytes of the whole array. (This checks for overflow.)
                 let size = layout.size * len;
-                // Size is not 0, get a pointer.
-                let ptr = self.ecx.force_ptr(mplace.ptr)?;
 
                 // Optimization: we just check the entire range at once.
                 // NOTE: Keep this in sync with the handling of integer and float
@@ -872,10 +866,16 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
                 // to reject those pointers, we just do not have the machinery to
                 // talk about parts of a pointer.
                 // We also accept uninit, for consistency with the slow path.
-                match self.ecx.memory.get_raw(ptr.alloc_id)?.check_bytes(
-                    self.ecx,
-                    ptr,
-                    size,
+                let alloc = match self.ecx.memory.get(mplace.ptr, size, mplace.align)? {
+                    Some(a) => a,
+                    None => {
+                        // Size 0, nothing more to check.
+                        return Ok(());
+                    }
+                };
+
+                match alloc.check_bytes(
+                    alloc_range(Size::ZERO, size),
                     /*allow_uninit_and_ptr*/ self.ctfe_mode.is_none(),
                 ) {
                     // In the happy case, we needn't check anything else.
@@ -885,12 +885,12 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
                         // For some errors we might be able to provide extra information.
                         // (This custom logic does not fit the `try_validation!` macro.)
                         match err.kind() {
-                            err_ub!(InvalidUninitBytes(Some(access))) => {
+                            err_ub!(InvalidUninitBytes(Some((_alloc_id, access)))) => {
                                 // Some byte was uninitialized, determine which
                                 // element that byte belongs to so we can
                                 // provide an index.
                                 let i = usize::try_from(
-                                    access.uninit_ptr.offset.bytes() / layout.size.bytes(),
+                                    access.uninit_offset.bytes() / layout.size.bytes(),
                                 )
                                 .unwrap();
                                 self.path.push(PathElem::ArrayElem(i));
diff --git a/compiler/rustc_mir/src/lib.rs b/compiler/rustc_mir/src/lib.rs
index 783aa9465c3..69585979d41 100644
--- a/compiler/rustc_mir/src/lib.rs
+++ b/compiler/rustc_mir/src/lib.rs
@@ -21,6 +21,8 @@ Rust MIR: a lowered representation of Rust.
 #![feature(never_type)]
 #![feature(map_try_insert)]
 #![feature(min_specialization)]
+#![feature(slice_ptr_len)]
+#![feature(slice_ptr_get)]
 #![feature(trusted_len)]
 #![feature(try_blocks)]
 #![feature(associated_type_defaults)]
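
The two new feature gates are presumably needed because the memory code now hands out raw slice pointers (see `get_bytes_mut_ptr(...).as_mut_ptr()` in `memory.rs` above): `slice_ptr_len` enables `len` and `slice_ptr_get` enables `as_mut_ptr` on `*mut [u8]` without going through a reference. A minimal stand-alone illustration (not part of the patch):

    #![feature(slice_ptr_len, slice_ptr_get)]

    fn raw_slice_demo(buf: *mut [u8]) -> usize {
        // Neither call dereferences the pointer; they only inspect its
        // metadata or return the base address.
        let len = buf.len();          // feature(slice_ptr_len)
        let _base = buf.as_mut_ptr(); // feature(slice_ptr_get)
        len
    }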